From 658d59c20b84946065556a59aabbcf03577d933a Mon Sep 17 00:00:00 2001 From: Tony Breeds Date: Tue, 12 Sep 2017 16:21:25 -0600 Subject: [PATCH] Retire Packaging Deb project repos This commit is part of a series to retire the Packaging Deb project. Step 2 is to remove all content from the project repos, replacing it with a README notification where to find ongoing work, and how to recover the repo if needed at some future point (as in https://docs.openstack.org/infra/manual/drivers.html#retiring-a-project). Change-Id: I36400d5ff47dc7ab7f90fd0648ef8684ddbbd724 --- .coveragerc | 12 - .gitignore | 74 - .gitreview | 4 - .mailmap | 3 - .testr.conf | 7 - CONTRIBUTING.rst | 16 - HACKING.rst | 11 - LICENSE | 176 --- README | 14 + README.rst | 31 - babel.cfg | 2 - devstack/files/apache-watcher-api.template | 42 - devstack/lib/watcher | 313 ---- devstack/local.conf.compute | 49 - devstack/local.conf.controller | 59 - devstack/plugin.sh | 53 - devstack/settings | 9 - doc/ext/__init__.py | 0 doc/ext/term.py | 172 -- doc/ext/versioned_notifications.py | 133 -- doc/notification_samples/action-create.json | 40 - doc/notification_samples/action-delete.json | 40 - .../action-execution-end.json | 41 - .../action-execution-error.json | 51 - .../action-execution-start.json | 41 - doc/notification_samples/action-update.json | 49 - .../action_plan-create.json | 54 - .../action_plan-delete.json | 54 - .../action_plan-execution-end.json | 55 - .../action_plan-execution-error.json | 65 - .../action_plan-execution-start.json | 55 - .../action_plan-update.json | 63 - doc/notification_samples/audit-create.json | 71 - doc/notification_samples/audit-delete.json | 71 - .../audit-planner-end.json | 72 - .../audit-planner-error.json | 82 - .../audit-planner-start.json | 72 - .../audit-strategy-end.json | 72 - .../audit-strategy-error.json | 82 - .../audit-strategy-start.json | 72 - doc/notification_samples/audit-update.json | 80 - .../infra-optim-exception.json | 16 - 
doc/notification_samples/service-update.json | 26 - doc/source/admin/apache-mod-wsgi.rst | 49 - doc/source/admin/conf-files.rst | 14 - doc/source/admin/configuration.rst | 460 ------ doc/source/admin/gmr.rst | 52 - doc/source/admin/index.rst | 14 - doc/source/admin/policy.rst | 142 -- doc/source/admin/ways-to-install.rst | 162 -- doc/source/api/index.rst | 4 - doc/source/api/v1.rst | 88 -- doc/source/architecture.rst | 464 ------ doc/source/conf.py | 145 -- doc/source/config-generator.conf | 1 - doc/source/contributor/contributing.rst | 72 - doc/source/contributor/devstack.rst | 241 --- doc/source/contributor/environment.rst | 275 ---- doc/source/contributor/index.rst | 8 - doc/source/contributor/notifications.rst | 13 - .../contributor/plugin/action-plugin.rst | 219 --- doc/source/contributor/plugin/base-setup.rst | 100 -- doc/source/contributor/plugin/cdmc-plugin.rst | 272 ---- doc/source/contributor/plugin/goal-plugin.rst | 215 --- doc/source/contributor/plugin/index.rst | 11 - .../contributor/plugin/planner-plugin.rst | 174 -- doc/source/contributor/plugin/plugins.rst | 76 - .../plugin/scoring-engine-plugin.rst | 210 --- .../contributor/plugin/strategy-plugin.rst | 314 ---- doc/source/contributor/rally_link.rst | 1 - doc/source/contributor/testing.rst | 50 - doc/source/glossary.rst | 386 ----- doc/source/image_src/dia/architecture.dia | Bin 3198 -> 0 bytes .../image_src/dia/functional_data_model.dia | Bin 3212 -> 0 bytes doc/source/image_src/plantuml/README.rst | 14 - .../plantuml/action_plan_state_machine.txt | 18 - .../plantuml/audit_state_machine.txt | 17 - .../sequence_architecture_cdmc_sync.txt | 41 - .../sequence_create_and_launch_audit.txt | 24 - .../sequence_create_audit_template.txt | 22 - ...audit_execution_to_actionplan_creation.txt | 44 - .../plantuml/sequence_launch_action_plan.txt | 23 - ...sequence_launch_action_plan_in_applier.txt | 31 - .../sequence_overview_watcher_usage.txt | 37 - ...uence_trigger_audit_in_decision_engine.txt | 50 - 
.../plantuml/watcher_db_schema_diagram.txt | 153 -- .../images/action_plan_state_machine.png | Bin 48927 -> 0 bytes doc/source/images/architecture.svg | 1407 ----------------- doc/source/images/audit_state_machine.png | Bin 48406 -> 0 bytes doc/source/images/functional_data_model.svg | 600 ------- .../sequence_architecture_cdmc_sync.png | Bin 46795 -> 0 bytes .../sequence_create_and_launch_audit.png | Bin 33521 -> 0 bytes .../images/sequence_create_audit_template.png | Bin 30711 -> 0 bytes ...audit_execution_to_actionplan_creation.png | Bin 47585 -> 0 bytes .../images/sequence_launch_action_plan.png | Bin 27899 -> 0 bytes ...sequence_launch_action_plan_in_applier.png | Bin 42190 -> 0 bytes .../sequence_overview_watcher_usage.png | Bin 46482 -> 0 bytes ...uence_trigger_audit_in_decision_engine.png | Bin 73345 -> 0 bytes .../images/watcher_db_schema_diagram.png | Bin 73815 -> 0 bytes doc/source/index.rst | 120 -- doc/source/install/common_configure.rst | 71 - doc/source/install/common_prerequisites.rst | 139 -- doc/source/install/get_started.rst | 27 - doc/source/install/index.rst | 39 - doc/source/install/install-obs.rst | 35 - doc/source/install/install-rdo.rst | 38 - doc/source/install/install-ubuntu.rst | 34 - doc/source/install/install.rst | 20 - doc/source/install/next-steps.rst | 9 - doc/source/install/verify.rst | 119 -- doc/source/man/footer.rst | 5 - doc/source/man/general-options.rst | 66 - doc/source/man/index.rst | 10 - doc/source/man/watcher-api.rst | 39 - doc/source/man/watcher-applier.rst | 39 - doc/source/man/watcher-db-manage.rst | 260 --- doc/source/man/watcher-decision-engine.rst | 39 - .../strategies/basic-server-consolidation.rst | 99 -- doc/source/strategies/index.rst | 8 - doc/source/strategies/outlet_temp_control.rst | 104 -- doc/source/strategies/strategy-template.rst | 115 -- doc/source/strategies/uniform_airflow.rst | 107 -- .../strategies/vm_workload_consolidation.rst | 114 -- .../strategies/workload-stabilization.rst | 141 -- 
doc/source/strategies/workload_balance.rst | 98 -- doc/source/user/index.rst | 4 - doc/source/user/user-guide.rst | 236 --- etc/apache2/watcher | 33 - etc/watcher/README-watcher.conf.txt | 4 - etc/watcher/policy.json | 45 - etc/watcher/watcher-config-generator.conf | 16 - rally-jobs/README.rst | 42 - rally-jobs/watcher-watcher.yaml | 63 - releasenotes/notes/.placeholder | 0 .../action-plan-cancel-c54726378019e096.yaml | 4 - ...ed-notifications-api-e8ca4f5d37aa5b4b.yaml | 3 - ...d-plugins-parameters-376eb6b0b8978b44.yaml | 8 - .../add-power-on-off-a77673d482568a8b.yaml | 3 - .../add-scoring-module-fa00d013ed2d614e.yaml | 7 - ...ed-notifications-api-bca7738e16954bad.yaml | 3 - ...tic-triggering-audit-8a9b0540d547db60.yaml | 5 - ...ntralise-config-opts-95670987dfbdb0e7.yaml | 3 - ...er-model-integration-baa394a72a0a33bf.yaml | 4 - ...odel-objects-wrapper-9c799ea262c56a5b.yaml | 6 - ...ghts-default-planner-3746b33160bc7347.yaml | 4 - ...nuously-optimization-35364f4d2c0b81fc.yaml | 4 - .../notes/db-migration-e1a705a8b54ccdd2.yaml | 3 - ...fine-the-audit-scope-e89edc5051dcf3f2.yaml | 5 - .../efficacy-indicator-95380ad7b84e3be2.yaml | 4 - ...t-goal-from-strategy-396c9b13a38bb650.yaml | 5 - ...-based-cluster-model-523937a6f5e66537.yaml | 6 - .../monasca-support-0b0486b8572ac38b.yaml | 4 - ...timization-threshold-21ad38f0470d0e1a.yaml | 5 - ...ent-audit-parameters-ae41dd7252ba9672.yaml | 6 - ...-storage-action-plan-26ef37893c5e8648.yaml | 4 - .../stale-action-plan-b6a6b08df873c128.yaml | 4 - ...d-deviation-strategy-cd1d0c443fdfde9c.yaml | 7 - ...uspended-audit-state-07f998c94e9d9a47.yaml | 4 - ...orm-airflow-strategy-68cdba1419c3f770.yaml | 5 - ...er-notifications-ovo-7b44d52ef6400dd0.yaml | 8 - .../watcher-policies-1e86a30f0f11c6fa.yaml | 4 - ...watcher-service-list-7b2f4b64f71e9b89.yaml | 3 - ...er-versioned-objects-fc5abf5c81c4590c.yaml | 5 - ...e-migration-strategy-a0b05148a57815c0.yaml | 7 - releasenotes/source/_static/.placeholder | 0 releasenotes/source/conf.py | 258 
--- releasenotes/source/index.rst | 26 - .../locale/fr/LC_MESSAGES/releasenotes.po | 33 - releasenotes/source/newton.rst | 6 - releasenotes/source/ocata.rst | 6 - releasenotes/source/unreleased.rst | 5 - requirements.txt | 49 - setup.cfg | 129 -- setup.py | 29 - test-requirements.txt | 27 - tox.ini | 73 - watcher/__init__.py | 16 - watcher/_i18n.py | 41 - watcher/api/__init__.py | 0 watcher/api/acl.py | 41 - watcher/api/app.py | 58 - watcher/api/app.wsgi | 40 - watcher/api/config.py | 54 - watcher/api/controllers/__init__.py | 0 watcher/api/controllers/base.py | 51 - watcher/api/controllers/link.py | 60 - watcher/api/controllers/root.py | 98 -- watcher/api/controllers/v1/__init__.py | 197 --- watcher/api/controllers/v1/action.py | 403 ----- watcher/api/controllers/v1/action_plan.py | 558 ------- watcher/api/controllers/v1/audit.py | 615 ------- watcher/api/controllers/v1/audit_template.py | 657 -------- watcher/api/controllers/v1/collection.py | 50 - .../api/controllers/v1/efficacy_indicator.py | 72 - watcher/api/controllers/v1/goal.py | 240 --- watcher/api/controllers/v1/scoring_engine.py | 248 --- watcher/api/controllers/v1/service.py | 264 ---- watcher/api/controllers/v1/strategy.py | 305 ---- watcher/api/controllers/v1/types.py | 233 --- watcher/api/controllers/v1/utils.py | 123 -- watcher/api/hooks.py | 119 -- watcher/api/middleware/__init__.py | 25 - watcher/api/middleware/auth_token.py | 61 - watcher/api/middleware/parsable_error.py | 97 -- watcher/api/scheduling.py | 97 -- watcher/applier/__init__.py | 0 watcher/applier/action_plan/__init__.py | 0 watcher/applier/action_plan/base.py | 28 - watcher/applier/action_plan/default.py | 85 - watcher/applier/actions/__init__.py | 0 watcher/applier/actions/base.py | 152 -- .../actions/change_node_power_state.py | 118 -- .../actions/change_nova_service_state.py | 115 -- watcher/applier/actions/factory.py | 42 - watcher/applier/actions/migration.py | 211 --- watcher/applier/actions/nop.py | 78 - 
watcher/applier/actions/resize.py | 111 -- watcher/applier/actions/sleep.py | 81 - watcher/applier/base.py | 36 - watcher/applier/default.py | 63 - watcher/applier/loading/__init__.py | 0 watcher/applier/loading/default.py | 29 - watcher/applier/manager.py | 57 - watcher/applier/messaging/__init__.py | 0 watcher/applier/messaging/trigger.py | 50 - watcher/applier/rpcapi.py | 71 - watcher/applier/workflow_engine/__init__.py | 0 watcher/applier/workflow_engine/base.py | 260 --- watcher/applier/workflow_engine/default.py | 165 -- watcher/cmd/__init__.py | 0 watcher/cmd/api.py | 53 - watcher/cmd/applier.py | 42 - watcher/cmd/dbmanage.py | 157 -- watcher/cmd/decisionengine.py | 53 - watcher/cmd/sync.py | 40 - watcher/common/__init__.py | 0 watcher/common/cinder_helper.py | 79 - watcher/common/clients.py | 204 --- watcher/common/config.py | 32 - watcher/common/context.py | 120 -- watcher/common/exception.py | 477 ------ watcher/common/loader/__init__.py | 0 watcher/common/loader/base.py | 32 - watcher/common/loader/default.py | 96 -- watcher/common/loader/loadable.py | 72 - watcher/common/nova_helper.py | 866 ---------- watcher/common/observable.py | 57 - watcher/common/paths.py | 37 - watcher/common/policy.py | 94 -- watcher/common/rpc.py | 144 -- watcher/common/scheduling.py | 44 - watcher/common/service.py | 308 ---- watcher/common/service_manager.py | 50 - watcher/common/synchronization.py | 22 - watcher/common/utils.py | 160 -- watcher/conf/__init__.py | 60 - watcher/conf/_opts.py | 60 - watcher/conf/api.py | 68 - watcher/conf/applier.py | 53 - watcher/conf/ceilometer_client.py | 42 - watcher/conf/cinder_client.py | 41 - watcher/conf/clients_auth.py | 31 - watcher/conf/db.py | 44 - watcher/conf/decision_engine.py | 73 - watcher/conf/exception.py | 33 - watcher/conf/glance_client.py | 41 - watcher/conf/gnocchi_client.py | 47 - watcher/conf/ironic_client.py | 41 - watcher/conf/monasca_client.py | 41 - watcher/conf/neutron_client.py | 41 - watcher/conf/nova_client.py | 
41 - watcher/conf/opts.py | 95 -- watcher/conf/paths.py | 57 - watcher/conf/planner.py | 41 - watcher/conf/plugins.py | 72 - watcher/conf/service.py | 50 - watcher/conf/utils.py | 36 - watcher/datasource/__init__.py | 0 watcher/datasource/ceilometer.py | 184 --- watcher/datasource/gnocchi.py | 92 -- watcher/datasource/monasca.py | 124 -- watcher/db/__init__.py | 0 watcher/db/api.py | 871 ---------- watcher/db/migration.py | 56 - watcher/db/purge.py | 476 ------ watcher/db/sqlalchemy/__init__.py | 0 watcher/db/sqlalchemy/alembic.ini | 54 - watcher/db/sqlalchemy/alembic/README.rst | 62 - watcher/db/sqlalchemy/alembic/env.py | 54 - watcher/db/sqlalchemy/alembic/script.py.mako | 22 - .../sqlalchemy/alembic/versions/001_ocata.py | 203 --- .../0f6042416884_add_apscheduler_jobs.py | 33 - .../d098df6021e2_cron_support_for_audit.py | 26 - watcher/db/sqlalchemy/api.py | 1129 ------------- watcher/db/sqlalchemy/job_store.py | 112 -- watcher/db/sqlalchemy/migration.py | 115 -- watcher/db/sqlalchemy/models.py | 280 ---- watcher/decision_engine/__init__.py | 0 watcher/decision_engine/audit/__init__.py | 0 watcher/decision_engine/audit/base.py | 135 -- watcher/decision_engine/audit/continuous.py | 170 -- watcher/decision_engine/audit/oneshot.py | 34 - watcher/decision_engine/gmr.py | 48 - watcher/decision_engine/goal/__init__.py | 28 - watcher/decision_engine/goal/base.py | 64 - .../decision_engine/goal/efficacy/__init__.py | 0 watcher/decision_engine/goal/efficacy/base.py | 84 - .../goal/efficacy/indicators.py | 146 -- .../decision_engine/goal/efficacy/specs.py | 53 - watcher/decision_engine/goal/goals.py | 194 --- watcher/decision_engine/loading/__init__.py | 0 watcher/decision_engine/loading/default.py | 61 - watcher/decision_engine/manager.py | 81 - watcher/decision_engine/messaging/__init__.py | 0 .../messaging/audit_endpoint.py | 55 - watcher/decision_engine/model/__init__.py | 0 watcher/decision_engine/model/base.py | 40 - .../model/collector/__init__.py | 0 
.../decision_engine/model/collector/base.py | 185 --- .../decision_engine/model/collector/cinder.py | 209 --- .../model/collector/manager.py | 61 - .../decision_engine/model/collector/nova.py | 370 ----- .../decision_engine/model/element/__init__.py | 40 - watcher/decision_engine/model/element/base.py | 68 - .../model/element/compute_resource.py | 33 - .../decision_engine/model/element/instance.py | 55 - watcher/decision_engine/model/element/node.py | 80 - .../model/element/storage_resource.py | 33 - .../decision_engine/model/element/volume.py | 56 - watcher/decision_engine/model/model_root.py | 541 ------- .../model/notification/__init__.py | 0 .../model/notification/base.py | 38 - .../model/notification/cinder.py | 387 ----- .../model/notification/filtering.py | 88 -- .../model/notification/nova.py | 466 ------ watcher/decision_engine/planner/__init__.py | 0 watcher/decision_engine/planner/base.py | 78 - watcher/decision_engine/planner/manager.py | 39 - watcher/decision_engine/planner/weight.py | 222 --- .../planner/workload_stabilization.py | 300 ---- watcher/decision_engine/rpcapi.py | 72 - watcher/decision_engine/scheduling.py | 108 -- watcher/decision_engine/scope/__init__.py | 0 watcher/decision_engine/scope/base.py | 39 - watcher/decision_engine/scope/default.py | 263 --- watcher/decision_engine/scoring/__init__.py | 0 watcher/decision_engine/scoring/base.py | 126 -- .../decision_engine/scoring/dummy_scorer.py | 169 -- .../scoring/dummy_scoring_container.py | 98 -- .../scoring/scoring_factory.py | 106 -- watcher/decision_engine/solution/__init__.py | 0 watcher/decision_engine/solution/base.py | 117 -- watcher/decision_engine/solution/default.py | 69 - watcher/decision_engine/solution/efficacy.py | 105 -- .../solution/solution_comparator.py | 27 - .../solution/solution_evaluator.py | 27 - watcher/decision_engine/strategy/__init__.py | 0 .../strategy/common/__init__.py | 0 .../decision_engine/strategy/common/level.py | 27 - .../strategy/context/__init__.py | 
0 .../decision_engine/strategy/context/base.py | 70 - .../strategy/context/default.py | 68 - .../strategy/selection/__init__.py | 0 .../strategy/selection/base.py | 28 - .../strategy/selection/default.py | 73 - .../strategy/strategies/__init__.py | 40 - .../strategy/strategies/base.py | 360 ----- .../strategies/basic_consolidation.py | 565 ------- .../strategy/strategies/dummy_strategy.py | 103 -- .../strategy/strategies/dummy_with_resize.py | 121 -- .../strategy/strategies/dummy_with_scorer.py | 166 -- .../strategy/strategies/noisy_neighbor.py | 304 ---- .../strategies/outlet_temp_control.py | 333 ---- .../strategy/strategies/uniform_airflow.py | 442 ------ .../strategies/vm_workload_consolidation.py | 651 -------- .../strategy/strategies/workload_balance.py | 414 ----- .../strategies/workload_stabilization.py | 520 ------ watcher/decision_engine/sync.py | 571 ------- watcher/hacking/__init__.py | 0 watcher/hacking/checks.py | 288 ---- watcher/notifications/__init__.py | 29 - watcher/notifications/action.py | 302 ---- watcher/notifications/action_plan.py | 340 ---- watcher/notifications/audit.py | 368 ----- watcher/notifications/base.py | 216 --- watcher/notifications/exception.py | 55 - watcher/notifications/goal.py | 53 - watcher/notifications/service.py | 113 -- watcher/notifications/strategy.py | 53 - watcher/objects/__init__.py | 35 - watcher/objects/action.py | 182 --- watcher/objects/action_plan.py | 340 ---- watcher/objects/audit.py | 328 ---- watcher/objects/audit_template.py | 241 --- watcher/objects/base.py | 179 --- watcher/objects/efficacy_indicator.py | 185 --- watcher/objects/fields.py | 168 -- watcher/objects/goal.py | 176 --- watcher/objects/scoring_engine.py | 198 --- watcher/objects/service.py | 145 -- watcher/objects/strategy.py | 237 --- watcher/objects/utils.py | 151 -- watcher/tests/__init__.py | 22 - watcher/tests/api/__init__.py | 0 watcher/tests/api/base.py | 291 ---- watcher/tests/api/test_base.py | 30 - watcher/tests/api/test_hooks.py | 
273 ---- watcher/tests/api/test_root.py | 45 - watcher/tests/api/test_scheduling.py | 114 -- watcher/tests/api/test_utils.py | 59 - watcher/tests/api/utils.py | 108 -- watcher/tests/api/v1/__init__.py | 0 watcher/tests/api/v1/test_actions.py | 509 ------ watcher/tests/api/v1/test_actions_plans.py | 625 -------- watcher/tests/api/v1/test_audit_templates.py | 754 --------- watcher/tests/api/v1/test_audits.py | 918 ----------- watcher/tests/api/v1/test_goals.py | 167 -- watcher/tests/api/v1/test_root.py | 20 - watcher/tests/api/v1/test_scoring_engines.py | 160 -- watcher/tests/api/v1/test_services.py | 178 --- watcher/tests/api/v1/test_strategies.py | 248 --- watcher/tests/api/v1/test_types.py | 249 --- watcher/tests/api/v1/test_utils.py | 70 - watcher/tests/applier/__init__.py | 0 watcher/tests/applier/action_plan/__init__.py | 0 .../test_default_action_handler.py | 126 -- watcher/tests/applier/actions/__init__.py | 0 .../tests/applier/actions/loading/__init__.py | 0 .../loading/test_default_actions_loader.py | 31 - .../actions/test_change_node_power_state.py | 134 -- .../actions/test_change_nova_service_state.py | 133 -- .../tests/applier/actions/test_migration.py | 254 --- watcher/tests/applier/actions/test_resize.py | 93 -- watcher/tests/applier/actions/test_sleep.py | 46 - watcher/tests/applier/messaging/__init__.py | 0 .../test_trigger_action_plan_endpoint.py | 41 - watcher/tests/applier/test_applier_manager.py | 42 - watcher/tests/applier/test_rpcapi.py | 58 - .../tests/applier/workflow_engine/__init__.py | 0 .../workflow_engine/loading/__init__.py | 0 .../loading/test_default_engine_loader.py | 32 - .../test_default_workflow_engine.py | 354 ----- .../test_taskflow_action_container.py | 79 - watcher/tests/base.py | 142 -- watcher/tests/cmd/__init__.py | 0 watcher/tests/cmd/test_api.py | 66 - watcher/tests/cmd/test_applier.py | 55 - watcher/tests/cmd/test_db_manage.py | 175 -- watcher/tests/cmd/test_decision_engine.py | 64 - watcher/tests/common/__init__.py | 0 
watcher/tests/common/loader/__init__.py | 0 watcher/tests/common/loader/test_loader.py | 105 -- watcher/tests/common/test_cinder_helper.py | 126 -- watcher/tests/common/test_clients.py | 432 ----- watcher/tests/common/test_nova_helper.py | 365 ----- watcher/tests/common/test_service.py | 106 -- watcher/tests/conf/__init__.py | 0 watcher/tests/conf/test_list_opts.py | 148 -- watcher/tests/conf_fixture.py | 59 - watcher/tests/config.py | 38 - watcher/tests/datasource/__init__.py | 0 .../datasource/test_ceilometer_helper.py | 95 -- .../tests/datasource/test_gnocchi_helper.py | 68 - .../tests/datasource/test_monasca_helper.py | 102 -- watcher/tests/db/__init__.py | 0 watcher/tests/db/base.py | 86 - watcher/tests/db/test_action.py | 394 ----- watcher/tests/db/test_action_plan.py | 391 ----- watcher/tests/db/test_audit.py | 391 ----- watcher/tests/db/test_audit_template.py | 388 ----- watcher/tests/db/test_efficacy_indicator.py | 410 ----- watcher/tests/db/test_goal.py | 327 ---- watcher/tests/db/test_purge.py | 502 ------ watcher/tests/db/test_scoring_engine.py | 337 ---- watcher/tests/db/test_service.py | 302 ---- watcher/tests/db/test_strategy.py | 364 ----- watcher/tests/db/utils.py | 333 ---- watcher/tests/decision_engine/__init__.py | 1 - .../tests/decision_engine/audit/__init__.py | 0 .../audit/test_audit_handlers.py | 363 ----- .../tests/decision_engine/cluster/__init__.py | 0 .../test_cluster_data_model_collector.py | 54 - .../decision_engine/cluster/test_nova_cdmc.py | 86 - .../event_consumer/__init__.py | 0 watcher/tests/decision_engine/fake_goals.py | 83 - .../tests/decision_engine/fake_strategies.py | 85 - .../tests/decision_engine/loading/__init__.py | 0 .../loading/test_collector_loader.py | 82 - .../loading/test_default_planner_loader.py | 31 - .../loading/test_default_strategy_loader.py | 78 - .../loading/test_goal_loader.py | 78 - .../decision_engine/messaging/__init__.py | 0 .../messaging/test_audit_endpoint.py | 68 - 
.../tests/decision_engine/model/__init__.py | 0 .../model/ceilometer_metrics.py | 295 ---- .../decision_engine/model/data/scenario_1.xml | 47 - .../model/data/scenario_1_with_metrics.xml | 8 - .../model/data/scenario_2_with_metrics.xml | 13 - .../model/data/scenario_3_with_2_nodes.xml | 8 - .../model/data/scenario_3_with_metrics.xml | 9 - .../scenario_4_with_1_node_no_instance.xml | 3 - .../data/scenario_5_with_instance_disk_0.xml | 5 - .../model/data/scenario_6_with_2_nodes.xml | 10 - .../model/data/scenario_7_with_2_nodes.xml | 10 - .../model/data/scenario_8_with_4_nodes.xml | 16 - ..._9_with_3_active_plus_1_disabled_nodes.xml | 16 - .../model/data/storage_scenario_1.xml | 23 - .../model/faker_cluster_and_metrics.py | 242 --- .../model/faker_cluster_state.py | 257 --- .../decision_engine/model/gnocchi_metrics.py | 244 --- .../decision_engine/model/monasca_metrics.py | 266 ---- .../model/notification/__init__.py | 0 .../model/notification/data/capacity.json | 14 - .../notification/data/instance-create.json | 70 - .../data/instance-delete-end.json | 49 - .../notification/data/instance-update.json | 65 - .../data/scenario3_instance-create.json | 70 - .../data/scenario3_instance-delete-end.json | 49 - .../data/scenario3_instance-update.json | 65 - .../scenario3_legacy_instance-create-end.json | 62 - .../scenario3_legacy_instance-delete-end.json | 46 - .../scenario3_legacy_instance-update.json | 52 - ...o3_legacy_livemigration-post-dest-end.json | 61 - .../scenario3_notfound_instance-update.json | 65 - ...ario3_notfound_legacy_instance-update.json | 52 - .../scenario3_service-update-disabled.json | 21 - .../scenario3_service-update-enabled.json | 21 - .../scenario_1_bootable-volume-create.json | 17 - .../data/scenario_1_capacity.json | 14 - .../scenario_1_capacity_node_notfound.json | 14 - .../scenario_1_capacity_pool_notfound.json | 14 - .../data/scenario_1_error-volume-create.json | 16 - .../data/scenario_1_volume-attach.json | 16 - 
.../data/scenario_1_volume-create.json | 16 - ...cenario_1_volume-create_pool_notfound.json | 16 - .../data/scenario_1_volume-delete.json | 16 - .../data/scenario_1_volume-detach.json | 16 - .../data/scenario_1_volume-resize.json | 16 - .../data/scenario_1_volume-update.json | 16 - .../notification/data/service-update.json | 21 - .../model/notification/fake_managers.py | 85 - .../notification/test_cinder_notifications.py | 607 ------- .../model/notification/test_notifications.py | 108 -- .../notification/test_nova_notifications.py | 523 ------ .../decision_engine/model/test_element.py | 154 -- .../tests/decision_engine/model/test_model.py | 369 ----- .../tests/decision_engine/planner/__init__.py | 0 .../planner/test_planner_manager.py | 28 - .../planner/test_weight_planner.py | 944 ----------- .../test_workload_stabilization_planner.py | 379 ----- .../tests/decision_engine/scope/__init__.py | 0 .../decision_engine/scope/fake_scopes.py | 35 - .../decision_engine/scope/test_default.py | 255 --- .../tests/decision_engine/scoring/__init__.py | 0 .../scoring/test_dummy_scorer.py | 54 - .../scoring/test_dummy_scoring_container.py | 51 - .../scoring/test_scoring_factory.py | 53 - .../decision_engine/solution/__init__.py | 0 .../solution/test_default_solution.py | 60 - .../decision_engine/strategy/__init__.py | 0 .../strategy/context/__init__.py | 0 .../strategy/context/test_strategy_context.py | 99 -- .../strategy/selector/__init__.py | 0 .../selector/test_strategy_selector.py | 62 - .../strategy/strategies/__init__.py | 0 .../strategies/test_basic_consolidation.py | 337 ---- .../strategies/test_dummy_strategy.py | 72 - .../strategies/test_dummy_with_scorer.py | 60 - .../strategies/test_noisy_neighbor.py | 179 --- .../strategies/test_outlet_temp_control.py | 207 --- .../strategies/test_uniform_airflow.py | 249 --- .../test_vm_workload_consolidation.py | 346 ---- .../strategies/test_workload_balance.py | 217 --- .../strategies/test_workload_stabilization.py | 277 ---- 
watcher/tests/decision_engine/test_gmr.py | 36 - watcher/tests/decision_engine/test_rpcapi.py | 51 - .../tests/decision_engine/test_scheduling.py | 86 - watcher/tests/decision_engine/test_sync.py | 661 -------- watcher/tests/fake_policy.py | 80 - watcher/tests/fakes.py | 91 -- watcher/tests/notifications/__init__.py | 0 .../notifications/test_action_notification.py | 355 ----- .../test_action_plan_notification.py | 429 ----- .../notifications/test_audit_notification.py | 477 ------ .../tests/notifications/test_notification.py | 355 ----- .../test_service_notifications.py | 77 - watcher/tests/objects/__init__.py | 0 watcher/tests/objects/test_action.py | 231 --- watcher/tests/objects/test_action_plan.py | 324 ---- watcher/tests/objects/test_audit.py | 325 ---- watcher/tests/objects/test_audit_template.py | 222 --- .../tests/objects/test_efficacy_indicator.py | 148 -- watcher/tests/objects/test_goal.py | 143 -- watcher/tests/objects/test_objects.py | 562 ------- watcher/tests/objects/test_scoring_engine.py | 152 -- watcher/tests/objects/test_service.py | 116 -- watcher/tests/objects/test_strategy.py | 167 -- watcher/tests/objects/utils.py | 256 --- watcher/tests/policy_fixture.py | 44 - watcher/version.py | 19 - watcher_tempest_plugin/README.rst | 158 -- watcher_tempest_plugin/__init__.py | 0 watcher_tempest_plugin/config.py | 23 - watcher_tempest_plugin/infra_optim_clients.py | 42 - watcher_tempest_plugin/plugin.py | 34 - watcher_tempest_plugin/services/__init__.py | 0 .../services/infra_optim/__init__.py | 0 .../services/infra_optim/base.py | 211 --- .../services/infra_optim/v1/__init__.py | 0 .../services/infra_optim/v1/json/__init__.py | 0 .../services/infra_optim/v1/json/client.py | 331 ---- watcher_tempest_plugin/tests/__init__.py | 0 watcher_tempest_plugin/tests/api/__init__.py | 0 .../tests/api/admin/__init__.py | 0 .../tests/api/admin/base.py | 263 --- .../tests/api/admin/test_action.py | 110 -- .../tests/api/admin/test_action_plan.py | 176 --- 
.../tests/api/admin/test_api_discovery.py | 47 - .../tests/api/admin/test_audit.py | 221 --- .../tests/api/admin/test_audit_template.py | 226 --- .../tests/api/admin/test_goal.py | 66 - .../tests/api/admin/test_scoring_engine.py | 65 - .../tests/api/admin/test_service.py | 90 -- .../tests/api/admin/test_strategy.py | 69 - .../tests/scenario/__init__.py | 0 watcher_tempest_plugin/tests/scenario/base.py | 170 -- .../tests/scenario/manager.py | 206 --- .../scenario/test_execute_basic_optim.py | 191 --- .../scenario/test_execute_dummy_optim.py | 85 - .../test_execute_workload_balancing.py | 198 --- 630 files changed, 14 insertions(+), 72530 deletions(-) delete mode 100644 .coveragerc delete mode 100644 .gitignore delete mode 100644 .gitreview delete mode 100644 .mailmap delete mode 100644 .testr.conf delete mode 100644 CONTRIBUTING.rst delete mode 100644 HACKING.rst delete mode 100644 LICENSE create mode 100644 README delete mode 100644 README.rst delete mode 100644 babel.cfg delete mode 100644 devstack/files/apache-watcher-api.template delete mode 100644 devstack/lib/watcher delete mode 100644 devstack/local.conf.compute delete mode 100644 devstack/local.conf.controller delete mode 100644 devstack/plugin.sh delete mode 100644 devstack/settings delete mode 100644 doc/ext/__init__.py delete mode 100644 doc/ext/term.py delete mode 100644 doc/ext/versioned_notifications.py delete mode 100644 doc/notification_samples/action-create.json delete mode 100644 doc/notification_samples/action-delete.json delete mode 100644 doc/notification_samples/action-execution-end.json delete mode 100644 doc/notification_samples/action-execution-error.json delete mode 100644 doc/notification_samples/action-execution-start.json delete mode 100644 doc/notification_samples/action-update.json delete mode 100644 doc/notification_samples/action_plan-create.json delete mode 100644 doc/notification_samples/action_plan-delete.json delete mode 100644 
doc/notification_samples/action_plan-execution-end.json delete mode 100644 doc/notification_samples/action_plan-execution-error.json delete mode 100644 doc/notification_samples/action_plan-execution-start.json delete mode 100644 doc/notification_samples/action_plan-update.json delete mode 100644 doc/notification_samples/audit-create.json delete mode 100644 doc/notification_samples/audit-delete.json delete mode 100644 doc/notification_samples/audit-planner-end.json delete mode 100644 doc/notification_samples/audit-planner-error.json delete mode 100644 doc/notification_samples/audit-planner-start.json delete mode 100644 doc/notification_samples/audit-strategy-end.json delete mode 100644 doc/notification_samples/audit-strategy-error.json delete mode 100644 doc/notification_samples/audit-strategy-start.json delete mode 100644 doc/notification_samples/audit-update.json delete mode 100644 doc/notification_samples/infra-optim-exception.json delete mode 100644 doc/notification_samples/service-update.json delete mode 100644 doc/source/admin/apache-mod-wsgi.rst delete mode 100644 doc/source/admin/conf-files.rst delete mode 100644 doc/source/admin/configuration.rst delete mode 100644 doc/source/admin/gmr.rst delete mode 100644 doc/source/admin/index.rst delete mode 100644 doc/source/admin/policy.rst delete mode 100644 doc/source/admin/ways-to-install.rst delete mode 100644 doc/source/api/index.rst delete mode 100644 doc/source/api/v1.rst delete mode 100644 doc/source/architecture.rst delete mode 100755 doc/source/conf.py delete mode 100644 doc/source/config-generator.conf delete mode 100644 doc/source/contributor/contributing.rst delete mode 100644 doc/source/contributor/devstack.rst delete mode 100644 doc/source/contributor/environment.rst delete mode 100644 doc/source/contributor/index.rst delete mode 100644 doc/source/contributor/notifications.rst delete mode 100644 doc/source/contributor/plugin/action-plugin.rst delete mode 100644 
doc/source/contributor/plugin/base-setup.rst delete mode 100644 doc/source/contributor/plugin/cdmc-plugin.rst delete mode 100644 doc/source/contributor/plugin/goal-plugin.rst delete mode 100644 doc/source/contributor/plugin/index.rst delete mode 100644 doc/source/contributor/plugin/planner-plugin.rst delete mode 100644 doc/source/contributor/plugin/plugins.rst delete mode 100644 doc/source/contributor/plugin/scoring-engine-plugin.rst delete mode 100644 doc/source/contributor/plugin/strategy-plugin.rst delete mode 100644 doc/source/contributor/rally_link.rst delete mode 100644 doc/source/contributor/testing.rst delete mode 100644 doc/source/glossary.rst delete mode 100644 doc/source/image_src/dia/architecture.dia delete mode 100644 doc/source/image_src/dia/functional_data_model.dia delete mode 100644 doc/source/image_src/plantuml/README.rst delete mode 100644 doc/source/image_src/plantuml/action_plan_state_machine.txt delete mode 100644 doc/source/image_src/plantuml/audit_state_machine.txt delete mode 100644 doc/source/image_src/plantuml/sequence_architecture_cdmc_sync.txt delete mode 100644 doc/source/image_src/plantuml/sequence_create_and_launch_audit.txt delete mode 100644 doc/source/image_src/plantuml/sequence_create_audit_template.txt delete mode 100644 doc/source/image_src/plantuml/sequence_from_audit_execution_to_actionplan_creation.txt delete mode 100644 doc/source/image_src/plantuml/sequence_launch_action_plan.txt delete mode 100644 doc/source/image_src/plantuml/sequence_launch_action_plan_in_applier.txt delete mode 100644 doc/source/image_src/plantuml/sequence_overview_watcher_usage.txt delete mode 100644 doc/source/image_src/plantuml/sequence_trigger_audit_in_decision_engine.txt delete mode 100644 doc/source/image_src/plantuml/watcher_db_schema_diagram.txt delete mode 100644 doc/source/images/action_plan_state_machine.png delete mode 100644 doc/source/images/architecture.svg delete mode 100644 doc/source/images/audit_state_machine.png delete mode 100644 
doc/source/images/functional_data_model.svg delete mode 100644 doc/source/images/sequence_architecture_cdmc_sync.png delete mode 100644 doc/source/images/sequence_create_and_launch_audit.png delete mode 100644 doc/source/images/sequence_create_audit_template.png delete mode 100644 doc/source/images/sequence_from_audit_execution_to_actionplan_creation.png delete mode 100644 doc/source/images/sequence_launch_action_plan.png delete mode 100644 doc/source/images/sequence_launch_action_plan_in_applier.png delete mode 100644 doc/source/images/sequence_overview_watcher_usage.png delete mode 100644 doc/source/images/sequence_trigger_audit_in_decision_engine.png delete mode 100644 doc/source/images/watcher_db_schema_diagram.png delete mode 100644 doc/source/index.rst delete mode 100644 doc/source/install/common_configure.rst delete mode 100644 doc/source/install/common_prerequisites.rst delete mode 100644 doc/source/install/get_started.rst delete mode 100644 doc/source/install/index.rst delete mode 100644 doc/source/install/install-obs.rst delete mode 100644 doc/source/install/install-rdo.rst delete mode 100644 doc/source/install/install-ubuntu.rst delete mode 100644 doc/source/install/install.rst delete mode 100644 doc/source/install/next-steps.rst delete mode 100644 doc/source/install/verify.rst delete mode 100644 doc/source/man/footer.rst delete mode 100644 doc/source/man/general-options.rst delete mode 100644 doc/source/man/index.rst delete mode 100644 doc/source/man/watcher-api.rst delete mode 100644 doc/source/man/watcher-applier.rst delete mode 100644 doc/source/man/watcher-db-manage.rst delete mode 100644 doc/source/man/watcher-decision-engine.rst delete mode 100644 doc/source/strategies/basic-server-consolidation.rst delete mode 100644 doc/source/strategies/index.rst delete mode 100644 doc/source/strategies/outlet_temp_control.rst delete mode 100644 doc/source/strategies/strategy-template.rst delete mode 100644 doc/source/strategies/uniform_airflow.rst delete mode 
100644 doc/source/strategies/vm_workload_consolidation.rst delete mode 100644 doc/source/strategies/workload-stabilization.rst delete mode 100644 doc/source/strategies/workload_balance.rst delete mode 100644 doc/source/user/index.rst delete mode 100644 doc/source/user/user-guide.rst delete mode 100644 etc/apache2/watcher delete mode 100644 etc/watcher/README-watcher.conf.txt delete mode 100644 etc/watcher/policy.json delete mode 100644 etc/watcher/watcher-config-generator.conf delete mode 100644 rally-jobs/README.rst delete mode 100644 rally-jobs/watcher-watcher.yaml delete mode 100644 releasenotes/notes/.placeholder delete mode 100644 releasenotes/notes/action-plan-cancel-c54726378019e096.yaml delete mode 100644 releasenotes/notes/action-plan-versioned-notifications-api-e8ca4f5d37aa5b4b.yaml delete mode 100644 releasenotes/notes/add-plugins-parameters-376eb6b0b8978b44.yaml delete mode 100644 releasenotes/notes/add-power-on-off-a77673d482568a8b.yaml delete mode 100644 releasenotes/notes/add-scoring-module-fa00d013ed2d614e.yaml delete mode 100644 releasenotes/notes/audit-versioned-notifications-api-bca7738e16954bad.yaml delete mode 100644 releasenotes/notes/automatic-triggering-audit-8a9b0540d547db60.yaml delete mode 100644 releasenotes/notes/centralise-config-opts-95670987dfbdb0e7.yaml delete mode 100644 releasenotes/notes/cinder-model-integration-baa394a72a0a33bf.yaml delete mode 100644 releasenotes/notes/cluster-model-objects-wrapper-9c799ea262c56a5b.yaml delete mode 100644 releasenotes/notes/configurable-weights-default-planner-3746b33160bc7347.yaml delete mode 100644 releasenotes/notes/continuously-optimization-35364f4d2c0b81fc.yaml delete mode 100644 releasenotes/notes/db-migration-e1a705a8b54ccdd2.yaml delete mode 100644 releasenotes/notes/define-the-audit-scope-e89edc5051dcf3f2.yaml delete mode 100644 releasenotes/notes/efficacy-indicator-95380ad7b84e3be2.yaml delete mode 100644 releasenotes/notes/get-goal-from-strategy-396c9b13a38bb650.yaml delete mode 
100644 releasenotes/notes/graph-based-cluster-model-523937a6f5e66537.yaml delete mode 100644 releasenotes/notes/monasca-support-0b0486b8572ac38b.yaml delete mode 100644 releasenotes/notes/optimization-threshold-21ad38f0470d0e1a.yaml delete mode 100644 releasenotes/notes/persistent-audit-parameters-ae41dd7252ba9672.yaml delete mode 100644 releasenotes/notes/planner-storage-action-plan-26ef37893c5e8648.yaml delete mode 100644 releasenotes/notes/stale-action-plan-b6a6b08df873c128.yaml delete mode 100644 releasenotes/notes/standard-deviation-strategy-cd1d0c443fdfde9c.yaml delete mode 100644 releasenotes/notes/suspended-audit-state-07f998c94e9d9a47.yaml delete mode 100644 releasenotes/notes/uniform-airflow-strategy-68cdba1419c3f770.yaml delete mode 100644 releasenotes/notes/watcher-notifications-ovo-7b44d52ef6400dd0.yaml delete mode 100644 releasenotes/notes/watcher-policies-1e86a30f0f11c6fa.yaml delete mode 100644 releasenotes/notes/watcher-service-list-7b2f4b64f71e9b89.yaml delete mode 100644 releasenotes/notes/watcher-versioned-objects-fc5abf5c81c4590c.yaml delete mode 100644 releasenotes/notes/workload-balance-migration-strategy-a0b05148a57815c0.yaml delete mode 100644 releasenotes/source/_static/.placeholder delete mode 100644 releasenotes/source/conf.py delete mode 100644 releasenotes/source/index.rst delete mode 100644 releasenotes/source/locale/fr/LC_MESSAGES/releasenotes.po delete mode 100644 releasenotes/source/newton.rst delete mode 100644 releasenotes/source/ocata.rst delete mode 100644 releasenotes/source/unreleased.rst delete mode 100644 requirements.txt delete mode 100644 setup.cfg delete mode 100644 setup.py delete mode 100644 test-requirements.txt delete mode 100644 tox.ini delete mode 100644 watcher/__init__.py delete mode 100644 watcher/_i18n.py delete mode 100644 watcher/api/__init__.py delete mode 100644 watcher/api/acl.py delete mode 100644 watcher/api/app.py delete mode 100644 watcher/api/app.wsgi delete mode 100644 watcher/api/config.py delete 
mode 100644 watcher/api/controllers/__init__.py delete mode 100644 watcher/api/controllers/base.py delete mode 100644 watcher/api/controllers/link.py delete mode 100644 watcher/api/controllers/root.py delete mode 100644 watcher/api/controllers/v1/__init__.py delete mode 100644 watcher/api/controllers/v1/action.py delete mode 100644 watcher/api/controllers/v1/action_plan.py delete mode 100644 watcher/api/controllers/v1/audit.py delete mode 100644 watcher/api/controllers/v1/audit_template.py delete mode 100644 watcher/api/controllers/v1/collection.py delete mode 100644 watcher/api/controllers/v1/efficacy_indicator.py delete mode 100644 watcher/api/controllers/v1/goal.py delete mode 100644 watcher/api/controllers/v1/scoring_engine.py delete mode 100644 watcher/api/controllers/v1/service.py delete mode 100644 watcher/api/controllers/v1/strategy.py delete mode 100644 watcher/api/controllers/v1/types.py delete mode 100644 watcher/api/controllers/v1/utils.py delete mode 100644 watcher/api/hooks.py delete mode 100644 watcher/api/middleware/__init__.py delete mode 100644 watcher/api/middleware/auth_token.py delete mode 100644 watcher/api/middleware/parsable_error.py delete mode 100644 watcher/api/scheduling.py delete mode 100644 watcher/applier/__init__.py delete mode 100644 watcher/applier/action_plan/__init__.py delete mode 100644 watcher/applier/action_plan/base.py delete mode 100644 watcher/applier/action_plan/default.py delete mode 100644 watcher/applier/actions/__init__.py delete mode 100644 watcher/applier/actions/base.py delete mode 100644 watcher/applier/actions/change_node_power_state.py delete mode 100644 watcher/applier/actions/change_nova_service_state.py delete mode 100644 watcher/applier/actions/factory.py delete mode 100644 watcher/applier/actions/migration.py delete mode 100644 watcher/applier/actions/nop.py delete mode 100644 watcher/applier/actions/resize.py delete mode 100644 watcher/applier/actions/sleep.py delete mode 100644 watcher/applier/base.py 
delete mode 100755 watcher/applier/default.py delete mode 100644 watcher/applier/loading/__init__.py delete mode 100644 watcher/applier/loading/default.py delete mode 100644 watcher/applier/manager.py delete mode 100644 watcher/applier/messaging/__init__.py delete mode 100644 watcher/applier/messaging/trigger.py delete mode 100644 watcher/applier/rpcapi.py delete mode 100644 watcher/applier/workflow_engine/__init__.py delete mode 100644 watcher/applier/workflow_engine/base.py delete mode 100644 watcher/applier/workflow_engine/default.py delete mode 100644 watcher/cmd/__init__.py delete mode 100644 watcher/cmd/api.py delete mode 100644 watcher/cmd/applier.py delete mode 100644 watcher/cmd/dbmanage.py delete mode 100644 watcher/cmd/decisionengine.py delete mode 100644 watcher/cmd/sync.py delete mode 100644 watcher/common/__init__.py delete mode 100644 watcher/common/cinder_helper.py delete mode 100755 watcher/common/clients.py delete mode 100644 watcher/common/config.py delete mode 100644 watcher/common/context.py delete mode 100644 watcher/common/exception.py delete mode 100644 watcher/common/loader/__init__.py delete mode 100644 watcher/common/loader/base.py delete mode 100644 watcher/common/loader/default.py delete mode 100644 watcher/common/loader/loadable.py delete mode 100644 watcher/common/nova_helper.py delete mode 100644 watcher/common/observable.py delete mode 100644 watcher/common/paths.py delete mode 100644 watcher/common/policy.py delete mode 100644 watcher/common/rpc.py delete mode 100644 watcher/common/scheduling.py delete mode 100644 watcher/common/service.py delete mode 100644 watcher/common/service_manager.py delete mode 100644 watcher/common/synchronization.py delete mode 100644 watcher/common/utils.py delete mode 100755 watcher/conf/__init__.py delete mode 100644 watcher/conf/_opts.py delete mode 100644 watcher/conf/api.py delete mode 100644 watcher/conf/applier.py delete mode 100644 watcher/conf/ceilometer_client.py delete mode 100644 
watcher/conf/cinder_client.py delete mode 100644 watcher/conf/clients_auth.py delete mode 100644 watcher/conf/db.py delete mode 100644 watcher/conf/decision_engine.py delete mode 100644 watcher/conf/exception.py delete mode 100644 watcher/conf/glance_client.py delete mode 100644 watcher/conf/gnocchi_client.py delete mode 100755 watcher/conf/ironic_client.py delete mode 100644 watcher/conf/monasca_client.py delete mode 100644 watcher/conf/neutron_client.py delete mode 100755 watcher/conf/nova_client.py delete mode 100644 watcher/conf/opts.py delete mode 100644 watcher/conf/paths.py delete mode 100644 watcher/conf/planner.py delete mode 100644 watcher/conf/plugins.py delete mode 100644 watcher/conf/service.py delete mode 100644 watcher/conf/utils.py delete mode 100644 watcher/datasource/__init__.py delete mode 100644 watcher/datasource/ceilometer.py delete mode 100644 watcher/datasource/gnocchi.py delete mode 100644 watcher/datasource/monasca.py delete mode 100644 watcher/db/__init__.py delete mode 100644 watcher/db/api.py delete mode 100644 watcher/db/migration.py delete mode 100644 watcher/db/purge.py delete mode 100644 watcher/db/sqlalchemy/__init__.py delete mode 100644 watcher/db/sqlalchemy/alembic.ini delete mode 100644 watcher/db/sqlalchemy/alembic/README.rst delete mode 100644 watcher/db/sqlalchemy/alembic/env.py delete mode 100644 watcher/db/sqlalchemy/alembic/script.py.mako delete mode 100644 watcher/db/sqlalchemy/alembic/versions/001_ocata.py delete mode 100644 watcher/db/sqlalchemy/alembic/versions/0f6042416884_add_apscheduler_jobs.py delete mode 100644 watcher/db/sqlalchemy/alembic/versions/d098df6021e2_cron_support_for_audit.py delete mode 100644 watcher/db/sqlalchemy/api.py delete mode 100644 watcher/db/sqlalchemy/job_store.py delete mode 100644 watcher/db/sqlalchemy/migration.py delete mode 100644 watcher/db/sqlalchemy/models.py delete mode 100644 watcher/decision_engine/__init__.py delete mode 100644 watcher/decision_engine/audit/__init__.py delete 
mode 100644 watcher/decision_engine/audit/base.py delete mode 100644 watcher/decision_engine/audit/continuous.py delete mode 100644 watcher/decision_engine/audit/oneshot.py delete mode 100644 watcher/decision_engine/gmr.py delete mode 100644 watcher/decision_engine/goal/__init__.py delete mode 100644 watcher/decision_engine/goal/base.py delete mode 100644 watcher/decision_engine/goal/efficacy/__init__.py delete mode 100644 watcher/decision_engine/goal/efficacy/base.py delete mode 100644 watcher/decision_engine/goal/efficacy/indicators.py delete mode 100644 watcher/decision_engine/goal/efficacy/specs.py delete mode 100644 watcher/decision_engine/goal/goals.py delete mode 100644 watcher/decision_engine/loading/__init__.py delete mode 100644 watcher/decision_engine/loading/default.py delete mode 100644 watcher/decision_engine/manager.py delete mode 100644 watcher/decision_engine/messaging/__init__.py delete mode 100644 watcher/decision_engine/messaging/audit_endpoint.py delete mode 100644 watcher/decision_engine/model/__init__.py delete mode 100644 watcher/decision_engine/model/base.py delete mode 100644 watcher/decision_engine/model/collector/__init__.py delete mode 100644 watcher/decision_engine/model/collector/base.py delete mode 100644 watcher/decision_engine/model/collector/cinder.py delete mode 100644 watcher/decision_engine/model/collector/manager.py delete mode 100644 watcher/decision_engine/model/collector/nova.py delete mode 100644 watcher/decision_engine/model/element/__init__.py delete mode 100644 watcher/decision_engine/model/element/base.py delete mode 100644 watcher/decision_engine/model/element/compute_resource.py delete mode 100644 watcher/decision_engine/model/element/instance.py delete mode 100644 watcher/decision_engine/model/element/node.py delete mode 100644 watcher/decision_engine/model/element/storage_resource.py delete mode 100644 watcher/decision_engine/model/element/volume.py delete mode 100644 watcher/decision_engine/model/model_root.py 
delete mode 100644 watcher/decision_engine/model/notification/__init__.py delete mode 100644 watcher/decision_engine/model/notification/base.py delete mode 100644 watcher/decision_engine/model/notification/cinder.py delete mode 100644 watcher/decision_engine/model/notification/filtering.py delete mode 100644 watcher/decision_engine/model/notification/nova.py delete mode 100644 watcher/decision_engine/planner/__init__.py delete mode 100644 watcher/decision_engine/planner/base.py delete mode 100644 watcher/decision_engine/planner/manager.py delete mode 100644 watcher/decision_engine/planner/weight.py delete mode 100644 watcher/decision_engine/planner/workload_stabilization.py delete mode 100644 watcher/decision_engine/rpcapi.py delete mode 100644 watcher/decision_engine/scheduling.py delete mode 100644 watcher/decision_engine/scope/__init__.py delete mode 100644 watcher/decision_engine/scope/base.py delete mode 100644 watcher/decision_engine/scope/default.py delete mode 100644 watcher/decision_engine/scoring/__init__.py delete mode 100644 watcher/decision_engine/scoring/base.py delete mode 100644 watcher/decision_engine/scoring/dummy_scorer.py delete mode 100644 watcher/decision_engine/scoring/dummy_scoring_container.py delete mode 100644 watcher/decision_engine/scoring/scoring_factory.py delete mode 100644 watcher/decision_engine/solution/__init__.py delete mode 100644 watcher/decision_engine/solution/base.py delete mode 100644 watcher/decision_engine/solution/default.py delete mode 100644 watcher/decision_engine/solution/efficacy.py delete mode 100644 watcher/decision_engine/solution/solution_comparator.py delete mode 100644 watcher/decision_engine/solution/solution_evaluator.py delete mode 100644 watcher/decision_engine/strategy/__init__.py delete mode 100644 watcher/decision_engine/strategy/common/__init__.py delete mode 100644 watcher/decision_engine/strategy/common/level.py delete mode 100644 watcher/decision_engine/strategy/context/__init__.py delete mode 
100644 watcher/decision_engine/strategy/context/base.py delete mode 100644 watcher/decision_engine/strategy/context/default.py delete mode 100644 watcher/decision_engine/strategy/selection/__init__.py delete mode 100644 watcher/decision_engine/strategy/selection/base.py delete mode 100644 watcher/decision_engine/strategy/selection/default.py delete mode 100644 watcher/decision_engine/strategy/strategies/__init__.py delete mode 100644 watcher/decision_engine/strategy/strategies/base.py delete mode 100644 watcher/decision_engine/strategy/strategies/basic_consolidation.py delete mode 100644 watcher/decision_engine/strategy/strategies/dummy_strategy.py delete mode 100644 watcher/decision_engine/strategy/strategies/dummy_with_resize.py delete mode 100644 watcher/decision_engine/strategy/strategies/dummy_with_scorer.py delete mode 100644 watcher/decision_engine/strategy/strategies/noisy_neighbor.py delete mode 100644 watcher/decision_engine/strategy/strategies/outlet_temp_control.py delete mode 100644 watcher/decision_engine/strategy/strategies/uniform_airflow.py delete mode 100755 watcher/decision_engine/strategy/strategies/vm_workload_consolidation.py delete mode 100644 watcher/decision_engine/strategy/strategies/workload_balance.py delete mode 100644 watcher/decision_engine/strategy/strategies/workload_stabilization.py delete mode 100644 watcher/decision_engine/sync.py delete mode 100644 watcher/hacking/__init__.py delete mode 100644 watcher/hacking/checks.py delete mode 100644 watcher/notifications/__init__.py delete mode 100644 watcher/notifications/action.py delete mode 100644 watcher/notifications/action_plan.py delete mode 100644 watcher/notifications/audit.py delete mode 100644 watcher/notifications/base.py delete mode 100644 watcher/notifications/exception.py delete mode 100644 watcher/notifications/goal.py delete mode 100644 watcher/notifications/service.py delete mode 100644 watcher/notifications/strategy.py delete mode 100644 watcher/objects/__init__.py 
delete mode 100644 watcher/objects/action.py delete mode 100644 watcher/objects/action_plan.py delete mode 100644 watcher/objects/audit.py delete mode 100644 watcher/objects/audit_template.py delete mode 100644 watcher/objects/base.py delete mode 100644 watcher/objects/efficacy_indicator.py delete mode 100644 watcher/objects/fields.py delete mode 100644 watcher/objects/goal.py delete mode 100644 watcher/objects/scoring_engine.py delete mode 100644 watcher/objects/service.py delete mode 100644 watcher/objects/strategy.py delete mode 100644 watcher/objects/utils.py delete mode 100644 watcher/tests/__init__.py delete mode 100644 watcher/tests/api/__init__.py delete mode 100644 watcher/tests/api/base.py delete mode 100644 watcher/tests/api/test_base.py delete mode 100644 watcher/tests/api/test_hooks.py delete mode 100644 watcher/tests/api/test_root.py delete mode 100644 watcher/tests/api/test_scheduling.py delete mode 100644 watcher/tests/api/test_utils.py delete mode 100644 watcher/tests/api/utils.py delete mode 100644 watcher/tests/api/v1/__init__.py delete mode 100644 watcher/tests/api/v1/test_actions.py delete mode 100644 watcher/tests/api/v1/test_actions_plans.py delete mode 100644 watcher/tests/api/v1/test_audit_templates.py delete mode 100644 watcher/tests/api/v1/test_audits.py delete mode 100644 watcher/tests/api/v1/test_goals.py delete mode 100644 watcher/tests/api/v1/test_root.py delete mode 100644 watcher/tests/api/v1/test_scoring_engines.py delete mode 100644 watcher/tests/api/v1/test_services.py delete mode 100644 watcher/tests/api/v1/test_strategies.py delete mode 100644 watcher/tests/api/v1/test_types.py delete mode 100644 watcher/tests/api/v1/test_utils.py delete mode 100644 watcher/tests/applier/__init__.py delete mode 100644 watcher/tests/applier/action_plan/__init__.py delete mode 100755 watcher/tests/applier/action_plan/test_default_action_handler.py delete mode 100644 watcher/tests/applier/actions/__init__.py delete mode 100644 
watcher/tests/applier/actions/loading/__init__.py delete mode 100644 watcher/tests/applier/actions/loading/test_default_actions_loader.py delete mode 100644 watcher/tests/applier/actions/test_change_node_power_state.py delete mode 100644 watcher/tests/applier/actions/test_change_nova_service_state.py delete mode 100644 watcher/tests/applier/actions/test_migration.py delete mode 100644 watcher/tests/applier/actions/test_resize.py delete mode 100644 watcher/tests/applier/actions/test_sleep.py delete mode 100644 watcher/tests/applier/messaging/__init__.py delete mode 100644 watcher/tests/applier/messaging/test_trigger_action_plan_endpoint.py delete mode 100644 watcher/tests/applier/test_applier_manager.py delete mode 100644 watcher/tests/applier/test_rpcapi.py delete mode 100644 watcher/tests/applier/workflow_engine/__init__.py delete mode 100644 watcher/tests/applier/workflow_engine/loading/__init__.py delete mode 100644 watcher/tests/applier/workflow_engine/loading/test_default_engine_loader.py delete mode 100644 watcher/tests/applier/workflow_engine/test_default_workflow_engine.py delete mode 100644 watcher/tests/applier/workflow_engine/test_taskflow_action_container.py delete mode 100644 watcher/tests/base.py delete mode 100644 watcher/tests/cmd/__init__.py delete mode 100644 watcher/tests/cmd/test_api.py delete mode 100644 watcher/tests/cmd/test_applier.py delete mode 100644 watcher/tests/cmd/test_db_manage.py delete mode 100644 watcher/tests/cmd/test_decision_engine.py delete mode 100644 watcher/tests/common/__init__.py delete mode 100644 watcher/tests/common/loader/__init__.py delete mode 100644 watcher/tests/common/loader/test_loader.py delete mode 100644 watcher/tests/common/test_cinder_helper.py delete mode 100755 watcher/tests/common/test_clients.py delete mode 100644 watcher/tests/common/test_nova_helper.py delete mode 100644 watcher/tests/common/test_service.py delete mode 100644 watcher/tests/conf/__init__.py delete mode 100755 
watcher/tests/conf/test_list_opts.py delete mode 100644 watcher/tests/conf_fixture.py delete mode 100644 watcher/tests/config.py delete mode 100644 watcher/tests/datasource/__init__.py delete mode 100644 watcher/tests/datasource/test_ceilometer_helper.py delete mode 100644 watcher/tests/datasource/test_gnocchi_helper.py delete mode 100644 watcher/tests/datasource/test_monasca_helper.py delete mode 100644 watcher/tests/db/__init__.py delete mode 100644 watcher/tests/db/base.py delete mode 100644 watcher/tests/db/test_action.py delete mode 100644 watcher/tests/db/test_action_plan.py delete mode 100644 watcher/tests/db/test_audit.py delete mode 100644 watcher/tests/db/test_audit_template.py delete mode 100644 watcher/tests/db/test_efficacy_indicator.py delete mode 100644 watcher/tests/db/test_goal.py delete mode 100644 watcher/tests/db/test_purge.py delete mode 100644 watcher/tests/db/test_scoring_engine.py delete mode 100644 watcher/tests/db/test_service.py delete mode 100644 watcher/tests/db/test_strategy.py delete mode 100644 watcher/tests/db/utils.py delete mode 100644 watcher/tests/decision_engine/__init__.py delete mode 100644 watcher/tests/decision_engine/audit/__init__.py delete mode 100644 watcher/tests/decision_engine/audit/test_audit_handlers.py delete mode 100644 watcher/tests/decision_engine/cluster/__init__.py delete mode 100644 watcher/tests/decision_engine/cluster/test_cluster_data_model_collector.py delete mode 100644 watcher/tests/decision_engine/cluster/test_nova_cdmc.py delete mode 100644 watcher/tests/decision_engine/event_consumer/__init__.py delete mode 100644 watcher/tests/decision_engine/fake_goals.py delete mode 100644 watcher/tests/decision_engine/fake_strategies.py delete mode 100644 watcher/tests/decision_engine/loading/__init__.py delete mode 100644 watcher/tests/decision_engine/loading/test_collector_loader.py delete mode 100644 watcher/tests/decision_engine/loading/test_default_planner_loader.py delete mode 100644 
watcher/tests/decision_engine/loading/test_default_strategy_loader.py delete mode 100644 watcher/tests/decision_engine/loading/test_goal_loader.py delete mode 100644 watcher/tests/decision_engine/messaging/__init__.py delete mode 100644 watcher/tests/decision_engine/messaging/test_audit_endpoint.py delete mode 100644 watcher/tests/decision_engine/model/__init__.py delete mode 100644 watcher/tests/decision_engine/model/ceilometer_metrics.py delete mode 100644 watcher/tests/decision_engine/model/data/scenario_1.xml delete mode 100644 watcher/tests/decision_engine/model/data/scenario_1_with_metrics.xml delete mode 100644 watcher/tests/decision_engine/model/data/scenario_2_with_metrics.xml delete mode 100644 watcher/tests/decision_engine/model/data/scenario_3_with_2_nodes.xml delete mode 100644 watcher/tests/decision_engine/model/data/scenario_3_with_metrics.xml delete mode 100644 watcher/tests/decision_engine/model/data/scenario_4_with_1_node_no_instance.xml delete mode 100644 watcher/tests/decision_engine/model/data/scenario_5_with_instance_disk_0.xml delete mode 100644 watcher/tests/decision_engine/model/data/scenario_6_with_2_nodes.xml delete mode 100644 watcher/tests/decision_engine/model/data/scenario_7_with_2_nodes.xml delete mode 100644 watcher/tests/decision_engine/model/data/scenario_8_with_4_nodes.xml delete mode 100644 watcher/tests/decision_engine/model/data/scenario_9_with_3_active_plus_1_disabled_nodes.xml delete mode 100644 watcher/tests/decision_engine/model/data/storage_scenario_1.xml delete mode 100644 watcher/tests/decision_engine/model/faker_cluster_and_metrics.py delete mode 100644 watcher/tests/decision_engine/model/faker_cluster_state.py delete mode 100644 watcher/tests/decision_engine/model/gnocchi_metrics.py delete mode 100644 watcher/tests/decision_engine/model/monasca_metrics.py delete mode 100644 watcher/tests/decision_engine/model/notification/__init__.py delete mode 100644 
watcher/tests/decision_engine/model/notification/data/capacity.json delete mode 100644 watcher/tests/decision_engine/model/notification/data/instance-create.json delete mode 100644 watcher/tests/decision_engine/model/notification/data/instance-delete-end.json delete mode 100644 watcher/tests/decision_engine/model/notification/data/instance-update.json delete mode 100644 watcher/tests/decision_engine/model/notification/data/scenario3_instance-create.json delete mode 100644 watcher/tests/decision_engine/model/notification/data/scenario3_instance-delete-end.json delete mode 100644 watcher/tests/decision_engine/model/notification/data/scenario3_instance-update.json delete mode 100644 watcher/tests/decision_engine/model/notification/data/scenario3_legacy_instance-create-end.json delete mode 100644 watcher/tests/decision_engine/model/notification/data/scenario3_legacy_instance-delete-end.json delete mode 100644 watcher/tests/decision_engine/model/notification/data/scenario3_legacy_instance-update.json delete mode 100644 watcher/tests/decision_engine/model/notification/data/scenario3_legacy_livemigration-post-dest-end.json delete mode 100644 watcher/tests/decision_engine/model/notification/data/scenario3_notfound_instance-update.json delete mode 100644 watcher/tests/decision_engine/model/notification/data/scenario3_notfound_legacy_instance-update.json delete mode 100644 watcher/tests/decision_engine/model/notification/data/scenario3_service-update-disabled.json delete mode 100644 watcher/tests/decision_engine/model/notification/data/scenario3_service-update-enabled.json delete mode 100644 watcher/tests/decision_engine/model/notification/data/scenario_1_bootable-volume-create.json delete mode 100644 watcher/tests/decision_engine/model/notification/data/scenario_1_capacity.json delete mode 100644 watcher/tests/decision_engine/model/notification/data/scenario_1_capacity_node_notfound.json delete mode 100644 
watcher/tests/decision_engine/model/notification/data/scenario_1_capacity_pool_notfound.json delete mode 100644 watcher/tests/decision_engine/model/notification/data/scenario_1_error-volume-create.json delete mode 100644 watcher/tests/decision_engine/model/notification/data/scenario_1_volume-attach.json delete mode 100644 watcher/tests/decision_engine/model/notification/data/scenario_1_volume-create.json delete mode 100644 watcher/tests/decision_engine/model/notification/data/scenario_1_volume-create_pool_notfound.json delete mode 100644 watcher/tests/decision_engine/model/notification/data/scenario_1_volume-delete.json delete mode 100644 watcher/tests/decision_engine/model/notification/data/scenario_1_volume-detach.json delete mode 100644 watcher/tests/decision_engine/model/notification/data/scenario_1_volume-resize.json delete mode 100644 watcher/tests/decision_engine/model/notification/data/scenario_1_volume-update.json delete mode 100644 watcher/tests/decision_engine/model/notification/data/service-update.json delete mode 100644 watcher/tests/decision_engine/model/notification/fake_managers.py delete mode 100644 watcher/tests/decision_engine/model/notification/test_cinder_notifications.py delete mode 100644 watcher/tests/decision_engine/model/notification/test_notifications.py delete mode 100644 watcher/tests/decision_engine/model/notification/test_nova_notifications.py delete mode 100644 watcher/tests/decision_engine/model/test_element.py delete mode 100644 watcher/tests/decision_engine/model/test_model.py delete mode 100644 watcher/tests/decision_engine/planner/__init__.py delete mode 100644 watcher/tests/decision_engine/planner/test_planner_manager.py delete mode 100644 watcher/tests/decision_engine/planner/test_weight_planner.py delete mode 100644 watcher/tests/decision_engine/planner/test_workload_stabilization_planner.py delete mode 100644 watcher/tests/decision_engine/scope/__init__.py delete mode 100644 watcher/tests/decision_engine/scope/fake_scopes.py 
delete mode 100644 watcher/tests/decision_engine/scope/test_default.py delete mode 100644 watcher/tests/decision_engine/scoring/__init__.py delete mode 100644 watcher/tests/decision_engine/scoring/test_dummy_scorer.py delete mode 100644 watcher/tests/decision_engine/scoring/test_dummy_scoring_container.py delete mode 100644 watcher/tests/decision_engine/scoring/test_scoring_factory.py delete mode 100644 watcher/tests/decision_engine/solution/__init__.py delete mode 100644 watcher/tests/decision_engine/solution/test_default_solution.py delete mode 100644 watcher/tests/decision_engine/strategy/__init__.py delete mode 100644 watcher/tests/decision_engine/strategy/context/__init__.py delete mode 100644 watcher/tests/decision_engine/strategy/context/test_strategy_context.py delete mode 100644 watcher/tests/decision_engine/strategy/selector/__init__.py delete mode 100644 watcher/tests/decision_engine/strategy/selector/test_strategy_selector.py delete mode 100644 watcher/tests/decision_engine/strategy/strategies/__init__.py delete mode 100644 watcher/tests/decision_engine/strategy/strategies/test_basic_consolidation.py delete mode 100644 watcher/tests/decision_engine/strategy/strategies/test_dummy_strategy.py delete mode 100644 watcher/tests/decision_engine/strategy/strategies/test_dummy_with_scorer.py delete mode 100644 watcher/tests/decision_engine/strategy/strategies/test_noisy_neighbor.py delete mode 100644 watcher/tests/decision_engine/strategy/strategies/test_outlet_temp_control.py delete mode 100644 watcher/tests/decision_engine/strategy/strategies/test_uniform_airflow.py delete mode 100644 watcher/tests/decision_engine/strategy/strategies/test_vm_workload_consolidation.py delete mode 100644 watcher/tests/decision_engine/strategy/strategies/test_workload_balance.py delete mode 100644 watcher/tests/decision_engine/strategy/strategies/test_workload_stabilization.py delete mode 100644 watcher/tests/decision_engine/test_gmr.py delete mode 100644 
watcher/tests/decision_engine/test_rpcapi.py delete mode 100644 watcher/tests/decision_engine/test_scheduling.py delete mode 100644 watcher/tests/decision_engine/test_sync.py delete mode 100644 watcher/tests/fake_policy.py delete mode 100644 watcher/tests/fakes.py delete mode 100644 watcher/tests/notifications/__init__.py delete mode 100644 watcher/tests/notifications/test_action_notification.py delete mode 100644 watcher/tests/notifications/test_action_plan_notification.py delete mode 100644 watcher/tests/notifications/test_audit_notification.py delete mode 100644 watcher/tests/notifications/test_notification.py delete mode 100644 watcher/tests/notifications/test_service_notifications.py delete mode 100644 watcher/tests/objects/__init__.py delete mode 100644 watcher/tests/objects/test_action.py delete mode 100644 watcher/tests/objects/test_action_plan.py delete mode 100644 watcher/tests/objects/test_audit.py delete mode 100644 watcher/tests/objects/test_audit_template.py delete mode 100644 watcher/tests/objects/test_efficacy_indicator.py delete mode 100644 watcher/tests/objects/test_goal.py delete mode 100644 watcher/tests/objects/test_objects.py delete mode 100644 watcher/tests/objects/test_scoring_engine.py delete mode 100644 watcher/tests/objects/test_service.py delete mode 100644 watcher/tests/objects/test_strategy.py delete mode 100644 watcher/tests/objects/utils.py delete mode 100644 watcher/tests/policy_fixture.py delete mode 100644 watcher/version.py delete mode 100644 watcher_tempest_plugin/README.rst delete mode 100644 watcher_tempest_plugin/__init__.py delete mode 100644 watcher_tempest_plugin/config.py delete mode 100644 watcher_tempest_plugin/infra_optim_clients.py delete mode 100644 watcher_tempest_plugin/plugin.py delete mode 100644 watcher_tempest_plugin/services/__init__.py delete mode 100644 watcher_tempest_plugin/services/infra_optim/__init__.py delete mode 100644 watcher_tempest_plugin/services/infra_optim/base.py delete mode 100644 
watcher_tempest_plugin/services/infra_optim/v1/__init__.py delete mode 100644 watcher_tempest_plugin/services/infra_optim/v1/json/__init__.py delete mode 100644 watcher_tempest_plugin/services/infra_optim/v1/json/client.py delete mode 100644 watcher_tempest_plugin/tests/__init__.py delete mode 100644 watcher_tempest_plugin/tests/api/__init__.py delete mode 100644 watcher_tempest_plugin/tests/api/admin/__init__.py delete mode 100644 watcher_tempest_plugin/tests/api/admin/base.py delete mode 100644 watcher_tempest_plugin/tests/api/admin/test_action.py delete mode 100644 watcher_tempest_plugin/tests/api/admin/test_action_plan.py delete mode 100644 watcher_tempest_plugin/tests/api/admin/test_api_discovery.py delete mode 100644 watcher_tempest_plugin/tests/api/admin/test_audit.py delete mode 100644 watcher_tempest_plugin/tests/api/admin/test_audit_template.py delete mode 100644 watcher_tempest_plugin/tests/api/admin/test_goal.py delete mode 100644 watcher_tempest_plugin/tests/api/admin/test_scoring_engine.py delete mode 100644 watcher_tempest_plugin/tests/api/admin/test_service.py delete mode 100644 watcher_tempest_plugin/tests/api/admin/test_strategy.py delete mode 100644 watcher_tempest_plugin/tests/scenario/__init__.py delete mode 100644 watcher_tempest_plugin/tests/scenario/base.py delete mode 100644 watcher_tempest_plugin/tests/scenario/manager.py delete mode 100644 watcher_tempest_plugin/tests/scenario/test_execute_basic_optim.py delete mode 100644 watcher_tempest_plugin/tests/scenario/test_execute_dummy_optim.py delete mode 100644 watcher_tempest_plugin/tests/scenario/test_execute_workload_balancing.py diff --git a/.coveragerc b/.coveragerc deleted file mode 100644 index b474acb..0000000 --- a/.coveragerc +++ /dev/null @@ -1,12 +0,0 @@ -[run] -branch = True -source = watcher -omit = - watcher/tests/* - watcher/hacking/* - -[report] -ignore_errors = True -exclude_lines = - @abc.abstract - raise NotImplementedError diff --git a/.gitignore b/.gitignore deleted file 
mode 100644 index debf764..0000000 --- a/.gitignore +++ /dev/null @@ -1,74 +0,0 @@ -*.py[cod] - -# C extensions -*.so - -# Packages -*.egg* -dist -build -eggs -parts -bin -var -sdist -develop-eggs -.installed.cfg -lib -lib64 - -# Installer logs -pip-log.txt - -# Unit test / coverage reports -.coverage* -.tox -nosetests.xml -.testrepository -.venv -.idea - -# Translations -*.mo - -# Mr Developer -.mr.developer.cfg -.project -.pydevproject - -# Complexity -output/*.html -output/*/index.html - -# Sphinx -doc/build -doc/source/api/* -doc/source/samples -doc/source/watcher.conf.sample -!doc/source/api/index.rst -!doc/source/api/v1.rst - -# pbr generates these -AUTHORS -ChangeLog - -# Editors -*~ -.*.swp -.*sw? - -sftp-config.json -/.idea/ -/cover/ -.settings/ -.eclipse - -cover -/demo/ - - -# Files created by releasenotes build -releasenotes/build - -# Desktop Service Store -*.DS_Store diff --git a/.gitreview b/.gitreview deleted file mode 100644 index 9c9a533..0000000 --- a/.gitreview +++ /dev/null @@ -1,4 +0,0 @@ -[gerrit] -host=review.openstack.org -port=29418 -project=openstack/watcher.git diff --git a/.mailmap b/.mailmap deleted file mode 100644 index 516ae6f..0000000 --- a/.mailmap +++ /dev/null @@ -1,3 +0,0 @@ -# Format is: -# -# diff --git a/.testr.conf b/.testr.conf deleted file mode 100644 index 0c9a76a..0000000 --- a/.testr.conf +++ /dev/null @@ -1,7 +0,0 @@ -[DEFAULT] -test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \ - OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \ - OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-160} \ - ${PYTHON:-python} -m subunit.run discover -t ./ ${OS_TEST_PATH:-./watcher/tests} $LISTOPT $IDOPTION -test_id_option=--load-list $IDFILE -test_list_option=--list diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst deleted file mode 100644 index 673a5fe..0000000 --- a/CONTRIBUTING.rst +++ /dev/null @@ -1,16 +0,0 @@ -If you would like to contribute to the development of OpenStack, -you must follow the steps in this page: - - 
https://docs.openstack.org/infra/manual/developers.html - -Once those steps have been completed, changes to OpenStack -should be submitted for review via the Gerrit tool, following -the workflow documented at: - - https://docs.openstack.org/infra/manual/developers.html#development-workflow - -Pull requests submitted through GitHub will be ignored. - -Bugs should be filed on Launchpad, not GitHub: - - https://bugs.launchpad.net/watcher diff --git a/HACKING.rst b/HACKING.rst deleted file mode 100644 index 225c8a0..0000000 --- a/HACKING.rst +++ /dev/null @@ -1,11 +0,0 @@ -.. - Except where otherwise noted, this document is licensed under Creative - Commons Attribution 3.0 License. You can view the license at: - - https://creativecommons.org/licenses/by/3.0/ - -========================== -watcher Style Commandments -========================== - -Read the OpenStack Style Commandments https://docs.openstack.org/developer/hacking/ diff --git a/LICENSE b/LICENSE deleted file mode 100644 index 68c771a..0000000 --- a/LICENSE +++ /dev/null @@ -1,176 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. 
- - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - diff --git a/README b/README new file mode 100644 index 0000000..8fcd2b2 --- /dev/null +++ b/README @@ -0,0 +1,14 @@ +This project is no longer maintained. + +The contents of this repository are still available in the Git +source code management system. To see the contents of this +repository before it reached its end of life, please check out the +previous commit with "git checkout HEAD^1". + +For ongoing work on maintaining OpenStack packages in the Debian +distribution, please see the Debian OpenStack packaging team at +https://wiki.debian.org/OpenStack/. 
+ +For any further questions, please email +openstack-dev@lists.openstack.org or join #openstack-dev on +Freenode. diff --git a/README.rst b/README.rst deleted file mode 100644 index 289eb91..0000000 --- a/README.rst +++ /dev/null @@ -1,31 +0,0 @@ -======================== -Team and repository tags -======================== - -.. image:: https://governance.openstack.org/badges/watcher.svg - :target: https://governance.openstack.org/reference/tags/index.html - -.. Change things from this point on - -.. - Except where otherwise noted, this document is licensed under Creative - Commons Attribution 3.0 License. You can view the license at: - - https://creativecommons.org/licenses/by/3.0/ - -======= -Watcher -======= - -OpenStack Watcher provides a flexible and scalable resource optimization -service for multi-tenant OpenStack-based clouds. -Watcher provides a robust framework to realize a wide range of cloud -optimization goals, including the reduction of data center -operating costs, increased system performance via intelligent virtual machine -migration, increased energy efficiency-and more! - -* Free software: Apache license -* Wiki: https://wiki.openstack.org/wiki/Watcher -* Source: https://github.com/openstack/watcher -* Bugs: https://bugs.launchpad.net/watcher -* Documentation: https://docs.openstack.org/watcher/latest/ diff --git a/babel.cfg b/babel.cfg deleted file mode 100644 index 15cd6cb..0000000 --- a/babel.cfg +++ /dev/null @@ -1,2 +0,0 @@ -[python: **.py] - diff --git a/devstack/files/apache-watcher-api.template b/devstack/files/apache-watcher-api.template deleted file mode 100644 index 1f9cffb..0000000 --- a/devstack/files/apache-watcher-api.template +++ /dev/null @@ -1,42 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# This is an example Apache2 configuration file for using the -# Watcher API through mod_wsgi. This version assumes you are -# running devstack to configure the software. - -Listen %WATCHER_SERVICE_PORT% - - - WSGIDaemonProcess watcher-api user=%USER% processes=%APIWORKERS% threads=1 display-name=%{GROUP} - WSGIScriptAlias / %WATCHER_WSGI_DIR%/app.wsgi - WSGIApplicationGroup %{GLOBAL} - WSGIProcessGroup watcher-api - WSGIPassAuthorization On - - ErrorLogFormat "%M" - ErrorLog /var/log/%APACHE_NAME%/watcher-api.log - CustomLog /var/log/%APACHE_NAME%/watcher-api-access.log combined - - - - WSGIProcessGroup watcher-api - WSGIApplicationGroup %{GLOBAL} - = 2.4> - Require all granted - - - Order allow,deny - Allow from all - - - diff --git a/devstack/lib/watcher b/devstack/lib/watcher deleted file mode 100644 index a0fd2c4..0000000 --- a/devstack/lib/watcher +++ /dev/null @@ -1,313 +0,0 @@ -#!/bin/bash -# -# lib/watcher -# Functions to control the configuration and operation of the watcher services - -# Dependencies: -# -# - ``functions`` file -# - ``SERVICE_{TENANT_NAME|PASSWORD}`` must be defined -# - ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined - -# ``stack.sh`` calls the entry points in this order: -# -# - is_watcher_enabled -# - install_watcher -# - configure_watcher -# - create_watcher_conf -# - init_watcher -# - start_watcher -# - stop_watcher -# - cleanup_watcher - -# Save trace setting -_XTRACE_WATCHER=$(set +o | grep xtrace) -set +o xtrace - - -# Defaults -# -------- - -# Set up default directories 
-WATCHER_REPO=${WATCHER_REPO:-${GIT_BASE}/openstack/watcher.git} -WATCHER_BRANCH=${WATCHER_BRANCH:-master} -WATCHER_DIR=$DEST/watcher - -GITREPO["python-watcherclient"]=${WATCHERCLIENT_REPO:-${GIT_BASE}/openstack/python-watcherclient.git} -GITBRANCH["python-watcherclient"]=${WATCHERCLIENT_BRANCH:-master} -GITDIR["python-watcherclient"]=$DEST/python-watcherclient - -WATCHER_STATE_PATH=${WATCHER_STATE_PATH:=$DATA_DIR/watcher} -WATCHER_AUTH_CACHE_DIR=${WATCHER_AUTH_CACHE_DIR:-/var/cache/watcher} - -WATCHER_CONF_DIR=/etc/watcher -WATCHER_CONF=$WATCHER_CONF_DIR/watcher.conf -WATCHER_POLICY_JSON=$WATCHER_CONF_DIR/policy.json - -WATCHER_DEVSTACK_DIR=$WATCHER_DIR/devstack -WATCHER_DEVSTACK_FILES_DIR=$WATCHER_DEVSTACK_DIR/files - -NOVA_CONF_DIR=/etc/nova -NOVA_CONF=$NOVA_CONF_DIR/nova.conf - -if is_ssl_enabled_service "watcher" || is_service_enabled tls-proxy; then - WATCHER_SERVICE_PROTOCOL="https" -fi - -WATCHER_USE_MOD_WSGI=$(trueorfalse True WATCHER_USE_MOD_WSGI) - -if is_suse; then - WATCHER_WSGI_DIR=${WATCHER_WSGI_DIR:-/srv/www/htdocs/watcher} -else - WATCHER_WSGI_DIR=${WATCHER_WSGI_DIR:-/var/www/watcher} -fi -# Public facing bits -WATCHER_SERVICE_HOST=${WATCHER_SERVICE_HOST:-$HOST_IP} -WATCHER_SERVICE_PORT=${WATCHER_SERVICE_PORT:-9322} -WATCHER_SERVICE_PORT_INT=${WATCHER_SERVICE_PORT_INT:-19322} -WATCHER_SERVICE_PROTOCOL=${WATCHER_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} - -# Support entry points installation of console scripts -if [[ -d $WATCHER_DIR/bin ]]; then - WATCHER_BIN_DIR=$WATCHER_DIR/bin -else - WATCHER_BIN_DIR=$(get_python_exec_prefix) -fi - -# Entry Points -# ------------ - -# Test if any watcher services are enabled -# is_watcher_enabled -function is_watcher_enabled { - [[ ,${ENABLED_SERVICES} =~ ,"watcher-" ]] && return 0 - return 1 -} - -#_cleanup_watcher_apache_wsgi - Remove wsgi files, -#disable and remove apache vhost file -function _cleanup_watcher_apache_wsgi { - sudo rm -rf $WATCHER_WSGI_DIR - sudo rm -f $(apache_site_config_for watcher-api) - 
restart_apache_server -} - -# cleanup_watcher() - Remove residual data files, anything left over from previous -# runs that a clean run would need to clean up -function cleanup_watcher { - sudo rm -rf $WATCHER_STATE_PATH $WATCHER_AUTH_CACHE_DIR - if [[ "$WATCHER_USE_MOD_WSGI" == "True" ]]; then - _cleanup_watcher_apache_wsgi - fi -} - -# configure_watcher() - Set config files, create data dirs, etc -function configure_watcher { - # Put config files in ``/etc/watcher`` for everyone to find - sudo install -d -o $STACK_USER $WATCHER_CONF_DIR - - install_default_policy watcher - - # Rebuild the config file from scratch - create_watcher_conf -} - -# create_watcher_accounts() - Set up common required watcher accounts -# -# Project User Roles -# ------------------------------------------------------------------ -# SERVICE_TENANT_NAME watcher service -function create_watcher_accounts { - create_service_user "watcher" "admin" - - local watcher_service=$(get_or_create_service "watcher" \ - "infra-optim" "Watcher Infrastructure Optimization Service") - get_or_create_endpoint $watcher_service \ - "$REGION_NAME" \ - "$WATCHER_SERVICE_PROTOCOL://$WATCHER_SERVICE_HOST:$WATCHER_SERVICE_PORT" \ - "$WATCHER_SERVICE_PROTOCOL://$WATCHER_SERVICE_HOST:$WATCHER_SERVICE_PORT" \ - "$WATCHER_SERVICE_PROTOCOL://$WATCHER_SERVICE_HOST:$WATCHER_SERVICE_PORT" -} - -# _config_watcher_apache_wsgi() - Set WSGI config files of watcher -function _config_watcher_apache_wsgi { - local watcher_apache_conf - if [[ "$WATCHER_USE_MOD_WSGI" == "True" ]]; then - sudo mkdir -p $WATCHER_WSGI_DIR - sudo cp $WATCHER_DIR/watcher/api/app.wsgi $WATCHER_WSGI_DIR/app.wsgi - watcher_apache_conf=$(apache_site_config_for watcher-api) - sudo cp $WATCHER_DEVSTACK_FILES_DIR/apache-watcher-api.template $watcher_apache_conf - sudo sed -e " - s|%WATCHER_SERVICE_PORT%|$WATCHER_SERVICE_PORT|g; - s|%WATCHER_WSGI_DIR%|$WATCHER_WSGI_DIR|g; - s|%USER%|$STACK_USER|g; - s|%APIWORKERS%|$API_WORKERS|g; - s|%APACHE_NAME%|$APACHE_NAME|g; 
- " -i $watcher_apache_conf - enable_apache_site watcher-api - tail_log watcher-access /var/log/$APACHE_NAME/watcher-api-access.log - tail_log watcher-api /var/log/$APACHE_NAME/watcher-api.log - fi - -} - -# create_watcher_conf() - Create a new watcher.conf file -function create_watcher_conf { - # (Re)create ``watcher.conf`` - rm -f $WATCHER_CONF - - iniset $WATCHER_CONF DEFAULT debug "$ENABLE_DEBUG_LOG_LEVEL" - iniset $WATCHER_CONF DEFAULT control_exchange watcher - - iniset $WATCHER_CONF database connection $(database_connection_url watcher) - iniset $WATCHER_CONF api host "$WATCHER_SERVICE_HOST" - iniset $WATCHER_CONF api port "$WATCHER_SERVICE_PORT" - - iniset $WATCHER_CONF oslo_policy policy_file $WATCHER_POLICY_JSON - - iniset $WATCHER_CONF oslo_messaging_rabbit rabbit_userid $RABBIT_USERID - iniset $WATCHER_CONF oslo_messaging_rabbit rabbit_password $RABBIT_PASSWORD - iniset $WATCHER_CONF oslo_messaging_rabbit rabbit_host $RABBIT_HOST - - iniset $WATCHER_CONF oslo_messaging_notifications driver "messagingv2" - - iniset $NOVA_CONF oslo_messaging_notifications topics "notifications,watcher_notifications" - iniset $NOVA_CONF notifications notify_on_state_change "vm_and_task_state" - - configure_auth_token_middleware $WATCHER_CONF watcher $WATCHER_AUTH_CACHE_DIR - configure_auth_token_middleware $WATCHER_CONF watcher $WATCHER_AUTH_CACHE_DIR "watcher_clients_auth" - - if is_fedora || is_suse; then - # watcher defaults to /usr/local/bin, but fedora and suse pip like to - # install things in /usr/bin - iniset $WATCHER_CONF DEFAULT bindir "/usr/bin" - fi - - if [ -n "$WATCHER_STATE_PATH" ]; then - iniset $WATCHER_CONF DEFAULT state_path "$WATCHER_STATE_PATH" - iniset $WATCHER_CONF oslo_concurrency lock_path "$WATCHER_STATE_PATH" - fi - - if [ "$SYSLOG" != "False" ]; then - iniset $WATCHER_CONF DEFAULT use_syslog "True" - fi - - # Format logging - if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then - setup_colorized_logging $WATCHER_CONF DEFAULT - else - 
# Show user_name and project_name instead of user_id and project_id - iniset $WATCHER_CONF DEFAULT logging_context_format_string "%(asctime)s.%(msecs)03d %(levelname)s %(name)s [%(request_id)s %(project_domain)s %(user_name)s %(project_name)s] %(instance)s%(message)s" - fi - - #config apache files - if [[ "$WATCHER_USE_MOD_WSGI" == "True" ]]; then - _config_watcher_apache_wsgi - fi - # Register SSL certificates if provided - if is_ssl_enabled_service watcher; then - ensure_certificates WATCHER - - iniset $WATCHER_CONF DEFAULT ssl_cert_file "$WATCHER_SSL_CERT" - iniset $WATCHER_CONF DEFAULT ssl_key_file "$WATCHER_SSL_KEY" - - iniset $WATCHER_CONF DEFAULT enabled_ssl_apis "$WATCHER_ENABLED_APIS" - fi - - if is_service_enabled ceilometer; then - iniset $WATCHER_CONF watcher_messaging notifier_driver "messaging" - fi -} - -# create_watcher_cache_dir() - Part of the init_watcher() process -function create_watcher_cache_dir { - # Create cache dir - sudo install -d -o $STACK_USER $WATCHER_AUTH_CACHE_DIR - rm -rf $WATCHER_AUTH_CACHE_DIR/* -} - -# init_watcher() - Initialize databases, etc. 
-function init_watcher { - # clean up from previous (possibly aborted) runs - # create required data files - if is_service_enabled $DATABASE_BACKENDS && is_service_enabled watcher-api; then - # (Re)create watcher database - recreate_database watcher - - # Create watcher schema - $WATCHER_BIN_DIR/watcher-db-manage --config-file $WATCHER_CONF upgrade - fi - create_watcher_cache_dir -} - -# install_watcherclient() - Collect source and prepare -function install_watcherclient { - if use_library_from_git "python-watcherclient"; then - git_clone_by_name "python-watcherclient" - setup_dev_lib "python-watcherclient" - fi -} - -# install_watcher() - Collect source and prepare -function install_watcher { - git_clone $WATCHER_REPO $WATCHER_DIR $WATCHER_BRANCH - setup_develop $WATCHER_DIR - if [[ "$WATCHER_USE_MOD_WSGI" == "True" ]]; then - install_apache_wsgi - fi -} - -# start_watcher_api() - Start the API process ahead of other things -function start_watcher_api { - # Get right service port for testing - - local service_port=$WATCHER_SERVICE_PORT - local service_protocol=$WATCHER_SERVICE_PROTOCOL - if is_service_enabled tls-proxy; then - service_port=$WATCHER_SERVICE_PORT_INT - service_protocol="http" - fi - if [[ "$WATCHER_USE_MOD_WSGI" == "True" ]]; then - restart_apache_server - else - run_process watcher-api "$WATCHER_BIN_DIR/watcher-api --config-file $WATCHER_CONF" - fi - echo "Waiting for watcher-api to start..." - if ! 
wait_for_service $SERVICE_TIMEOUT $service_protocol://$WATCHER_SERVICE_HOST:$service_port; then - die $LINENO "watcher-api did not start" - fi - - # Start proxies if enabled - if is_service_enabled tls-proxy; then - start_tls_proxy '*' $WATCHER_SERVICE_PORT $WATCHER_SERVICE_HOST $WATCHER_SERVICE_PORT_INT & - start_tls_proxy '*' $EC2_SERVICE_PORT $WATCHER_SERVICE_HOST $WATCHER_SERVICE_PORT_INT & - fi -} - -# start_watcher() - Start running processes, including screen -function start_watcher { - # ``run_process`` checks ``is_service_enabled``, it is not needed here - start_watcher_api - run_process watcher-decision-engine "$WATCHER_BIN_DIR/watcher-decision-engine --config-file $WATCHER_CONF" - run_process watcher-applier "$WATCHER_BIN_DIR/watcher-applier --config-file $WATCHER_CONF" -} - -# stop_watcher() - Stop running processes (non-screen) -function stop_watcher { - if [[ "$WATCHER_USE_MOD_WSGI" == "True" ]]; then - disable_apache_site watcher-api - else - stop_process watcher-api - fi - for serv in watcher-decision-engine watcher-applier; do - stop_process $serv - done -} - -# Restore xtrace -$_XTRACE_WATCHER - -# Tell emacs to use shell-script-mode -## Local variables: -## mode: shell-script -## End: diff --git a/devstack/local.conf.compute b/devstack/local.conf.compute deleted file mode 100644 index 79d0aeb..0000000 --- a/devstack/local.conf.compute +++ /dev/null @@ -1,49 +0,0 @@ -# Sample ``local.conf`` for compute node for Watcher development -# NOTE: Copy this file to the root DevStack directory for it to work properly. 
- -[[local|localrc]] - -ADMIN_PASSWORD=nomoresecrete -DATABASE_PASSWORD=stackdb -RABBIT_PASSWORD=stackqueue -SERVICE_PASSWORD=$ADMIN_PASSWORD -SERVICE_TOKEN=azertytoken - -HOST_IP=192.168.42.2 # Change this to this compute node's IP address -FLAT_INTERFACE=eth0 - -FIXED_RANGE=10.254.1.0/24 # Change this to whatever your network is -NETWORK_GATEWAY=10.254.1.1 # Change this for your network - -MULTI_HOST=1 - -SERVICE_HOST=192.168.42.1 # Change this to the IP of your controller node -MYSQL_HOST=$SERVICE_HOST -RABBIT_HOST=$SERVICE_HOST -GLANCE_HOSTPORT=${SERVICE_HOST}:9292 - -DATABASE_TYPE=mysql - -# Enable services (including neutron) -ENABLED_SERVICES=n-cpu,n-api-meta,c-vol,q-agt,placement-client - -NOVA_VNC_ENABLED=True -NOVNCPROXY_URL="http://$SERVICE_HOST:6080/vnc_auto.html" -VNCSERVER_LISTEN=0.0.0.0 -VNCSERVER_PROXYCLIENT_ADDRESS=$HOST_IP - -NOVA_INSTANCES_PATH=/opt/stack/data/instances - -# Enable the Ceilometer plugin for the compute agent -enable_plugin ceilometer git://git.openstack.org/openstack/ceilometer -disable_service ceilometer-acentral,ceilometer-collector,ceilometer-api - -LOGFILE=$DEST/logs/stack.sh.log -LOGDAYS=2 - -[[post-config|$NOVA_CONF]] -[DEFAULT] -compute_monitors=cpu.virt_driver -notify_on_state_change = vm_and_task_state -[notifications] -notify_on_state_change = vm_and_task_state diff --git a/devstack/local.conf.controller b/devstack/local.conf.controller deleted file mode 100644 index c117d60..0000000 --- a/devstack/local.conf.controller +++ /dev/null @@ -1,59 +0,0 @@ -# Sample ``local.conf`` for controller node for Watcher development -# NOTE: Copy this file to the root DevStack directory for it to work properly. 
- -[[local|localrc]] - -ADMIN_PASSWORD=nomoresecrete -DATABASE_PASSWORD=stackdb -RABBIT_PASSWORD=stackqueue -SERVICE_PASSWORD=$ADMIN_PASSWORD -SERVICE_TOKEN=azertytoken - -HOST_IP=192.168.42.1 # Change this to your controller node IP address -FLAT_INTERFACE=eth0 - -FIXED_RANGE=10.254.1.0/24 # Change this to whatever your network is -NETWORK_GATEWAY=10.254.1.1 # Change this for your network - -MULTI_HOST=1 - - -#Set this to FALSE if do not want to run watcher-api behind mod-wsgi -#WATCHER_USE_MOD_WSGI=TRUE - -# This is the controller node, so disable nova-compute -disable_service n-cpu - -# Disable nova-network and use neutron instead -disable_service n-net -ENABLED_SERVICES+=,q-svc,q-dhcp,q-meta,q-agt,q-l3,neutron - -# Enable remote console access -enable_service n-cauth - -# Enable the Watcher Dashboard plugin -enable_plugin watcher-dashboard git://git.openstack.org/openstack/watcher-dashboard - -# Enable the Watcher plugin -enable_plugin watcher git://git.openstack.org/openstack/watcher - -# Enable the Ceilometer plugin -enable_plugin ceilometer git://git.openstack.org/openstack/ceilometer - -# This is the controller node, so disable the ceilometer compute agent -disable_service ceilometer-acompute -# Enable the ceilometer api explicitly(bug:1667678) -enable_service ceilometer-api - -# Enable the Gnocchi plugin -enable_plugin gnocchi https://git.openstack.org/openstack/gnocchi - -LOGFILE=$DEST/logs/stack.sh.log -LOGDAYS=2 - -[[post-config|$NOVA_CONF]] -[DEFAULT] -compute_monitors=cpu.virt_driver -notify_on_state_change = vm_and_task_state -[notifications] -notify_on_state_change = vm_and_task_state diff --git a/devstack/plugin.sh b/devstack/plugin.sh deleted file mode 100644 index 2bf726d..0000000 --- a/devstack/plugin.sh +++ /dev/null @@ -1,53 +0,0 @@ -#!/bin/bash -# -# plugin.sh - DevStack plugin script to install watcher - -# Save trace setting -_XTRACE_WATCHER_PLUGIN=$(set +o | grep xtrace) -set -o xtrace - -echo_summary "watcher's plugin.sh was called..." 
-source $DEST/watcher/devstack/lib/watcher - -# Show all of defined environment variables -(set -o posix; set) - -if is_service_enabled watcher-api watcher-decision-engine watcher-applier; then - if [[ "$1" == "stack" && "$2" == "pre-install" ]]; then - echo_summary "Before Installing watcher" - elif [[ "$1" == "stack" && "$2" == "install" ]]; then - echo_summary "Installing watcher" - install_watcher - - LIBS_FROM_GIT="${LIBS_FROM_GIT},python-watcherclient" - - install_watcherclient - cleanup_watcher - elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then - echo_summary "Configuring watcher" - configure_watcher - - if is_service_enabled key; then - create_watcher_accounts - fi - - elif [[ "$1" == "stack" && "$2" == "extra" ]]; then - # Initialize watcher - init_watcher - - # Start the watcher components - echo_summary "Starting watcher" - start_watcher - fi - - if [[ "$1" == "unstack" ]]; then - stop_watcher - fi - - if [[ "$1" == "clean" ]]; then - cleanup_watcher - fi -fi - -# Restore xtrace -$_XTRACE_WATCHER_PLUGIN diff --git a/devstack/settings b/devstack/settings deleted file mode 100644 index 190ae7d..0000000 --- a/devstack/settings +++ /dev/null @@ -1,9 +0,0 @@ -# DevStack settings - -# Make sure rabbit is enabled -enable_service rabbit - -# Enable Watcher services -enable_service watcher-api -enable_service watcher-decision-engine -enable_service watcher-applier diff --git a/doc/ext/__init__.py b/doc/ext/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/doc/ext/term.py b/doc/ext/term.py deleted file mode 100644 index 6bbc55d..0000000 --- a/doc/ext/term.py +++ /dev/null @@ -1,172 +0,0 @@ -# Copyright (c) 2015 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import unicode_literals - -import importlib -import inspect - -from docutils import nodes -from docutils.parsers import rst -from docutils import statemachine - -from watcher.version import version_info - - -class BaseWatcherDirective(rst.Directive): - - def __init__(self, name, arguments, options, content, lineno, - content_offset, block_text, state, state_machine): - super(BaseWatcherDirective, self).__init__( - name, arguments, options, content, lineno, - content_offset, block_text, state, state_machine) - self.result = statemachine.ViewList() - - def run(self): - raise NotImplementedError('Must override run() is subclass.') - - def add_line(self, line, *lineno): - """Append one line of generated reST to the output.""" - self.result.append(line, rst.directives.unchanged, *lineno) - - def add_textblock(self, textblock): - for line in textblock.splitlines(): - self.add_line(line) - - def add_object_docstring(self, obj): - obj_raw_docstring = obj.__doc__ or "" - - # Maybe it's within the __init__ - if not obj_raw_docstring and hasattr(obj, "__init__"): - if obj.__init__.__doc__: - obj_raw_docstring = obj.__init__.__doc__ - - if not obj_raw_docstring: - # Raise a warning to make the tests fail wit doc8 - raise self.error("No docstring available for %s!" 
% obj) - - obj_docstring = inspect.cleandoc(obj_raw_docstring) - self.add_textblock(obj_docstring) - - -class WatcherTerm(BaseWatcherDirective): - """Directive to import an RST formatted docstring into the Watcher glossary - - **How to use it** - - # inside your .py file - class DocumentedObject(object): - '''My *.rst* docstring''' - - - # Inside your .rst file - .. watcher-term:: import.path.to.your.DocumentedObject - - This directive will then import the docstring and then interpret it. - """ - - # You need to put an import path as an argument for this directive to work - required_arguments = 1 - - def run(self): - cls_path = self.arguments[0] - - try: - try: - cls = importlib.import_module(cls_path) - except ImportError: - module_name, cls_name = cls_path.rsplit('.', 1) - mod = importlib.import_module(module_name) - cls = getattr(mod, cls_name) - except Exception as exc: - raise self.error(exc) - - self.add_object_docstring(cls) - - node = nodes.paragraph() - node.document = self.state.document - self.state.nested_parse(self.result, 0, node) - return node.children - - -class WatcherFunc(BaseWatcherDirective): - """Directive to import a value returned by a func into the Watcher doc - - **How to use it** - - # inside your .py file - class Bar(object): - - def foo(object): - return foo_string - - - # Inside your .rst file - .. watcher-func:: import.path.to.your.Bar.foo node_classname - - node_classname is decumented here: - http://docutils.sourceforge.net/docs/ref/rst/restructuredtext.html - - This directive will then import the value and then interpret it. - """ - - # You need to put an import path as an argument for this directive to work - # required_arguments = 1 - # optional_arguments = 1 - - option_spec = {'format': rst.directives.unchanged} - has_content = True - - def run(self): - if not self.content: - error = self.state_machine.reporter.error( - 'The "%s" directive is empty; content required.' 
% self.name, - nodes.literal_block(self.block_text, self.block_text), - line=self.lineno) - return [error] - - func_path = self.content[0] - try: - cls_path, func_name = func_path.rsplit('.', 1) - module_name, cls_name = cls_path.rsplit('.', 1) - mod = importlib.import_module(module_name) - cls = getattr(mod, cls_name) - except Exception as exc: - raise self.error(exc) - - cls_obj = cls() - func = getattr(cls_obj, func_name) - textblock = func() - if not isinstance(textblock, str): - textblock = str(textblock) - - self.add_textblock(textblock) - - try: - node_class = getattr(nodes, - self.options.get('format', 'paragraph')) - except Exception as exc: - raise self.error(exc) - - node = node_class() - node.document = self.state.document - self.state.nested_parse(self.result, 0, node) - return [node] - - -def setup(app): - app.add_directive('watcher-term', WatcherTerm) - app.add_directive('watcher-func', WatcherFunc) - return {'version': version_info.version_string()} diff --git a/doc/ext/versioned_notifications.py b/doc/ext/versioned_notifications.py deleted file mode 100644 index 1a81079..0000000 --- a/doc/ext/versioned_notifications.py +++ /dev/null @@ -1,133 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -This provides a sphinx extension able to list the implemented versioned -notifications into the developer documentation. - -It is used via a single directive in the .rst file - - .. 
versioned_notifications:: - -""" - -from sphinx.util.compat import Directive -from docutils import nodes - -from watcher.notifications import base as notification -from watcher.objects import base - - -class VersionedNotificationDirective(Directive): - - SAMPLE_ROOT = 'doc/notification_samples/' - TOGGLE_SCRIPT = """ - -""" - - def run(self): - notifications = self._collect_notifications() - return self._build_markup(notifications) - - def _collect_notifications(self): - base.WatcherObjectRegistry.register_notification_objects() - notifications = [] - ovos = base.WatcherObjectRegistry.obj_classes() - for name, cls in ovos.items(): - cls = cls[0] - if (issubclass(cls, notification.NotificationBase) and - cls != notification.NotificationBase): - - payload_name = cls.fields['payload'].objname - payload_cls = ovos[payload_name][0] - for sample in cls.samples: - notifications.append((cls.__name__, - payload_cls.__name__, - sample)) - return sorted(notifications) - - def _build_markup(self, notifications): - content = [] - cols = ['Event type', 'Notification class', 'Payload class', 'Sample'] - table = nodes.table() - content.append(table) - group = nodes.tgroup(cols=len(cols)) - table.append(group) - - head = nodes.thead() - group.append(head) - - for _ in cols: - group.append(nodes.colspec(colwidth=1)) - - body = nodes.tbody() - group.append(body) - - # fill the table header - row = nodes.row() - body.append(row) - for col_name in cols: - col = nodes.entry() - row.append(col) - text = nodes.strong(text=col_name) - col.append(text) - - # fill the table content, one notification per row - for name, payload, sample_file in notifications: - event_type = sample_file[0: -5].replace('-', '.') - - row = nodes.row() - body.append(row) - col = nodes.entry() - row.append(col) - text = nodes.literal(text=event_type) - col.append(text) - - col = nodes.entry() - row.append(col) - text = nodes.literal(text=name) - col.append(text) - - col = nodes.entry() - row.append(col) - text = 
nodes.literal(text=payload) - col.append(text) - - col = nodes.entry() - row.append(col) - - with open(self.SAMPLE_ROOT + sample_file, 'r') as f: - sample_content = f.read() - - event_type = sample_file[0: -5] - html_str = self.TOGGLE_SCRIPT % ((event_type, ) * 3) - html_str += ("" % event_type) - html_str += ("
%s
" - % (event_type, sample_content)) - - raw = nodes.raw('', html_str, format="html") - col.append(raw) - - return content - - -def setup(app): - app.add_directive('versioned_notifications', - VersionedNotificationDirective) diff --git a/doc/notification_samples/action-create.json b/doc/notification_samples/action-create.json deleted file mode 100644 index c8dd7a4..0000000 --- a/doc/notification_samples/action-create.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "priority": "INFO", - "payload": { - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.0", - "watcher_object.name": "ActionCreatePayload", - "watcher_object.data": { - "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", - "input_parameters": { - "param2": 2, - "param1": 1 - }, - "created_at": "2016-10-18T09:52:05Z", - "updated_at": null, - "state": "PENDING", - "action_plan": { - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.0", - "watcher_object.name": "TerseActionPlanPayload", - "watcher_object.data": { - "uuid": "76be87bd-3422-43f9-93a0-e85a577e3061", - "global_efficacy": {}, - "created_at": "2016-10-18T09:52:05Z", - "updated_at": null, - "state": "ONGOING", - "audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", - "strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", - "deleted_at": null - } - }, - "parents": [], - "action_type": "nop", - "deleted_at": null - } - }, - "publisher_id": "infra-optim:node0", - "timestamp": "2017-01-01 00:00:00.000000", - "event_type": "action.create", - "message_id": "530b409c-9b6b-459b-8f08-f93dbfeb4d41" -} diff --git a/doc/notification_samples/action-delete.json b/doc/notification_samples/action-delete.json deleted file mode 100644 index dbc5ef9..0000000 --- a/doc/notification_samples/action-delete.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "priority": "INFO", - "payload": { - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.0", - "watcher_object.name": "ActionDeletePayload", - "watcher_object.data": { - "uuid": 
"10a47dd1-4874-4298-91cf-eff046dbdb8d", - "input_parameters": { - "param2": 2, - "param1": 1 - }, - "created_at": "2016-10-18T09:52:05Z", - "updated_at": null, - "state": "DELETED", - "action_plan": { - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.0", - "watcher_object.name": "TerseActionPlanPayload", - "watcher_object.data": { - "uuid": "76be87bd-3422-43f9-93a0-e85a577e3061", - "global_efficacy": {}, - "created_at": "2016-10-18T09:52:05Z", - "updated_at": null, - "state": "ONGOING", - "audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", - "strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", - "deleted_at": null - } - }, - "parents": [], - "action_type": "nop", - "deleted_at": null - } - }, - "publisher_id": "infra-optim:node0", - "timestamp": "2017-01-01 00:00:00.000000", - "event_type": "action.delete", - "message_id": "530b409c-9b6b-459b-8f08-f93dbfeb4d41" -} diff --git a/doc/notification_samples/action-execution-end.json b/doc/notification_samples/action-execution-end.json deleted file mode 100644 index 479a649..0000000 --- a/doc/notification_samples/action-execution-end.json +++ /dev/null @@ -1,41 +0,0 @@ -{ - "priority": "INFO", - "payload": { - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.0", - "watcher_object.name": "ActionExecutionPayload", - "watcher_object.data": { - "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", - "input_parameters": { - "param2": 2, - "param1": 1 - }, - "fault": null, - "created_at": "2016-10-18T09:52:05Z", - "updated_at": null, - "state": "SUCCEEDED", - "action_plan": { - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.0", - "watcher_object.name": "TerseActionPlanPayload", - "watcher_object.data": { - "uuid": "76be87bd-3422-43f9-93a0-e85a577e3061", - "global_efficacy": {}, - "created_at": "2016-10-18T09:52:05Z", - "updated_at": null, - "state": "ONGOING", - "audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", - "strategy_uuid": 
"cb3d0b58-4415-4d90-b75b-1e96878730e3", - "deleted_at": null - } - }, - "parents": [], - "action_type": "nop", - "deleted_at": null - } - }, - "event_type": "action.execution.end", - "publisher_id": "infra-optim:node0", - "timestamp": "2017-01-01 00:00:00.000000", - "message_id": "530b409c-9b6b-459b-8f08-f93dbfeb4d41" -} diff --git a/doc/notification_samples/action-execution-error.json b/doc/notification_samples/action-execution-error.json deleted file mode 100644 index 66e2371..0000000 --- a/doc/notification_samples/action-execution-error.json +++ /dev/null @@ -1,51 +0,0 @@ -{ - "priority": "ERROR", - "payload": { - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.0", - "watcher_object.name": "ActionExecutionPayload", - "watcher_object.data": { - "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", - "input_parameters": { - "param2": 2, - "param1": 1 - }, - "fault": { - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.0", - "watcher_object.name": "ExceptionPayload", - "watcher_object.data": { - "module_name": "watcher.tests.notifications.test_action_notification", - "exception": "WatcherException", - "exception_message": "TEST", - "function_name": "test_send_action_execution_with_error" - } - }, - "created_at": "2016-10-18T09:52:05Z", - "updated_at": null, - "state": "FAILED", - "action_plan": { - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.0", - "watcher_object.name": "TerseActionPlanPayload", - "watcher_object.data": { - "uuid": "76be87bd-3422-43f9-93a0-e85a577e3061", - "global_efficacy": {}, - "created_at": "2016-10-18T09:52:05Z", - "updated_at": null, - "state": "ONGOING", - "audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", - "strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", - "deleted_at": null - } - }, - "parents": [], - "action_type": "nop", - "deleted_at": null - } - }, - "event_type": "action.execution.error", - "publisher_id": "infra-optim:node0", - "timestamp": "2017-01-01 
00:00:00.000000", - "message_id": "530b409c-9b6b-459b-8f08-f93dbfeb4d41" -} diff --git a/doc/notification_samples/action-execution-start.json b/doc/notification_samples/action-execution-start.json deleted file mode 100644 index ace78f9..0000000 --- a/doc/notification_samples/action-execution-start.json +++ /dev/null @@ -1,41 +0,0 @@ -{ - "priority": "INFO", - "payload": { - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.0", - "watcher_object.name": "ActionExecutionPayload", - "watcher_object.data": { - "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", - "input_parameters": { - "param2": 2, - "param1": 1 - }, - "fault": null, - "created_at": "2016-10-18T09:52:05Z", - "updated_at": null, - "state": "ONGOING", - "action_plan": { - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.0", - "watcher_object.name": "TerseActionPlanPayload", - "watcher_object.data": { - "uuid": "76be87bd-3422-43f9-93a0-e85a577e3061", - "global_efficacy": {}, - "created_at": "2016-10-18T09:52:05Z", - "updated_at": null, - "state": "ONGOING", - "audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", - "strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", - "deleted_at": null - } - }, - "parents": [], - "action_type": "nop", - "deleted_at": null - } - }, - "event_type": "action.execution.start", - "publisher_id": "infra-optim:node0", - "timestamp": "2017-01-01 00:00:00.000000", - "message_id": "530b409c-9b6b-459b-8f08-f93dbfeb4d41" -} diff --git a/doc/notification_samples/action-update.json b/doc/notification_samples/action-update.json deleted file mode 100644 index 3f4cbcb..0000000 --- a/doc/notification_samples/action-update.json +++ /dev/null @@ -1,49 +0,0 @@ -{ - "priority": "INFO", - "payload": { - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.0", - "watcher_object.name": "ActionUpdatePayload", - "watcher_object.data": { - "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", - "input_parameters": { - "param2": 2, - "param1": 1 - 
}, - "created_at": "2016-10-18T09:52:05Z", - "updated_at": null, - "state_update": { - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.0", - "watcher_object.name": "ActionStateUpdatePayload", - "watcher_object.data": { - "old_state": "PENDING", - "state": "ONGOING" - } - }, - "state": "ONGOING", - "action_plan": { - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.0", - "watcher_object.name": "TerseActionPlanPayload", - "watcher_object.data": { - "uuid": "76be87bd-3422-43f9-93a0-e85a577e3061", - "global_efficacy": {}, - "created_at": "2016-10-18T09:52:05Z", - "updated_at": null, - "state": "ONGOING", - "audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", - "strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", - "deleted_at": null - } - }, - "parents": [], - "action_type": "nop", - "deleted_at": null - } - }, - "event_type": "action.update", - "publisher_id": "infra-optim:node0", - "timestamp": "2017-01-01 00:00:00.000000", - "message_id": "530b409c-9b6b-459b-8f08-f93dbfeb4d41" -} diff --git a/doc/notification_samples/action_plan-create.json b/doc/notification_samples/action_plan-create.json deleted file mode 100644 index b3de9b7..0000000 --- a/doc/notification_samples/action_plan-create.json +++ /dev/null @@ -1,54 +0,0 @@ -{ - "publisher_id": "infra-optim:node0", - "payload": { - "watcher_object.version": "1.0", - "watcher_object.data": { - "strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", - "strategy": { - "watcher_object.version": "1.0", - "watcher_object.data": { - "uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", - "display_name": "test strategy", - "name": "TEST", - "updated_at": null, - "parameters_spec": {}, - "created_at": "2016-10-18T09:52:05Z", - "deleted_at": null - }, - "watcher_object.namespace": "watcher", - "watcher_object.name": "StrategyPayload" - }, - "created_at": null, - "audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", - "audit": { - "watcher_object.version": "1.0", - 
"watcher_object.data": { - "audit_type": "ONESHOT", - "scope": [], - "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", - "goal_uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", - "strategy_uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", - "parameters": {}, - "interval": null, - "deleted_at": null, - "state": "PENDING", - "created_at": "2016-10-18T09:52:05Z", - "updated_at": null - }, - "watcher_object.namespace": "watcher", - "watcher_object.name": "TerseAuditPayload" - }, - "uuid": "76be87bd-3422-43f9-93a0-e85a577e3061", - "global_efficacy": {}, - "deleted_at": null, - "state": "RECOMMENDED", - "updated_at": null - }, - "watcher_object.namespace": "watcher", - "watcher_object.name": "ActionPlanCreatePayload" - }, - "priority": "INFO", - "message_id": "5148bff1-ea06-4ad6-8e4e-8c85ca5eb629", - "event_type": "action_plan.create", - "timestamp": "2016-10-18 09:52:05.219414" -} diff --git a/doc/notification_samples/action_plan-delete.json b/doc/notification_samples/action_plan-delete.json deleted file mode 100644 index 29d0762..0000000 --- a/doc/notification_samples/action_plan-delete.json +++ /dev/null @@ -1,54 +0,0 @@ -{ - "publisher_id": "infra-optim:node0", - "timestamp": "2016-10-18 09:52:05.219414", - "payload": { - "watcher_object.data": { - "uuid": "76be87bd-3422-43f9-93a0-e85a577e3061", - "created_at": "2016-10-18T09:52:05Z", - "audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", - "audit": { - "watcher_object.data": { - "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", - "goal_uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", - "strategy_uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", - "interval": null, - "audit_type": "ONESHOT", - "scope": [], - "updated_at": null, - "deleted_at": null, - "state": "PENDING", - "created_at": "2016-10-18T09:52:05Z", - "parameters": {} - }, - "watcher_object.version": "1.0", - "watcher_object.name": "TerseAuditPayload", - "watcher_object.namespace": "watcher" - }, - "global_efficacy": {}, - "updated_at": null, - "deleted_at": null, - 
"strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", - "strategy": { - "watcher_object.data": { - "uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", - "created_at": "2016-10-18T09:52:05Z", - "name": "TEST", - "display_name": "test strategy", - "deleted_at": null, - "updated_at": null, - "parameters_spec": {} - }, - "watcher_object.version": "1.0", - "watcher_object.name": "StrategyPayload", - "watcher_object.namespace": "watcher" - }, - "state": "DELETED" - }, - "watcher_object.version": "1.0", - "watcher_object.name": "ActionPlanDeletePayload", - "watcher_object.namespace": "watcher" - }, - "event_type": "action_plan.delete", - "message_id": "3d137686-a1fd-4683-ab40-c4210aac2140", - "priority": "INFO" -} diff --git a/doc/notification_samples/action_plan-execution-end.json b/doc/notification_samples/action_plan-execution-end.json deleted file mode 100644 index 2fee7f0..0000000 --- a/doc/notification_samples/action_plan-execution-end.json +++ /dev/null @@ -1,55 +0,0 @@ -{ - "event_type": "action_plan.execution.end", - "payload": { - "watcher_object.namespace": "watcher", - "watcher_object.name": "ActionPlanActionPayload", - "watcher_object.version": "1.0", - "watcher_object.data": { - "created_at": "2016-10-18T09:52:05Z", - "deleted_at": null, - "audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", - "audit": { - "watcher_object.namespace": "watcher", - "watcher_object.name": "TerseAuditPayload", - "watcher_object.version": "1.0", - "watcher_object.data": { - "created_at": "2016-10-18T09:52:05Z", - "deleted_at": null, - "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", - "goal_uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", - "strategy_uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", - "scope": [], - "audit_type": "ONESHOT", - "state": "SUCCEEDED", - "parameters": {}, - "interval": null, - "updated_at": null - } - }, - "uuid": "76be87bd-3422-43f9-93a0-e85a577e3061", - "fault": null, - "state": "ONGOING", - "global_efficacy": {}, - "strategy_uuid": 
"cb3d0b58-4415-4d90-b75b-1e96878730e3", - "strategy": { - "watcher_object.namespace": "watcher", - "watcher_object.name": "StrategyPayload", - "watcher_object.version": "1.0", - "watcher_object.data": { - "created_at": "2016-10-18T09:52:05Z", - "deleted_at": null, - "name": "TEST", - "uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", - "parameters_spec": {}, - "display_name": "test strategy", - "updated_at": null - } - }, - "updated_at": null - } - }, - "priority": "INFO", - "message_id": "3984dc2b-8aef-462b-a220-8ae04237a56e", - "timestamp": "2016-10-18 09:52:05.219414", - "publisher_id": "infra-optim:node0" -} diff --git a/doc/notification_samples/action_plan-execution-error.json b/doc/notification_samples/action_plan-execution-error.json deleted file mode 100644 index 466c67f..0000000 --- a/doc/notification_samples/action_plan-execution-error.json +++ /dev/null @@ -1,65 +0,0 @@ -{ - "event_type": "action_plan.execution.error", - "publisher_id": "infra-optim:node0", - "priority": "ERROR", - "message_id": "9a45c5ae-0e21-4300-8fa0-5555d52a66d9", - "payload": { - "watcher_object.version": "1.0", - "watcher_object.namespace": "watcher", - "watcher_object.name": "ActionPlanActionPayload", - "watcher_object.data": { - "fault": { - "watcher_object.version": "1.0", - "watcher_object.namespace": "watcher", - "watcher_object.name": "ExceptionPayload", - "watcher_object.data": { - "exception_message": "TEST", - "module_name": "watcher.tests.notifications.test_action_plan_notification", - "function_name": "test_send_action_plan_action_with_error", - "exception": "WatcherException" - } - }, - "uuid": "76be87bd-3422-43f9-93a0-e85a577e3061", - "created_at": "2016-10-18T09:52:05Z", - "strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", - "strategy": { - "watcher_object.version": "1.0", - "watcher_object.namespace": "watcher", - "watcher_object.name": "StrategyPayload", - "watcher_object.data": { - "uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", - "created_at": 
"2016-10-18T09:52:05Z", - "name": "TEST", - "updated_at": null, - "display_name": "test strategy", - "parameters_spec": {}, - "deleted_at": null - } - }, - "updated_at": null, - "deleted_at": null, - "audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", - "audit": { - "watcher_object.version": "1.0", - "watcher_object.namespace": "watcher", - "watcher_object.name": "TerseAuditPayload", - "watcher_object.data": { - "parameters": {}, - "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", - "goal_uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", - "strategy_uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", - "created_at": "2016-10-18T09:52:05Z", - "scope": [], - "updated_at": null, - "audit_type": "ONESHOT", - "interval": null, - "deleted_at": null, - "state": "PENDING" - } - }, - "global_efficacy": {}, - "state": "ONGOING" - } - }, - "timestamp": "2016-10-18 09:52:05.219414" -} diff --git a/doc/notification_samples/action_plan-execution-start.json b/doc/notification_samples/action_plan-execution-start.json deleted file mode 100644 index 7045162..0000000 --- a/doc/notification_samples/action_plan-execution-start.json +++ /dev/null @@ -1,55 +0,0 @@ -{ - "event_type": "action_plan.execution.start", - "payload": { - "watcher_object.namespace": "watcher", - "watcher_object.name": "ActionPlanActionPayload", - "watcher_object.version": "1.0", - "watcher_object.data": { - "created_at": "2016-10-18T09:52:05Z", - "deleted_at": null, - "audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", - "audit": { - "watcher_object.namespace": "watcher", - "watcher_object.name": "TerseAuditPayload", - "watcher_object.version": "1.0", - "watcher_object.data": { - "created_at": "2016-10-18T09:52:05Z", - "deleted_at": null, - "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", - "goal_uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", - "strategy_uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", - "scope": [], - "audit_type": "ONESHOT", - "state": "PENDING", - "parameters": {}, - "interval": null, - "updated_at": 
null - } - }, - "uuid": "76be87bd-3422-43f9-93a0-e85a577e3061", - "fault": null, - "state": "ONGOING", - "global_efficacy": {}, - "strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", - "strategy": { - "watcher_object.namespace": "watcher", - "watcher_object.name": "StrategyPayload", - "watcher_object.version": "1.0", - "watcher_object.data": { - "created_at": "2016-10-18T09:52:05Z", - "deleted_at": null, - "name": "TEST", - "uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", - "parameters_spec": {}, - "display_name": "test strategy", - "updated_at": null - } - }, - "updated_at": null - } - }, - "priority": "INFO", - "message_id": "3984dc2b-8aef-462b-a220-8ae04237a56e", - "timestamp": "2016-10-18 09:52:05.219414", - "publisher_id": "infra-optim:node0" -} diff --git a/doc/notification_samples/action_plan-update.json b/doc/notification_samples/action_plan-update.json deleted file mode 100644 index 60f7eec..0000000 --- a/doc/notification_samples/action_plan-update.json +++ /dev/null @@ -1,63 +0,0 @@ -{ - "payload": { - "watcher_object.version": "1.0", - "watcher_object.data": { - "audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", - "audit": { - "watcher_object.version": "1.0", - "watcher_object.data": { - "audit_type": "ONESHOT", - "scope": [], - "created_at": "2016-10-18T09:52:05Z", - "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", - "goal_uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", - "strategy_uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", - "interval": null, - "updated_at": null, - "state": "PENDING", - "deleted_at": null, - "parameters": {} - }, - "watcher_object.namespace": "watcher", - "watcher_object.name": "TerseAuditPayload" - }, - "created_at": "2016-10-18T09:52:05Z", - "uuid": "76be87bd-3422-43f9-93a0-e85a577e3061", - "updated_at": null, - "state_update": { - "watcher_object.version": "1.0", - "watcher_object.data": { - "old_state": "PENDING", - "state": "ONGOING" - }, - "watcher_object.namespace": "watcher", - "watcher_object.name": 
"ActionPlanStateUpdatePayload" - }, - "state": "ONGOING", - "deleted_at": null, - "strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", - "strategy": { - "watcher_object.version": "1.0", - "watcher_object.data": { - "name": "TEST", - "uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", - "display_name": "test strategy", - "created_at": "2016-10-18T09:52:05Z", - "updated_at": null, - "deleted_at": null, - "parameters_spec": {} - }, - "watcher_object.namespace": "watcher", - "watcher_object.name": "StrategyPayload" - }, - "global_efficacy": {} - }, - "watcher_object.namespace": "watcher", - "watcher_object.name": "ActionPlanUpdatePayload" - }, - "publisher_id": "infra-optim:node0", - "priority": "INFO", - "timestamp": "2016-10-18 09:52:05.219414", - "event_type": "action_plan.update", - "message_id": "0a8a7329-fd5a-4ec6-97d7-2b776ce51a4c" -} diff --git a/doc/notification_samples/audit-create.json b/doc/notification_samples/audit-create.json deleted file mode 100644 index dd655ea..0000000 --- a/doc/notification_samples/audit-create.json +++ /dev/null @@ -1,71 +0,0 @@ -{ - "priority": "INFO", - "payload": { - "watcher_object.data": { - "audit_type": "ONESHOT", - "parameters": { - "para2": "hello", - "para1": 3.2 - }, - "state": "PENDING", - "updated_at": null, - "deleted_at": null, - "goal_uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", - "goal": { - "watcher_object.data": { - "uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", - "name": "dummy", - "updated_at": null, - "deleted_at": null, - "efficacy_specification": [], - "created_at": "2016-11-04T16:25:35Z", - "display_name": "Dummy goal" - }, - "watcher_object.name": "GoalPayload", - "watcher_object.version": "1.0", - "watcher_object.namespace": "watcher" - }, - "interval": null, - "scope": [], - "strategy_uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", - "strategy": { - "watcher_object.data": { - "parameters_spec": { - "properties": { - "para2": { - "type": "string", - "default": "hello", - "description": "string parameter 
example" - }, - "para1": { - "description": "number parameter example", - "maximum": 10.2, - "type": "number", - "default": 3.2, - "minimum": 1.0 - } - } - }, - "name": "dummy", - "uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", - "updated_at": null, - "deleted_at": null, - "created_at": "2016-11-04T16:25:35Z", - "display_name": "Dummy strategy" - }, - "watcher_object.name": "StrategyPayload", - "watcher_object.version": "1.0", - "watcher_object.namespace": "watcher" - }, - "created_at": "2016-11-04T16:29:20Z", - "uuid": "4a97b9dd-2023-43dc-b713-815bdd94d4d6" - }, - "watcher_object.name": "AuditCreatePayload", - "watcher_object.version": "1.0", - "watcher_object.namespace": "watcher" - }, - "publisher_id": "infra-optim:localhost", - "timestamp": "2016-11-04 16:31:36.264673 ", - "event_type": "audit.create", - "message_id": "cbcf9f2c-7c53-4b4d-91ec-db49cca024b6" -} diff --git a/doc/notification_samples/audit-delete.json b/doc/notification_samples/audit-delete.json deleted file mode 100644 index 7527829..0000000 --- a/doc/notification_samples/audit-delete.json +++ /dev/null @@ -1,71 +0,0 @@ -{ - "priority": "INFO", - "payload": { - "watcher_object.data": { - "audit_type": "ONESHOT", - "parameters": { - "para2": "hello", - "para1": 3.2 - }, - "state": "DELETED", - "updated_at": null, - "deleted_at": null, - "goal_uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", - "goal": { - "watcher_object.data": { - "uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", - "name": "dummy", - "updated_at": null, - "deleted_at": null, - "efficacy_specification": [], - "created_at": "2016-11-04T16:25:35Z", - "display_name": "Dummy goal" - }, - "watcher_object.name": "GoalPayload", - "watcher_object.version": "1.0", - "watcher_object.namespace": "watcher" - }, - "interval": null, - "scope": [], - "strategy_uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", - "strategy": { - "watcher_object.data": { - "parameters_spec": { - "properties": { - "para2": { - "type": "string", - "default": "hello", - 
"description": "string parameter example" - }, - "para1": { - "description": "number parameter example", - "maximum": 10.2, - "type": "number", - "default": 3.2, - "minimum": 1.0 - } - } - }, - "name": "dummy", - "uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", - "updated_at": null, - "deleted_at": null, - "created_at": "2016-11-04T16:25:35Z", - "display_name": "Dummy strategy" - }, - "watcher_object.name": "StrategyPayload", - "watcher_object.version": "1.0", - "watcher_object.namespace": "watcher" - }, - "created_at": "2016-11-04T16:29:20Z", - "uuid": "4a97b9dd-2023-43dc-b713-815bdd94d4d6" - }, - "watcher_object.name": "AuditDeletePayload", - "watcher_object.version": "1.0", - "watcher_object.namespace": "watcher" - }, - "publisher_id": "infra-optim:localhost", - "timestamp": "2016-11-04 16:31:36.264673 ", - "event_type": "audit.delete", - "message_id": "cbcf9f2c-7c53-4b4d-91ec-db49cca024b6" -} diff --git a/doc/notification_samples/audit-planner-end.json b/doc/notification_samples/audit-planner-end.json deleted file mode 100644 index d3307c0..0000000 --- a/doc/notification_samples/audit-planner-end.json +++ /dev/null @@ -1,72 +0,0 @@ -{ - "priority": "INFO", - "payload": { - "watcher_object.data": { - "audit_type": "ONESHOT", - "parameters": { - "para2": "hello", - "para1": 3.2 - }, - "state": "ONGOING", - "updated_at": null, - "deleted_at": null, - "fault": null, - "goal_uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", - "goal": { - "watcher_object.data": { - "uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", - "name": "dummy", - "updated_at": null, - "deleted_at": null, - "efficacy_specification": [], - "created_at": "2016-11-04T16:25:35Z", - "display_name": "Dummy goal" - }, - "watcher_object.name": "GoalPayload", - "watcher_object.version": "1.0", - "watcher_object.namespace": "watcher" - }, - "interval": null, - "scope": [], - "strategy_uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", - "strategy": { - "watcher_object.data": { - "parameters_spec": { - "properties": 
{ - "para2": { - "type": "string", - "default": "hello", - "description": "string parameter example" - }, - "para1": { - "description": "number parameter example", - "maximum": 10.2, - "type": "number", - "default": 3.2, - "minimum": 1.0 - } - } - }, - "name": "dummy", - "uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", - "updated_at": null, - "deleted_at": null, - "created_at": "2016-11-04T16:25:35Z", - "display_name": "Dummy strategy" - }, - "watcher_object.name": "StrategyPayload", - "watcher_object.version": "1.0", - "watcher_object.namespace": "watcher" - }, - "created_at": "2016-11-04T16:29:20Z", - "uuid": "4a97b9dd-2023-43dc-b713-815bdd94d4d6" - }, - "watcher_object.name": "AuditActionPayload", - "watcher_object.version": "1.0", - "watcher_object.namespace": "watcher" - }, - "publisher_id": "infra-optim:localhost", - "timestamp": "2016-11-04 16:31:36.264673 ", - "event_type": "audit.planner.end", - "message_id": "cbcf9f2c-7c53-4b4d-91ec-db49cca024b6" -} diff --git a/doc/notification_samples/audit-planner-error.json b/doc/notification_samples/audit-planner-error.json deleted file mode 100644 index d3b1635..0000000 --- a/doc/notification_samples/audit-planner-error.json +++ /dev/null @@ -1,82 +0,0 @@ -{ - "priority": "ERROR", - "payload": { - "watcher_object.data": { - "audit_type": "ONESHOT", - "parameters": { - "para2": "hello", - "para1": 3.2 - }, - "state": "ONGOING", - "updated_at": null, - "deleted_at": null, - "fault": { - "watcher_object.data": { - "exception": "WatcherException", - "exception_message": "TEST", - "function_name": "test_send_audit_action_with_error", - "module_name": "watcher.tests.notifications.test_audit_notification" - }, - "watcher_object.name": "ExceptionPayload", - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.0" - }, - "goal_uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", - "goal": { - "watcher_object.data": { - "uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", - "name": "dummy", - "updated_at": null, - 
"deleted_at": null, - "efficacy_specification": [], - "created_at": "2016-11-04T16:25:35Z", - "display_name": "Dummy goal" - }, - "watcher_object.name": "GoalPayload", - "watcher_object.version": "1.0", - "watcher_object.namespace": "watcher" - }, - "interval": null, - "scope": [], - "strategy_uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", - "strategy": { - "watcher_object.data": { - "parameters_spec": { - "properties": { - "para2": { - "type": "string", - "default": "hello", - "description": "string parameter example" - }, - "para1": { - "description": "number parameter example", - "maximum": 10.2, - "type": "number", - "default": 3.2, - "minimum": 1.0 - } - } - }, - "name": "dummy", - "uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", - "updated_at": null, - "deleted_at": null, - "created_at": "2016-11-04T16:25:35Z", - "display_name": "Dummy strategy" - }, - "watcher_object.name": "StrategyPayload", - "watcher_object.version": "1.0", - "watcher_object.namespace": "watcher" - }, - "created_at": "2016-11-04T16:29:20Z", - "uuid": "4a97b9dd-2023-43dc-b713-815bdd94d4d6" - }, - "watcher_object.name": "AuditActionPayload", - "watcher_object.version": "1.0", - "watcher_object.namespace": "watcher" - }, - "publisher_id": "infra-optim:localhost", - "timestamp": "2016-11-04 16:31:36.264673 ", - "event_type": "audit.planner.error", - "message_id": "cbcf9f2c-7c53-4b4d-91ec-db49cca024b6" -} diff --git a/doc/notification_samples/audit-planner-start.json b/doc/notification_samples/audit-planner-start.json deleted file mode 100644 index 93644dd..0000000 --- a/doc/notification_samples/audit-planner-start.json +++ /dev/null @@ -1,72 +0,0 @@ -{ - "priority": "INFO", - "payload": { - "watcher_object.data": { - "audit_type": "ONESHOT", - "parameters": { - "para2": "hello", - "para1": 3.2 - }, - "state": "ONGOING", - "updated_at": null, - "deleted_at": null, - "fault": null, - "goal_uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", - "goal": { - "watcher_object.data": { - "uuid": 
"bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", - "name": "dummy", - "updated_at": null, - "deleted_at": null, - "efficacy_specification": [], - "created_at": "2016-11-04T16:25:35Z", - "display_name": "Dummy goal" - }, - "watcher_object.name": "GoalPayload", - "watcher_object.version": "1.0", - "watcher_object.namespace": "watcher" - }, - "interval": null, - "scope": [], - "strategy_uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", - "strategy": { - "watcher_object.data": { - "parameters_spec": { - "properties": { - "para2": { - "type": "string", - "default": "hello", - "description": "string parameter example" - }, - "para1": { - "description": "number parameter example", - "maximum": 10.2, - "type": "number", - "default": 3.2, - "minimum": 1.0 - } - } - }, - "name": "dummy", - "uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", - "updated_at": null, - "deleted_at": null, - "created_at": "2016-11-04T16:25:35Z", - "display_name": "Dummy strategy" - }, - "watcher_object.name": "StrategyPayload", - "watcher_object.version": "1.0", - "watcher_object.namespace": "watcher" - }, - "created_at": "2016-11-04T16:29:20Z", - "uuid": "4a97b9dd-2023-43dc-b713-815bdd94d4d6" - }, - "watcher_object.name": "AuditActionPayload", - "watcher_object.version": "1.0", - "watcher_object.namespace": "watcher" - }, - "publisher_id": "infra-optim:localhost", - "timestamp": "2016-11-04 16:31:36.264673 ", - "event_type": "audit.planner.start", - "message_id": "cbcf9f2c-7c53-4b4d-91ec-db49cca024b6" -} diff --git a/doc/notification_samples/audit-strategy-end.json b/doc/notification_samples/audit-strategy-end.json deleted file mode 100644 index 3874fbf..0000000 --- a/doc/notification_samples/audit-strategy-end.json +++ /dev/null @@ -1,72 +0,0 @@ -{ - "priority": "INFO", - "payload": { - "watcher_object.data": { - "audit_type": "ONESHOT", - "parameters": { - "para2": "hello", - "para1": 3.2 - }, - "state": "ONGOING", - "updated_at": null, - "deleted_at": null, - "fault": null, - "goal_uuid": 
"bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", - "goal": { - "watcher_object.data": { - "uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", - "name": "dummy", - "updated_at": null, - "deleted_at": null, - "efficacy_specification": [], - "created_at": "2016-11-04T16:25:35Z", - "display_name": "Dummy goal" - }, - "watcher_object.name": "GoalPayload", - "watcher_object.version": "1.0", - "watcher_object.namespace": "watcher" - }, - "interval": null, - "scope": [], - "strategy_uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", - "strategy": { - "watcher_object.data": { - "parameters_spec": { - "properties": { - "para2": { - "type": "string", - "default": "hello", - "description": "string parameter example" - }, - "para1": { - "description": "number parameter example", - "maximum": 10.2, - "type": "number", - "default": 3.2, - "minimum": 1.0 - } - } - }, - "name": "dummy", - "uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", - "updated_at": null, - "deleted_at": null, - "created_at": "2016-11-04T16:25:35Z", - "display_name": "Dummy strategy" - }, - "watcher_object.name": "StrategyPayload", - "watcher_object.version": "1.0", - "watcher_object.namespace": "watcher" - }, - "created_at": "2016-11-04T16:29:20Z", - "uuid": "4a97b9dd-2023-43dc-b713-815bdd94d4d6" - }, - "watcher_object.name": "AuditActionPayload", - "watcher_object.version": "1.0", - "watcher_object.namespace": "watcher" - }, - "publisher_id": "infra-optim:localhost", - "timestamp": "2016-11-04 16:31:36.264673 ", - "event_type": "audit.strategy.end", - "message_id": "cbcf9f2c-7c53-4b4d-91ec-db49cca024b6" -} diff --git a/doc/notification_samples/audit-strategy-error.json b/doc/notification_samples/audit-strategy-error.json deleted file mode 100644 index 4c6fd18..0000000 --- a/doc/notification_samples/audit-strategy-error.json +++ /dev/null @@ -1,82 +0,0 @@ -{ - "priority": "ERROR", - "payload": { - "watcher_object.data": { - "audit_type": "ONESHOT", - "parameters": { - "para2": "hello", - "para1": 3.2 - }, - "state": "ONGOING", - 
"updated_at": null, - "deleted_at": null, - "fault": { - "watcher_object.data": { - "exception": "WatcherException", - "exception_message": "TEST", - "function_name": "test_send_audit_action_with_error", - "module_name": "watcher.tests.notifications.test_audit_notification" - }, - "watcher_object.name": "ExceptionPayload", - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.0" - }, - "goal_uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", - "goal": { - "watcher_object.data": { - "uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", - "name": "dummy", - "updated_at": null, - "deleted_at": null, - "efficacy_specification": [], - "created_at": "2016-11-04T16:25:35Z", - "display_name": "Dummy goal" - }, - "watcher_object.name": "GoalPayload", - "watcher_object.version": "1.0", - "watcher_object.namespace": "watcher" - }, - "interval": null, - "scope": [], - "strategy_uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", - "strategy": { - "watcher_object.data": { - "parameters_spec": { - "properties": { - "para2": { - "type": "string", - "default": "hello", - "description": "string parameter example" - }, - "para1": { - "description": "number parameter example", - "maximum": 10.2, - "type": "number", - "default": 3.2, - "minimum": 1.0 - } - } - }, - "name": "dummy", - "uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", - "updated_at": null, - "deleted_at": null, - "created_at": "2016-11-04T16:25:35Z", - "display_name": "Dummy strategy" - }, - "watcher_object.name": "StrategyPayload", - "watcher_object.version": "1.0", - "watcher_object.namespace": "watcher" - }, - "created_at": "2016-11-04T16:29:20Z", - "uuid": "4a97b9dd-2023-43dc-b713-815bdd94d4d6" - }, - "watcher_object.name": "AuditActionPayload", - "watcher_object.version": "1.0", - "watcher_object.namespace": "watcher" - }, - "publisher_id": "infra-optim:localhost", - "timestamp": "2016-11-04 16:31:36.264673 ", - "event_type": "audit.strategy.error", - "message_id": "cbcf9f2c-7c53-4b4d-91ec-db49cca024b6" -} diff 
--git a/doc/notification_samples/audit-strategy-start.json b/doc/notification_samples/audit-strategy-start.json deleted file mode 100644 index 43322a7..0000000 --- a/doc/notification_samples/audit-strategy-start.json +++ /dev/null @@ -1,72 +0,0 @@ -{ - "priority": "INFO", - "payload": { - "watcher_object.data": { - "audit_type": "ONESHOT", - "parameters": { - "para2": "hello", - "para1": 3.2 - }, - "state": "ONGOING", - "updated_at": null, - "deleted_at": null, - "fault": null, - "goal_uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", - "goal": { - "watcher_object.data": { - "uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", - "name": "dummy", - "updated_at": null, - "deleted_at": null, - "efficacy_specification": [], - "created_at": "2016-11-04T16:25:35Z", - "display_name": "Dummy goal" - }, - "watcher_object.name": "GoalPayload", - "watcher_object.version": "1.0", - "watcher_object.namespace": "watcher" - }, - "interval": null, - "scope": [], - "strategy_uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", - "strategy": { - "watcher_object.data": { - "parameters_spec": { - "properties": { - "para2": { - "type": "string", - "default": "hello", - "description": "string parameter example" - }, - "para1": { - "description": "number parameter example", - "maximum": 10.2, - "type": "number", - "default": 3.2, - "minimum": 1.0 - } - } - }, - "name": "dummy", - "uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", - "updated_at": null, - "deleted_at": null, - "created_at": "2016-11-04T16:25:35Z", - "display_name": "Dummy strategy" - }, - "watcher_object.name": "StrategyPayload", - "watcher_object.version": "1.0", - "watcher_object.namespace": "watcher" - }, - "created_at": "2016-11-04T16:29:20Z", - "uuid": "4a97b9dd-2023-43dc-b713-815bdd94d4d6" - }, - "watcher_object.name": "AuditActionPayload", - "watcher_object.version": "1.0", - "watcher_object.namespace": "watcher" - }, - "publisher_id": "infra-optim:localhost", - "timestamp": "2016-11-04 16:31:36.264673 ", - "event_type": 
"audit.strategy.start", - "message_id": "cbcf9f2c-7c53-4b4d-91ec-db49cca024b6" -} diff --git a/doc/notification_samples/audit-update.json b/doc/notification_samples/audit-update.json deleted file mode 100644 index 3dc4b0b..0000000 --- a/doc/notification_samples/audit-update.json +++ /dev/null @@ -1,80 +0,0 @@ -{ - "publisher_id": "infra-optim:localhost", - "timestamp": "2016-11-04 16:51:38.722986 ", - "payload": { - "watcher_object.name": "AuditUpdatePayload", - "watcher_object.data": { - "strategy_uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", - "strategy": { - "watcher_object.name": "StrategyPayload", - "watcher_object.data": { - "name": "dummy", - "parameters_spec": { - "properties": { - "para2": { - "default": "hello", - "type": "string", - "description": "string parameter example" - }, - "para1": { - "maximum": 10.2, - "default": 3.2, - "minimum": 1.0, - "description": "number parameter example", - "type": "number" - } - } - }, - "updated_at": null, - "display_name": "Dummy strategy", - "deleted_at": null, - "uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", - "created_at": "2016-11-04T16:25:35Z" - }, - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.0" - }, - "scope": [], - "created_at": "2016-11-04T16:51:21Z", - "uuid": "f1e0d912-afd9-4bf2-91ef-c99cd08cc1ef", - "goal_uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", - "goal": { - "watcher_object.name": "GoalPayload", - "watcher_object.data": { - "efficacy_specification": [], - "updated_at": null, - "name": "dummy", - "display_name": "Dummy goal", - "deleted_at": null, - "uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", - "created_at": "2016-11-04T16:25:35Z" - }, - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.0" - }, - "parameters": { - "para2": "hello", - "para1": 3.2 - }, - "deleted_at": null, - "state_update": { - "watcher_object.name": "AuditStateUpdatePayload", - "watcher_object.data": { - "state": "ONGOING", - "old_state": "PENDING" - }, - 
"watcher_object.namespace": "watcher", - "watcher_object.version": "1.0" - }, - "interval": null, - "updated_at": null, - "state": "ONGOING", - "audit_type": "ONESHOT" - }, - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.0" - }, - "priority": "INFO", - "event_type": "audit.update", - "message_id": "697fdf55-7252-4b6c-a2c2-5b9e85f6342c" -} diff --git a/doc/notification_samples/infra-optim-exception.json b/doc/notification_samples/infra-optim-exception.json deleted file mode 100644 index 0793312..0000000 --- a/doc/notification_samples/infra-optim-exception.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "event_type": "infra-optim.exception", - "payload": { - "watcher_object.data": { - "exception": "NoAvailableStrategyForGoal", - "exception_message": "No strategy could be found to achieve the server_consolidation goal.", - "function_name": "_aggregate_create_in_db", - "module_name": "watcher.objects.aggregate" - }, - "watcher_object.name": "ExceptionPayload", - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.0" - }, - "priority": "ERROR", - "publisher_id": "watcher-api:fake-mini" -} diff --git a/doc/notification_samples/service-update.json b/doc/notification_samples/service-update.json deleted file mode 100644 index 1f61e58..0000000 --- a/doc/notification_samples/service-update.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "payload": { - "watcher_object.name": "ServiceUpdatePayload", - "watcher_object.namespace": "watcher", - "watcher_object.data": { - "status_update": { - "watcher_object.name": "ServiceStatusUpdatePayload", - "watcher_object.namespace": "watcher", - "watcher_object.data": { - "old_state": "ACTIVE", - "state": "FAILED" - }, - "watcher_object.version": "1.0" - }, - "last_seen_up": "2016-09-22T08:32:06Z", - "name": "watcher-service", - "sevice_host": "controller" - }, - "watcher_object.version": "1.0" - }, - "event_type": "service.update", - "priority": "INFO", - "message_id": "3984dc2b-8aef-462b-a220-8ae04237a56e", - 
"timestamp": "2016-10-18 09:52:05.219414", - "publisher_id": "infra-optim:node0" -} \ No newline at end of file diff --git a/doc/source/admin/apache-mod-wsgi.rst b/doc/source/admin/apache-mod-wsgi.rst deleted file mode 100644 index c0b6347..0000000 --- a/doc/source/admin/apache-mod-wsgi.rst +++ /dev/null @@ -1,49 +0,0 @@ -.. - Except where otherwise noted, this document is licensed under Creative - Commons Attribution 3.0 License. You can view the license at: - - https://creativecommons.org/licenses/by/3.0/ - - -Installing API behind mod_wsgi -============================== - -#. Install the Apache Service:: - - Fedora 21/RHEL7/CentOS7: - sudo yum install httpd - - Fedora 22 (or higher): - sudo dnf install httpd - - Debian/Ubuntu: - apt-get install apache2 - -#. Copy ``etc/apache2/watcher.conf`` under the apache sites:: - - Fedora/RHEL7/CentOS7: - sudo cp etc/apache2/watcher /etc/httpd/conf.d/watcher.conf - - Debian/Ubuntu: - sudo cp etc/apache2/watcher /etc/apache2/sites-available/watcher.conf - -#. Edit ``/watcher.conf`` according to installation - and environment. - - * Modify the ``WSGIDaemonProcess`` directive to set the ``user`` and - ``group`` values to appropriate user on your server. - * Modify the ``WSGIScriptAlias`` directive to point to the - watcher/api/app.wsgi script. - * Modify the ``Directory`` directive to set the path to the Watcher API - code. - * Modify the ``ErrorLog and CustomLog`` to redirect the logs to the right - directory. - -#. Enable the apache watcher site and reload:: - - Fedora/RHEL7/CentOS7: - sudo systemctl reload httpd - - Debian/Ubuntu: - sudo a2ensite watcher - sudo service apache2 reload diff --git a/doc/source/admin/conf-files.rst b/doc/source/admin/conf-files.rst deleted file mode 100644 index 792bc2c..0000000 --- a/doc/source/admin/conf-files.rst +++ /dev/null @@ -1,14 +0,0 @@ -.. 
_watcher_sample_configuration_files: - -================================== -Watcher sample configuration files -================================== - -watcher.conf -~~~~~~~~~~~~ - -The ``watcher.conf`` file contains most of the options to configure the -Watcher services. - -.. literalinclude:: ../watcher.conf.sample - :language: ini diff --git a/doc/source/admin/configuration.rst b/doc/source/admin/configuration.rst deleted file mode 100644 index d379156..0000000 --- a/doc/source/admin/configuration.rst +++ /dev/null @@ -1,460 +0,0 @@ -.. - Except where otherwise noted, this document is licensed under Creative - Commons Attribution 3.0 License. You can view the license at: - - https://creativecommons.org/licenses/by/3.0/ - -=================== -Configuring Watcher -=================== - -This document is continually updated and reflects the latest -available code of the Watcher service. - -Service overview -================ - -The Watcher system is a collection of services that provides support to -optimize your IAAS platform. The Watcher service may, depending upon -configuration, interact with several other OpenStack services. This includes: - -- the OpenStack Identity service (`keystone`_) for request authentication and - to locate other OpenStack services -- the OpenStack Telemetry service (`ceilometer`_) for consuming the resources - metrics -- the OpenStack Compute service (`nova`_) works with the Watcher service and - acts as a user-facing API for instance migration. - -The Watcher service includes the following components: - -- ``watcher-decision-engine``: runs audit on part of your IAAS and return an - action plan in order to optimize resource placement. -- ``watcher-api``: A RESTful API that processes application requests by sending - them to the watcher-decision-engine over RPC. -- ``watcher-applier``: applies the action plan. -- `python-watcherclient`_: A command-line interface (CLI) for interacting with - the Watcher service. 
-- `watcher-dashboard`_: An Horizon plugin for interacting with the Watcher - service. - -Additionally, the Watcher service has certain external dependencies, which -are very similar to other OpenStack services: - -- A database to store audit and action plan information and state. You can set - the database back-end type and location. -- A queue. A central hub for passing messages, such as `RabbitMQ`_. - -Optionally, one may wish to utilize the following associated projects for -additional functionality: - -- `watcher metering`_: an alternative to collect and push metrics to the - Telemetry service. - -.. _`keystone`: https://github.com/openstack/keystone -.. _`ceilometer`: https://github.com/openstack/ceilometer -.. _`nova`: https://github.com/openstack/nova -.. _`python-watcherclient`: https://github.com/openstack/python-watcherclient -.. _`watcher-dashboard`: https://github.com/openstack/watcher-dashboard -.. _`watcher metering`: https://github.com/b-com/watcher-metering -.. _`RabbitMQ`: https://www.rabbitmq.com/ - -Install and configure prerequisites -=================================== - -You can configure Watcher services to run on separate nodes or the same node. -In this guide, the components run on one node, typically the Controller node. - -This section shows you how to install and configure the services. - -It assumes that the Identity, Image, Compute, and Networking services -have already been set up. - -.. _identity-service_configuration: - -Configure the Identity service for the Watcher service ------------------------------------------------------- - -#. Create the Watcher service user (eg ``watcher``). The service uses this to - authenticate with the Identity Service. Use the - ``KEYSTONE_SERVICE_PROJECT_NAME`` project (named ``service`` by default in - devstack) and give the user the ``admin`` role: - - .. 
code-block:: bash - - $ keystone user-create --name=watcher --pass=WATCHER_PASSWORD \ - --email=watcher@example.com \ - --tenant=KEYSTONE_SERVICE_PROJECT_NAME - $ keystone user-role-add --user=watcher \ - --tenant=KEYSTONE_SERVICE_PROJECT_NAME --role=admin - - or (by using python-openstackclient 1.8.0+) - - .. code-block:: bash - - $ openstack user create --password WATCHER_PASSWORD --enable \ - --email watcher@example.com watcher \ - --project=KEYSTONE_SERVICE_PROJECT_NAME - $ openstack role add --project KEYSTONE_SERVICE_PROJECT_NAME \ - --user watcher admin - - -#. You must register the Watcher Service with the Identity Service so that - other OpenStack services can locate it. To register the service: - - .. code-block:: bash - - $ keystone service-create --name=watcher --type=infra-optim \ - --description="Infrastructure Optimization service" - - or (by using python-openstackclient 1.8.0+) - - .. code-block:: bash - - $ openstack service create --name watcher infra-optim \ - --description="Infrastructure Optimization service" - -#. Create the endpoints by replacing YOUR_REGION and - ``WATCHER_API_[PUBLIC|ADMIN|INTERNAL]_IP`` with your region and your - Watcher Service's API node IP addresses (or FQDN): - - .. code-block:: bash - - $ keystone endpoint-create \ - --service-id=the_service_id_above \ - --publicurl=http://WATCHER_API_PUBLIC_IP:9322 \ - --internalurl=http://WATCHER_API_INTERNAL_IP:9322 \ - --adminurl=http://WATCHER_API_ADMIN_IP:9322 - - or (by using python-openstackclient 1.8.0+) - - .. code-block:: bash - - $ openstack endpoint create --region YOUR_REGION watcher \ - --publicurl http://WATCHER_API_PUBLIC_IP:9322 \ - --internalurl http://WATCHER_API_INTERNAL_IP:9322 \ - --adminurl http://WATCHER_API_ADMIN_IP:9322 - -.. _watcher-db_configuration: - -Set up the database for Watcher -------------------------------- - -The Watcher service stores information in a database. This guide uses the -MySQL database that is used by other OpenStack services. - -#. 
In MySQL, create a ``watcher`` database that is accessible by the - ``watcher`` user. Replace WATCHER_DBPASSWORD - with the actual password:: - - $ mysql -u root -p - - mysql> CREATE DATABASE watcher CHARACTER SET utf8; - mysql> GRANT ALL PRIVILEGES ON watcher.* TO 'watcher'@'localhost' \ - IDENTIFIED BY 'WATCHER_DBPASSWORD'; - mysql> GRANT ALL PRIVILEGES ON watcher.* TO 'watcher'@'%' \ - IDENTIFIED BY 'WATCHER_DBPASSWORD'; - - -Configure the Watcher service -============================= - -The Watcher service is configured via its configuration file. This file -is typically located at ``/etc/watcher/watcher.conf``. - -You can easily generate and update a sample configuration file -named :ref:`watcher.conf.sample ` by using -these following commands:: - - $ git clone git://git.openstack.org/openstack/watcher - $ cd watcher/ - $ tox -e genconfig - $ vi etc/watcher/watcher.conf.sample - - -The configuration file is organized into the following sections: - -* ``[DEFAULT]`` - General configuration -* ``[api]`` - API server configuration -* ``[database]`` - SQL driver configuration -* ``[keystone_authtoken]`` - Keystone Authentication plugin configuration -* ``[watcher_clients_auth]`` - Keystone auth configuration for clients -* ``[watcher_applier]`` - Watcher Applier module configuration -* ``[watcher_decision_engine]`` - Watcher Decision Engine module configuration -* ``[oslo_messaging_rabbit]`` - Oslo Messaging RabbitMQ driver configuration -* ``[ceilometer_client]`` - Ceilometer client configuration -* ``[cinder_client]`` - Cinder client configuration -* ``[glance_client]`` - Glance client configuration -* ``[nova_client]`` - Nova client configuration -* ``[neutron_client]`` - Neutron client configuration - -The Watcher configuration file is expected to be named -``watcher.conf``. When starting Watcher, you can specify a different -configuration file to use with ``--config-file``. 
If you do **not** specify a -configuration file, Watcher will look in the following directories for a -configuration file, in order: - -* ``~/.watcher/`` -* ``~/`` -* ``/etc/watcher/`` -* ``/etc/`` - - -Although some configuration options are mentioned here, it is recommended that -you review all the `available options -`_ -so that the watcher service is configured for your needs. - -#. The Watcher Service stores information in a database. This guide uses the - MySQL database that is used by other OpenStack services. - - Configure the location of the database via the ``connection`` option. In the - following, replace WATCHER_DBPASSWORD with the password of your ``watcher`` - user, and replace DB_IP with the IP address where the DB server is located:: - - [database] - ... - - # The SQLAlchemy connection string used to connect to the - # database (string value) - #connection= - connection = mysql://watcher:WATCHER_DBPASSWORD@DB_IP/watcher?charset=utf8 - -#. Configure the Watcher Service to use the RabbitMQ message broker by - setting one or more of these options. Replace RABBIT_HOST with the - IP address of the RabbitMQ server, RABBITMQ_USER and RABBITMQ_PASSWORD - by the RabbitMQ server login credentials :: - - [DEFAULT] - - # The messaging driver to use, defaults to rabbit. Other drivers - # include qpid and zmq. (string value) - #rpc_backend = rabbit - - # The default exchange under which topics are scoped. May be - # overridden by an exchange name specified in the transport_url - # option. (string value) - control_exchange = watcher - - ... - - [oslo_messaging_rabbit] - - # The username used by the message broker (string value) - rabbit_userid = RABBITMQ_USER - - # The password of user used by the message broker (string value) - rabbit_password = RABBITMQ_PASSWORD - - # The host where the message brokeris installed (string value) - rabbit_host = RABBIT_HOST - - # The port used bythe message broker (string value) - #rabbit_port = 5672 - - -#. 
Watcher API shall validate the token provided by every incoming request, - via keystonemiddleware, which requires the Watcher service to be configured - with the right credentials for the Identity service. - - In the configuration section here below: - - * replace IDENTITY_IP with the IP of the Identity server - * replace WATCHER_PASSWORD with the password you chose for the ``watcher`` - user - * replace KEYSTONE_SERVICE_PROJECT_NAME with the name of project created - for OpenStack services (e.g. ``service``) :: - - [keystone_authtoken] - - # Authentication type to load (unknown value) - # Deprecated group/name - [DEFAULT]/auth_plugin - #auth_type = - auth_type = password - - # Authentication URL (unknown value) - #auth_url = - auth_url = http://IDENTITY_IP:35357 - - # Username (unknown value) - # Deprecated group/name - [DEFAULT]/username - #username = - username=watcher - - # User's password (unknown value) - #password = - password = WATCHER_PASSWORD - - # Domain ID containing project (unknown value) - #project_domain_id = - project_domain_id = default - - # User's domain id (unknown value) - #user_domain_id = - user_domain_id = default - - # Project name to scope to (unknown value) - # Deprecated group/name - [DEFAULT]/tenant-name - #project_name = - project_name = KEYSTONE_SERVICE_PROJECT_NAME - -#. Watcher's decision engine and applier interact with other OpenStack - projects through those projects' clients. In order to instantiate these - clients, Watcher needs to request a new session from the Identity service - using the right credentials. - - In the configuration section here below: - - * replace IDENTITY_IP with the IP of the Identity server - * replace WATCHER_PASSWORD with the password you chose for the ``watcher`` - user - * replace KEYSTONE_SERVICE_PROJECT_NAME with the name of project created - for OpenStack services (e.g. 
``service``) :: - - [watcher_clients_auth] - - # Authentication type to load (unknown value) - # Deprecated group/name - [DEFAULT]/auth_plugin - #auth_type = - auth_type = password - - # Authentication URL (unknown value) - #auth_url = - auth_url = http://IDENTITY_IP:35357 - - # Username (unknown value) - # Deprecated group/name - [DEFAULT]/username - #username = - username=watcher - - # User's password (unknown value) - #password = - password = WATCHER_PASSWORD - - # Domain ID containing project (unknown value) - #project_domain_id = - project_domain_id = default - - # User's domain id (unknown value) - #user_domain_id = - user_domain_id = default - - # Project name to scope to (unknown value) - # Deprecated group/name - [DEFAULT]/tenant-name - #project_name = - project_name = KEYSTONE_SERVICE_PROJECT_NAME - -#. Configure the clients to use a specific version if desired. For example, to - configure Watcher to use a Nova client with version 2.1, use:: - - [nova_client] - - # Version of Nova API to use in novaclient. (string value) - #api_version = 2 - api_version = 2.1 - -#. Create the Watcher Service database tables:: - - $ watcher-db-manage --config-file /etc/watcher/watcher.conf create_schema - -#. Start the Watcher Service:: - - $ watcher-api && watcher-decision-engine && watcher-applier - -Configure Nova compute -====================== - -Please check your hypervisor configuration to correctly handle -`instance migration`_. - -.. _`instance migration`: http://docs.openstack.org/admin-guide/compute-live-migration-usage.html - -Configure Measurements -====================== - -You can configure and install Ceilometer by following the documentation below : - -#. http://docs.openstack.org/developer/ceilometer -#. 
http://docs.openstack.org/kilo/install-guide/install/apt/content/ceilometer-nova.html - -The built-in strategy 'basic_consolidation' provided by watcher requires -"**compute.node.cpu.percent**" and "**cpu_util**" measurements to be collected -by Ceilometer. -The measurements available depend on the hypervisors that OpenStack manages on -the specific implementation. -You can find the measurements available per hypervisor and OpenStack release on -the OpenStack site. -You can use 'ceilometer meter-list' to list the available meters. - -For more information: -http://docs.openstack.org/developer/ceilometer/measurements.html - -Ceilometer is designed to collect measurements from OpenStack services and from -other external components. If you would like to add new meters to the currently -existing ones, you need to follow the documentation below: - -#. http://docs.openstack.org/developer/ceilometer/new_meters.html - -The Ceilometer collector uses a pluggable storage system, meaning that you can -pick any database system you prefer. -The original implementation has been based on MongoDB but you can create your -own storage driver using whatever technology you want. -For more information : https://wiki.openstack.org/wiki/Gnocchi - - -Configure Nova Notifications -============================ - -Watcher can consume notifications generated by the Nova services, in order to -build or update, in real time, its cluster data model related to computing -resources. - -Nova publishes, by default, notifications on ``notifications`` AMQP queue -(configurable) and ``versioned_notifications`` AMQP queue (not -configurable). ``notifications`` queue is mainly used by ceilometer, so we can -not use it. And some events, related to nova-compute service state, are only -sent into the ``versioned_notifications`` queue. - -By default, Watcher listens to AMQP queues named ``watcher_notifications`` -and ``versioned_notifications``. 
So you have to update the Nova -configuration file on controller and compute nodes, in order -for Watcher to receive Nova notifications in ``watcher_notifications`` as well. - - * In the file ``/etc/nova/nova.conf``, update the section - ``[oslo_messaging_notifications]``, by redefining the list of topics - into which Nova services will publish events :: - - [oslo_messaging_notifications] - driver = messagingv2 - topics = notifications,watcher_notifications - - * Restart the Nova services. - - -Workers -======= - -You can define a number of workers for the Decision Engine and the Applier. - -If you want to create and run more audits simultaneously, you have to raise -the number of workers used by the Decision Engine:: - - [watcher_decision_engine] - - ... - - # The maximum number of threads that can be used to execute strategies - # (integer value) - #max_workers = 2 - - -If you want to execute simultaneously more recommended action plans, you -have to raise the number of workers used by the Applier:: - - [watcher_applier] - - ... - - # Number of workers for applier, default value is 1. (integer value) - # Minimum value: 1 - #workers = 1 - diff --git a/doc/source/admin/gmr.rst b/doc/source/admin/gmr.rst deleted file mode 100644 index 31ea4f5..0000000 --- a/doc/source/admin/gmr.rst +++ /dev/null @@ -1,52 +0,0 @@ -.. - Except where otherwise noted, this document is licensed under Creative - Commons Attribution 3.0 License. You can view the license at: - - https://creativecommons.org/licenses/by/3.0/ - -.. _watcher_gmr: - -======================= -Guru Meditation Reports -======================= - -Watcher contains a mechanism whereby developers and system administrators can -generate a report about the state of a running Watcher service. This report -is called a *Guru Meditation Report* (*GMR* for short). - -Generating a GMR -================ - -A *GMR* can be generated by sending the *USR2* signal to any Watcher process -with support (see below). 
The *GMR* will then be outputted as standard error -for that particular process. - -For example, suppose that ``watcher-api`` has process id ``8675``, and was run -with ``2>/var/log/watcher/watcher-api-err.log``. Then, ``kill -USR2 8675`` -will trigger the Guru Meditation report to be printed to -``/var/log/watcher/watcher-api-err.log``. - -Structure of a GMR -================== - -The *GMR* is designed to be extensible; any particular service may add its -own sections. However, the base *GMR* consists of several sections: - -Package - Shows information about the package to which this process belongs, including - version informations. - -Threads - Shows stack traces and thread ids for each of the threads within this - process. - -Green Threads - Shows stack traces for each of the green threads within this process (green - threads don't have thread ids). - -Configuration - Lists all the configuration options currently accessible via the CONF object - for the current process. - -Plugins - Lists all the plugins currently accessible by the Watcher service. diff --git a/doc/source/admin/index.rst b/doc/source/admin/index.rst deleted file mode 100644 index 62729ea..0000000 --- a/doc/source/admin/index.rst +++ /dev/null @@ -1,14 +0,0 @@ -=================== -Administrator Guide -=================== - -.. toctree:: - :maxdepth: 2 - - apache-mod-wsgi - conf-files - configuration - gmr - policy - ways-to-install - ../strategies/index diff --git a/doc/source/admin/policy.rst b/doc/source/admin/policy.rst deleted file mode 100644 index 458c812..0000000 --- a/doc/source/admin/policy.rst +++ /dev/null @@ -1,142 +0,0 @@ -.. - Copyright 2016 OpenStack Foundation - All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. 
You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -Policies -======== - -Watcher's public API calls may be restricted to certain sets of users using a -policy configuration file. This document explains exactly how policies are -configured and what they apply to. - -A policy is composed of a set of rules that are used in determining if a -particular action may be performed by the authorized tenant. - -Constructing a Policy Configuration File ----------------------------------------- - -A policy configuration file is simply a JSON object that contains sets of -rules. Each top-level key is the name of a rule. Each rule -is a string that describes an action that may be performed in the Watcher API. 
- -The actions that may have a rule enforced on them are: - -* ``strategy:get_all``, ``strategy:detail`` - List available strategies - - * ``GET /v1/strategies`` - * ``GET /v1/strategies/detail`` - -* ``strategy:get`` - Retrieve a specific strategy entity - - * ``GET /v1/strategies/`` - * ``GET /v1/strategies/`` - - -* ``goal:get_all``, ``goal:detail`` - List available goals - - * ``GET /v1/goals`` - * ``GET /v1/goals/detail`` - -* ``goal:get`` - Retrieve a specific goal entity - - * ``GET /v1/goals/`` - * ``GET /v1/goals/`` - - -* ``audit_template:get_all``, ``audit_template:detail`` - List available - audit_templates - - * ``GET /v1/audit_templates`` - * ``GET /v1/audit_templates/detail`` - -* ``audit_template:get`` - Retrieve a specific audit template entity - - * ``GET /v1/audit_templates/`` - * ``GET /v1/audit_templates/`` - -* ``audit_template:create`` - Create an audit template entity - - * ``POST /v1/audit_templates`` - -* ``audit_template:delete`` - Delete an audit template entity - - * ``DELETE /v1/audit_templates/`` - * ``DELETE /v1/audit_templates/`` - -* ``audit_template:update`` - Update an audit template entity - - * ``PATCH /v1/audit_templates/`` - * ``PATCH /v1/audit_templates/`` - - -* ``audit:get_all``, ``audit:detail`` - List available audits - - * ``GET /v1/audits`` - * ``GET /v1/audits/detail`` - -* ``audit:get`` - Retrieve a specific audit entity - - * ``GET /v1/audits/`` - -* ``audit:create`` - Create an audit entity - - * ``POST /v1/audits`` - -* ``audit:delete`` - Delete an audit entity - - * ``DELETE /v1/audits/`` - -* ``audit:update`` - Update an audit entity - - * ``PATCH /v1/audits/`` - - -* ``action_plan:get_all``, ``action_plan:detail`` - List available action plans - - * ``GET /v1/action_plans`` - * ``GET /v1/action_plans/detail`` - -* ``action_plan:get`` - Retrieve a specific action plan entity - - * ``GET /v1/action_plans/`` - -* ``action_plan:delete`` - Delete an action plan entity - - * ``DELETE /v1/action_plans/`` - -* 
``action_plan:update`` - Update an action plan entity - - * ``PATCH /v1/action_plans/`` - - -* ``action:get_all``, ``action:detail`` - List available actions - - * ``GET /v1/actions`` - * ``GET /v1/actions/detail`` - -* ``action:get`` - Retrieve a specific action entity - - * ``GET /v1/actions/`` - - - -To limit an action to a particular role or roles, you list the roles like so :: - - { - "audit:create": ["role:admin", "role:superuser"] - } - -The above would add a rule that only allowed users that had roles of either -"admin" or "superuser" to launch an audit. diff --git a/doc/source/admin/ways-to-install.rst b/doc/source/admin/ways-to-install.rst deleted file mode 100644 index 43bd888..0000000 --- a/doc/source/admin/ways-to-install.rst +++ /dev/null @@ -1,162 +0,0 @@ -.. - Except where otherwise noted, this document is licensed under Creative - Commons Attribution 3.0 License. You can view the license at: - - https://creativecommons.org/licenses/by/3.0/ - -======================= -Ways to install Watcher -======================= - -This document describes some ways to install Watcher in order to use it. -If you are intending to develop on or with Watcher, -please read :doc:`../dev/environment`. - -Prerequisites -------------- - -The source install instructions specifically avoid using platform specific -packages, instead using the source for the code and the Python Package Index -(PyPi_). - -.. _PyPi: https://pypi.python.org/pypi - -It's expected that your system already has python2.7_, latest version of pip_, -and git_ available. - -.. _python2.7: https://www.python.org -.. _pip: https://pip.pypa.io/en/latest/installing/ -.. _git: https://git-scm.com/ - -Your system shall also have some additional system libraries: - - On Ubuntu (tested on 14.04LTS): - - .. code-block:: bash - - $ sudo apt-get install python-dev libssl-dev libmysqlclient-dev libffi-dev - - On Fedora-based distributions e.g., Fedora/RHEL/CentOS/Scientific Linux - (tested on CentOS 7.1): - - .. 
code-block:: bash - - $ sudo yum install gcc python-devel openssl-devel libffi-devel mysql-devel - - -Installing from Source ----------------------- - -Clone the Watcher repository: - -.. code-block:: bash - - $ git clone https://git.openstack.org/openstack/watcher.git - $ cd watcher - -Install the Watcher modules: - -.. code-block:: bash - - # python setup.py install - -The following commands should be available on the command-line path: - -* ``watcher-api`` the Watcher Web service used to handle RESTful requests -* ``watcher-decision-engine`` the Watcher Decision Engine used to build action - plans, according to optimization goals to achieve. -* ``watcher-applier`` the Watcher Applier module, used to apply action plan -* ``watcher-db-manage`` used to bootstrap Watcher data - -You will find sample configuration files in ``etc/watcher``: - -* ``watcher.conf.sample`` - -Install the Watcher modules dependencies: - -.. code-block:: bash - - # pip install -r requirements.txt - -From here, refer to :doc:`configuration` to declare Watcher as a new service -into Keystone and to configure its different modules. Once configured, you -should be able to run the Watcher services by issuing these commands: - -.. code-block:: bash - - $ watcher-api - $ watcher-decision-engine - $ watcher-applier - -By default, this will show logging on the console from which it was started. -Once started, you can use the `Watcher Client`_ to play with Watcher service. - -.. _`Watcher Client`: https://git.openstack.org/cgit/openstack/python-watcherclient - -Installing from packages: PyPI --------------------------------- - -Watcher package is available on PyPI repository. To install Watcher on your -system: - -.. code-block:: bash - - $ sudo pip install python-watcher - -The Watcher services along with its dependencies should then be automatically -installed on your system. 
- -Once installed, you still need to declare Watcher as a new service into -Keystone and to configure its different modules, which you can find described -in :doc:`configuration`. - - -Installing from packages: Debian (experimental) ------------------------------------------------ - -Experimental Debian packages are available on `Debian repositories`_. The best -way to use them is to install them into a Docker_ container. - -Here is single Dockerfile snippet you can use to run your Docker container: - -.. code-block:: bash - - FROM debian:experimental - MAINTAINER David TARDIVEL - - RUN apt-get update - RUN apt-get dist-upgrade -y - RUN apt-get install -y vim net-tools - RUN apt-get install -yt experimental watcher-api - - CMD ["/usr/bin/watcher-api"] - -Build your container from this Dockerfile: - -.. code-block:: bash - - $ docker build -t watcher/api . - -To run your container, execute this command: - -.. code-block:: bash - - $ docker run -d -p 9322:9322 watcher/api - -Check in your logs Watcher API is started - -.. code-block:: bash - - $ docker logs - -You can run similar container with Watcher Decision Engine (package -``watcher-decision-engine``) and with the Watcher Applier (package -``watcher-applier``). - -.. _Docker: https://www.docker.com/ -.. _`Debian repositories`: https://packages.debian.org/experimental/allpackages - - - - - diff --git a/doc/source/api/index.rst b/doc/source/api/index.rst deleted file mode 100644 index fd0ecfd..0000000 --- a/doc/source/api/index.rst +++ /dev/null @@ -1,4 +0,0 @@ -.. toctree:: - :maxdepth: 1 - - v1 diff --git a/doc/source/api/v1.rst b/doc/source/api/v1.rst deleted file mode 100644 index b702137..0000000 --- a/doc/source/api/v1.rst +++ /dev/null @@ -1,88 +0,0 @@ -.. - Except where otherwise noted, this document is licensed under Creative - Commons Attribution 3.0 License. 
You can view the license at: - - https://creativecommons.org/licenses/by/3.0/ - -==================== -RESTful Web API (v1) -==================== - -Goals -===== - -.. rest-controller:: watcher.api.controllers.v1.goal:GoalsController - :webprefix: /v1/goal - -.. autotype:: watcher.api.controllers.v1.goal.GoalCollection - :members: - -.. autotype:: watcher.api.controllers.v1.goal.Goal - :members: - -Strategies -========== - -.. rest-controller:: watcher.api.controllers.v1.strategy:StrategiesController - :webprefix: /v1/strategies - -.. autotype:: watcher.api.controllers.v1.strategy.StrategyCollection - :members: - -.. autotype:: watcher.api.controllers.v1.strategy.Strategy - :members: - -Audit Templates -=============== - -.. rest-controller:: watcher.api.controllers.v1.audit_template:AuditTemplatesController - :webprefix: /v1/audit_templates - -.. autotype:: watcher.api.controllers.v1.audit_template.AuditTemplateCollection - :members: - -.. autotype:: watcher.api.controllers.v1.audit_template.AuditTemplate - :members: - -Audits -====== - -.. rest-controller:: watcher.api.controllers.v1.audit:AuditsController - :webprefix: /v1/audits - -.. autotype:: watcher.api.controllers.v1.audit.AuditCollection - :members: - -.. autotype:: watcher.api.controllers.v1.audit.Audit - :members: - -Links -===== - -.. autotype:: watcher.api.controllers.link.Link - :members: - -Action Plans -============ - -.. rest-controller:: watcher.api.controllers.v1.action_plan:ActionPlansController - :webprefix: /v1/action_plans - -.. autotype:: watcher.api.controllers.v1.action_plan.ActionPlanCollection - :members: - -.. autotype:: watcher.api.controllers.v1.action_plan.ActionPlan - :members: - - -Actions -======= - -.. rest-controller:: watcher.api.controllers.v1.action:ActionsController - :webprefix: /v1/actions - -.. autotype:: watcher.api.controllers.v1.action.ActionCollection - :members: - -.. 
autotype:: watcher.api.controllers.v1.action.Action - :members: diff --git a/doc/source/architecture.rst b/doc/source/architecture.rst deleted file mode 100644 index b903a97..0000000 --- a/doc/source/architecture.rst +++ /dev/null @@ -1,464 +0,0 @@ -.. - Except where otherwise noted, this document is licensed under Creative - Commons Attribution 3.0 License. You can view the license at: - - https://creativecommons.org/licenses/by/3.0/ - -.. _architecture: - -=================== -System Architecture -=================== - - -This page presents the current technical Architecture of the Watcher system. - -.. _architecture_overview: - -Overview -======== - -Below you will find a diagram, showing the main components of Watcher: - -.. image:: ./images/architecture.svg - :width: 110% - - -.. _components_definition: - -Components -========== - -.. _amqp_bus_definition: - -AMQP Bus --------- - -The AMQP message bus handles internal asynchronous communications between the -different Watcher components. - -.. _cluster_datasource_definition: - -Datasource ----------- - -This component stores the metrics related to the cluster. - -It can potentially rely on any appropriate storage system (InfluxDB, OpenTSDB, -MongoDB,...) but will probably be more performant when using -`Time Series Databases `_ -which are optimized for handling time series data, which are arrays of numbers -indexed by time (a datetime or a datetime range). - -.. _archi_watcher_api_definition: - -Watcher API ------------ - -This component implements the REST API provided by the Watcher system to the -external world. - -It enables the :ref:`Administrator ` of a -:ref:`Cluster ` to control and monitor the Watcher system -via any interaction mechanism connected to this API: - -- :ref:`CLI ` -- Horizon plugin -- Python SDK - -You can also read the detailed description of `Watcher API`_. - -.. 
_archi_watcher_applier_definition: - -Watcher Applier ---------------- - -This component is in charge of executing the -:ref:`Action Plan ` built by the -:ref:`Watcher Decision Engine `. - -It connects to the :ref:`message bus ` and launches the -:ref:`Action Plan ` whenever a triggering message is -received on a dedicated AMQP queue. - -The triggering message contains the Action Plan UUID. - -It then gets the detailed information about the -:ref:`Action Plan ` from the -:ref:`Watcher Database ` which contains the list -of :ref:`Actions ` to launch. - -It then loops on each :ref:`Action `, gets the associated -class and calls the execute() method of this class. -Most of the time, this method will first request a token to the Keystone API -and if it is allowed, sends a request to the REST API of the OpenStack service -which handles this kind of :ref:`atomic Action `. - -Note that as soon as :ref:`Watcher Applier ` starts -handling a given :ref:`Action ` from the list, a -notification message is sent on the :ref:`message bus ` -indicating that the state of the action has changed to **ONGOING**. - -If the :ref:`Action ` is successful, -the :ref:`Watcher Applier ` sends a notification -message on :ref:`the bus ` informing the other components -of this. - - -If the :ref:`Action ` fails, the -:ref:`Watcher Applier ` tries to rollback to the -previous state of the :ref:`Managed resource ` -(i.e. before the command was sent to the underlying OpenStack service). - -.. _archi_watcher_cli_definition: - -Watcher CLI ------------ - -The watcher command-line interface (CLI) can be used to interact with the -Watcher system in order to control it or to know its current status. - -Please, read `the detailed documentation about Watcher CLI -`_. - -.. _archi_watcher_dashboard_definition: - -Watcher Dashboard ------------------ - -The Watcher Dashboard can be used to interact with the Watcher system through -Horizon in order to control it or to know its current status. 
- -Please, read `the detailed documentation about Watcher Dashboard -`_. - -.. _archi_watcher_database_definition: - -Watcher Database ----------------- - -This database stores all the Watcher domain objects which can be requested -by the :ref:`Watcher API ` or the -:ref:`Watcher CLI `: - -- :ref:`Goals ` -- :ref:`Strategies ` -- :ref:`Audit templates ` -- :ref:`Audits ` -- :ref:`Action plans ` -- :ref:`Efficacy indicators ` via the Action - Plan API. -- :ref:`Actions ` - -The Watcher domain being here "*optimization of some resources provided by an -OpenStack system*". - -.. _archi_watcher_decision_engine_definition: - -Watcher Decision Engine ------------------------ - -This component is responsible for computing a set of potential optimization -:ref:`Actions ` in order to fulfill -the :ref:`Goal ` of an :ref:`Audit `. - -It first reads the parameters of the :ref:`Audit ` to know -the :ref:`Goal ` to achieve. - -Unless specified, it then selects the most appropriate :ref:`strategy -` from the list of available strategies achieving this -goal. - -The :ref:`Strategy ` is then dynamically loaded (via -`stevedore `_). The -:ref:`Watcher Decision Engine ` executes -the strategy. - -In order to compute the potential :ref:`Solution ` for the -Audit, the :ref:`Strategy ` relies on different sets of -data: - -- :ref:`Cluster data models ` that are - periodically synchronized through pluggable cluster data model collectors. - These models contain the current state of various - :ref:`Managed resources ` (e.g., the data stored - in the Nova database). These models gives a strategy the ability to reason on - the current state of a given :ref:`cluster `. -- The data stored in the :ref:`Cluster Datasource - ` which provides information about the past of - the :ref:`Cluster `. - -Here below is a sequence diagram showing how the Decision Engine builds and -maintains the :ref:`cluster data models ` that -are used by the strategies. - -.. 
image:: ./images/sequence_architecture_cdmc_sync.png - :width: 100% - -The execution of a strategy then yields a solution composed of a set of -:ref:`Actions ` as well as a set of :ref:`efficacy -indicators `. - -These :ref:`Actions ` are scheduled in time by the -:ref:`Watcher Planner ` (i.e., it generates an -:ref:`Action Plan `). - -.. _data_model: - -Data model -========== - -The following diagram shows the data model of Watcher, especially the -functional dependency of objects from the actors (Admin, Customer) point of -view (Goals, Audits, Action Plans, ...): - -.. image:: ./images/functional_data_model.svg - :width: 100% - -Here below is a diagram representing the main objects in Watcher from a -database perspective: - -.. image:: ./images/watcher_db_schema_diagram.png - - -.. _sequence_diagrams: - -Sequence diagrams -================= - -The following paragraph shows the messages exchanged between the different -components of Watcher for the most often used scenarios. - -.. _sequence_diagrams_create_audit_template: - -Create a new Audit Template ---------------------------- - -The :ref:`Administrator ` first creates an -:ref:`Audit template ` providing at least the -following parameters: - -- A name -- A goal to achieve -- An optional strategy - -.. image:: ./images/sequence_create_audit_template.png - :width: 100% - -The `Watcher API`_ makes sure that both the specified goal (mandatory) and -its associated strategy (optional) are registered inside the :ref:`Watcher -Database ` before storing a new audit template in -the :ref:`Watcher Database `. - -.. _sequence_diagrams_create_and_launch_audit: - -Create and launch a new Audit ------------------------------ - -The :ref:`Administrator ` can then launch a new -:ref:`Audit ` by providing at least the unique UUID of the -previously created :ref:`Audit template `: - -.. 
image:: ./images/sequence_create_and_launch_audit.png - :width: 100% - -The :ref:`Administrator ` also can specify type of -Audit and interval (in case of CONTINUOUS type). There is two types of Audit: -ONESHOT and CONTINUOUS. Oneshot Audit is launched once and if it succeeded -executed new action plan list will be provided. Continuous Audit creates -action plans with specified interval (in seconds); if action plan -has been created, all previous action plans get CANCELLED state. - -A message is sent on the :ref:`AMQP bus ` which triggers -the Audit in the -:ref:`Watcher Decision Engine `: - -.. image:: ./images/sequence_trigger_audit_in_decision_engine.png - :width: 100% - -The :ref:`Watcher Decision Engine ` reads -the Audit parameters from the -:ref:`Watcher Database `. It instantiates the -appropriate :ref:`strategy ` (using entry points) -given both the :ref:`goal ` and the strategy associated to the -parent :ref:`audit template ` of the :ref:`audit -`. If no strategy is associated to the audit template, the -strategy is dynamically selected by the Decision Engine. - -The :ref:`Watcher Decision Engine ` also -builds the :ref:`Cluster Data Model `. This -data model is needed by the :ref:`Strategy ` to know the -current state and topology of the audited -:ref:`OpenStack cluster `. - -The :ref:`Watcher Decision Engine ` calls -the **execute()** method of the instantiated -:ref:`Strategy ` and provides the data model as an input -parameter. This method computes a :ref:`Solution ` to -achieve the goal and returns it to the -:ref:`Decision Engine `. At this point, -actions are not scheduled yet. - -The :ref:`Watcher Decision Engine ` -dynamically loads the :ref:`Watcher Planner ` -implementation which is configured in Watcher (via entry points) and calls the -**schedule()** method of this class with the solution as an input parameter. 
-This method finds an appropriate scheduling of -:ref:`Actions ` taking into account some scheduling rules -(such as priorities between actions). -It generates a new :ref:`Action Plan ` with status -**RECOMMENDED** and saves it into the :ref:`Watcher Database -`. The saved action plan is now a scheduled flow -of actions to which a global efficacy is associated alongside a number of -:ref:`Efficacy Indicators ` as specified by the -related :ref:`goal `. - -If every step executed successfully, the -:ref:`Watcher Decision Engine ` updates -the current status of the Audit to **SUCCEEDED** in the -:ref:`Watcher Database ` and sends a notification -on the bus to inform other components that the :ref:`Audit ` -was successful. - -This internal workflow the Decision Engine follows to conduct an audit can be -seen in the sequence diagram here below: - -.. image:: ./images/sequence_from_audit_execution_to_actionplan_creation.png - :width: 100% - -.. _sequence_diagrams_launch_action_plan: - -Launch Action Plan ------------------- - -The :ref:`Administrator ` can then launch the -recommended :ref:`Action Plan `: - -.. image:: ./images/sequence_launch_action_plan.png - :width: 100% - -A message is sent on the :ref:`AMQP bus ` which triggers -the :ref:`Action Plan ` in the -:ref:`Watcher Applier `: - -.. image:: ./images/sequence_launch_action_plan_in_applier.png - :width: 100% - -The :ref:`Watcher Applier ` will get the -description of the flow of :ref:`Actions ` from the -:ref:`Watcher Database ` and for each -:ref:`Action ` it will instantiate a corresponding -:ref:`Action ` handler python class. - -The :ref:`Watcher Applier ` will then call the -following methods of the :ref:`Action ` handler: - -- **validate_parameters()**: this method will make sure that all the - provided input parameters are valid: - - - If all parameters are valid, the Watcher Applier moves on to the next - step. - - If it is not, an error is raised and the action is not executed. 
A - notification is sent on the bus informing other components of the - failure. - -- **preconditions()**: this method will make sure that all conditions are met - before executing the action (for example, it makes sure that an instance - still exists before trying to migrate it). -- **execute()**: this method is what triggers real commands on other - OpenStack services (such as Nova, ...) in order to change target resource - state. If the action is successfully executed, a notification message is - sent on the bus indicating that the new state of the action is - **SUCCEEDED**. - -If every action of the action flow has been executed successfully, a -notification is sent on the bus to indicate that the whole -:ref:`Action Plan ` has **SUCCEEDED**. - - -.. _state_machine_diagrams: - -State Machine diagrams -====================== - -.. _audit_state_machine: - -Audit State Machine -------------------- - -An :ref:`Audit ` has a life-cycle and its current state may -be one of the following: - -- **PENDING** : a request for an :ref:`Audit ` has been - submitted (either manually by the - :ref:`Administrator ` or automatically via some - event handling mechanism) and is in the queue for being processed by the - :ref:`Watcher Decision Engine ` -- **ONGOING** : the :ref:`Audit ` is currently being - processed by the - :ref:`Watcher Decision Engine ` -- **SUCCEEDED** : the :ref:`Audit ` has been executed - successfully and at least one solution was found -- **FAILED** : an error occurred while executing the - :ref:`Audit ` -- **DELETED** : the :ref:`Audit ` is still stored in the - :ref:`Watcher database ` but is not returned - any more through the Watcher APIs. 
-- **CANCELLED** : the :ref:`Audit ` was in **PENDING** or - **ONGOING** state and was cancelled by the - :ref:`Administrator ` -- **SUSPENDED** : the :ref:`Audit ` was in **ONGOING** - state and was suspended by the - :ref:`Administrator ` - -The following diagram shows the different possible states of an -:ref:`Audit ` and what event makes the state change to a new -value: - -.. image:: ./images/audit_state_machine.png - :width: 100% - -.. _action_plan_state_machine: - -Action Plan State Machine -------------------------- - -An :ref:`Action Plan ` has a life-cycle and its current -state may be one of the following: - -- **RECOMMENDED** : the :ref:`Action Plan ` is waiting - for a validation from the :ref:`Administrator ` -- **PENDING** : a request for an :ref:`Action Plan ` - has been submitted (due to an - :ref:`Administrator ` executing an - :ref:`Audit `) and is in the queue for - being processed by the :ref:`Watcher Applier ` -- **ONGOING** : the :ref:`Action Plan ` is currently - being processed by the :ref:`Watcher Applier ` -- **SUCCEEDED** : the :ref:`Action Plan ` has been - executed successfully (i.e. all :ref:`Actions ` that it - contains have been executed successfully) -- **FAILED** : an error occurred while executing the - :ref:`Action Plan ` -- **DELETED** : the :ref:`Action Plan ` is still - stored in the :ref:`Watcher database ` but is - not returned any more through the Watcher APIs. -- **CANCELLED** : the :ref:`Action Plan ` was in - **RECOMMENDED**, **PENDING** or **ONGOING** state and was cancelled by the - :ref:`Administrator ` -- **SUPERSEDED** : the :ref:`Action Plan ` was in - RECOMMENDED state and was automatically superseded by Watcher, due to an - expiration delay or an update of the - :ref:`Cluster data model ` - - -The following diagram shows the different possible states of an -:ref:`Action Plan ` and what event makes the state -change to a new value: - -.. image:: ./images/action_plan_state_machine.png - :width: 100% - - - -.. 
_Watcher API: webapi/v1.html diff --git a/doc/source/conf.py b/doc/source/conf.py deleted file mode 100755 index b73eca8..0000000 --- a/doc/source/conf.py +++ /dev/null @@ -1,145 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os -import sys - -from watcher import version as watcher_version -from watcher import objects - -objects.register_all() - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -sys.path.insert(0, os.path.abspath('../../')) -sys.path.insert(0, os.path.abspath('../')) -sys.path.insert(0, os.path.abspath('./')) - -# -- General configuration ---------------------------------------------------- - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. 
-extensions = [ - 'oslo_config.sphinxconfiggen', - 'openstackdocstheme', - 'sphinx.ext.autodoc', - 'sphinx.ext.viewcode', - 'sphinxcontrib.httpdomain', - 'sphinxcontrib.pecanwsme.rest', - 'stevedore.sphinxext', - 'wsmeext.sphinxext', - 'ext.term', - 'ext.versioned_notifications', -] - -wsme_protocols = ['restjson'] -config_generator_config_file = '../../etc/watcher/watcher-config-generator.conf' -sample_config_basename = 'watcher' - -# autodoc generation is a bit aggressive and a nuisance when doing heavy -# text edit cycles. -# execute "export SPHINX_DEBUG=1" in your terminal to disable - -# The suffix of source filenames. -source_suffix = '.rst' - -# The master toctree document. -master_doc = 'index' - -# General information about the project. -project = u'Watcher' -copyright = u'OpenStack Foundation' - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# The short X.Y version. -# The full version, including alpha/beta/rc tags. -release = watcher_version.version_info.release_string() -# The short X.Y version. -version = watcher_version.version_info.version_string() - -# A list of ignored prefixes for module index sorting. -modindex_common_prefix = ['watcher.'] - -exclude_patterns = [ - # The man directory includes some snippet files that are included - # in other documents during the build but that should not be - # included in the toctree themselves, so tell Sphinx to ignore - # them when scanning for input files. - 'man/footer.rst', - 'man/general-options.rst', - 'strategies/strategy-template.rst', - 'image_src/plantuml/README.rst', -] - -# If true, '()' will be appended to :func: etc. cross-reference text. -add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). 
-add_module_names = True - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' - -# -- Options for man page output -------------------------------------------- - -# Grouping the document tree for man pages. -# List of tuples 'sourcefile', 'target', u'title', u'Authors name', 'manual' - -man_pages = [ - ('man/watcher-api', 'watcher-api', u'Watcher API Server', - [u'OpenStack'], 1), - ('man/watcher-applier', 'watcher-applier', u'Watcher Applier', - [u'OpenStack'], 1), - ('man/watcher-db-manage', 'watcher-db-manage', - u'Watcher Db Management Utility', [u'OpenStack'], 1), - ('man/watcher-decision-engine', 'watcher-decision-engine', - u'Watcher Decision Engine', [u'OpenStack'], 1), -] - -# -- Options for HTML output -------------------------------------------------- - -# The theme to use for HTML and HTML Help pages. Major themes that come with -# Sphinx are currently 'default' and 'sphinxdoc'. -# html_theme_path = ["."] -# html_theme = '_theme' -html_theme = 'openstackdocs' -# html_static_path = ['static'] -# html_theme_options = {} - -# Output file base name for HTML help builder. -htmlhelp_basename = '%sdoc' % project - -html_last_updated_fmt = '%Y-%m-%d %H:%M' - -#openstackdocstheme options -repository_name = 'openstack/watcher' -bug_project = 'watcher' -bug_tag = '' - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, author, documentclass -# [howto/manual]). -latex_documents = [ - ('index', - '%s.tex' % project, - u'%s Documentation' % project, - u'OpenStack Foundation', 'manual'), -] - -# Example configuration for intersphinx: refer to the Python standard library. 
-# intersphinx_mapping = {'http://docs.python.org/': None} diff --git a/doc/source/config-generator.conf b/doc/source/config-generator.conf deleted file mode 100644 index e704259..0000000 --- a/doc/source/config-generator.conf +++ /dev/null @@ -1 +0,0 @@ -../../etc/watcher/watcher-config-generator.conf diff --git a/doc/source/contributor/contributing.rst b/doc/source/contributor/contributing.rst deleted file mode 100644 index ec0a284..0000000 --- a/doc/source/contributor/contributing.rst +++ /dev/null @@ -1,72 +0,0 @@ -.. - Except where otherwise noted, this document is licensed under Creative - Commons Attribution 3.0 License. You can view the license at: - - https://creativecommons.org/licenses/by/3.0/ - -.. _contributing: - -======================= -Contributing to Watcher -======================= - -If you're interested in contributing to the Watcher project, -the following will help get you started. - -Contributor License Agreement ------------------------------ - -.. index:: - single: license; agreement - -In order to contribute to the Watcher project, you need to have -signed OpenStack's contributor's agreement. - -.. seealso:: - - * http://docs.openstack.org/infra/manual/developers.html - * http://wiki.openstack.org/CLA - -LaunchPad Project ------------------ - -Most of the tools used for OpenStack depend on a launchpad.net ID for -authentication. After signing up for a launchpad account, join the -"openstack" team to have access to the mailing list and receive -notifications of important events. - -.. 
seealso:: - - * http://launchpad.net - * http://launchpad.net/watcher - * http://launchpad.net/~openstack - - -Project Hosting Details ------------------------ - -Bug tracker - http://launchpad.net/watcher - -Mailing list (prefix subjects with ``[watcher]`` for faster responses) - http://lists.openstack.org/cgi-bin/mailman/listinfo/openstack-dev - -Wiki - http://wiki.openstack.org/Watcher - -Code Hosting - https://git.openstack.org/cgit/openstack/watcher - -Code Review - https://review.openstack.org/#/q/status:open+project:openstack/watcher,n,z - -IRC Channel - ``#openstack-watcher`` (changelog_) - -Weekly Meetings - On Wednesdays at 14:00 UTC on even weeks in the ``#openstack-meeting-4`` - IRC channel, 13:00 UTC on odd weeks in the ``#openstack-meeting-alt`` - IRC channel (`meetings logs`_) - -.. _changelog: http://eavesdrop.openstack.org/irclogs/%23openstack-watcher/ -.. _meetings logs: http://eavesdrop.openstack.org/meetings/watcher/ diff --git a/doc/source/contributor/devstack.rst b/doc/source/contributor/devstack.rst deleted file mode 100644 index d27f6a7..0000000 --- a/doc/source/contributor/devstack.rst +++ /dev/null @@ -1,241 +0,0 @@ -.. - Except where otherwise noted, this document is licensed under Creative - Commons Attribution 3.0 License. You can view the license at: - - https://creativecommons.org/licenses/by/3.0/ - -============================================= -Set up a development environment via DevStack -============================================= - -Watcher is currently able to optimize compute resources - specifically Nova -compute hosts - via operations such as live migrations. In order for you to -fully be able to exercise what Watcher can do, it is necessary to have a -multinode environment to use. - -You can set up the Watcher services quickly and easily using a Watcher -DevStack plugin. See `PluginModelDocs`_ for information on DevStack's plugin -model. 
To enable the Watcher plugin with DevStack, add the following to the -`[[local|localrc]]` section of your controller's `local.conf` to enable the -Watcher plugin:: - - enable_plugin watcher git://git.openstack.org/openstack/watcher - -For more detailed instructions, see `Detailed DevStack Instructions`_. Check -out the `DevStack documentation`_ for more information regarding DevStack. - -.. _PluginModelDocs: http://docs.openstack.org/developer/devstack/plugins.html -.. _DevStack documentation: http://docs.openstack.org/developer/devstack/ - -Detailed DevStack Instructions -============================== - -#. Obtain N (where N >= 1) servers (virtual machines preferred for DevStack). - One of these servers will be the controller node while the others will be - compute nodes. N is preferably >= 3 so that you have at least 2 compute - nodes, but in order to stand up the Watcher services only 1 server is - needed (i.e., no computes are needed if you want to just experiment with - the Watcher services). These servers can be VMs running on your local - machine via VirtualBox if you prefer. DevStack currently recommends that - you use Ubuntu 14.04 LTS. The servers should also have connections to the - same network such that they are all able to communicate with one another. - -#. For each server, clone the DevStack repository and create the stack user:: - - sudo apt-get update - sudo apt-get install git - git clone https://git.openstack.org/openstack-dev/devstack - sudo ./devstack/tools/create-stack-user.sh - - Now you have a stack user that is used to run the DevStack processes. You - may want to give your stack user a password to allow SSH via a password:: - - sudo passwd stack - -#. Switch to the stack user and clone the DevStack repo again:: - - sudo su stack - cd ~ - git clone https://git.openstack.org/openstack-dev/devstack - -#. For each compute node, copy the provided `local.conf.compute`_ example file - to the compute node's system at ~/devstack/local.conf. 
Make sure the - HOST_IP and SERVICE_HOST values are changed appropriately - i.e., HOST_IP - is set to the IP address of the compute node and SERVICE_HOST is set to the - IP address of the controller node. - - If you need specific metrics collected (or want to use something other - than Ceilometer), be sure to configure it. For example, in the - `local.conf.compute`_ example file, the appropriate ceilometer plugins and - services are enabled and disabled. If you were using something other than - Ceilometer, then you would likely want to configure it likewise. The - example file also sets the compute monitors nova configuration option to - use the CPU virt driver. If you needed other metrics, it may be necessary - to configure similar configuration options for the projects providing those - metrics. - -#. For the controller node, copy the provided `local.conf.controller`_ example - file to the controller node's system at ~/devstack/local.conf. Make sure - the HOST_IP value is changed appropriately - i.e., HOST_IP is set to the IP - address of the controller node. - - Note: if you want to use another Watcher git repository (such as a local - one), then change the enable plugin line:: - - enable_plugin watcher [optional_branch] - - If you do this, then the Watcher DevStack plugin will try to pull the - python-watcherclient repo from /../, so either make - sure that is also available or specify WATCHERCLIENT_REPO in the local.conf - file. - - Note: if you want to use a specific branch, specify WATCHER_BRANCH in the - local.conf file. By default it will use the master branch. - - Note: watcher-api will default run under apache/httpd, set the variable - WATCHER_USE_MOD_WSGI=FALSE if you do not wish to run under apache/httpd. - For development environment it is suggested to set WATHCER_USE_MOD_WSGI - to FALSE. For Production environment it is suggested to keep it at the - default TRUE value. - -#. Start stacking from the controller node:: - - ./devstack/stack.sh - -#. 
Start stacking on each of the compute nodes using the same command. - -#. Configure the environment for live migration via NFS. See the - `Multi-Node DevStack Environment`_ section for more details. - -.. _local.conf.controller: https://github.com/openstack/watcher/tree/master/devstack/local.conf.controller -.. _local.conf.compute: https://github.com/openstack/watcher/tree/master/devstack/local.conf.compute - -Multi-Node DevStack Environment -=============================== - -Since deploying Watcher with only a single compute node is not very useful, a -few tips are given here for enabling a multi-node environment with live -migration. - -Configuring NFS Server ----------------------- - -If you would like to use live migration for shared storage, then the controller -can serve as the NFS server if needed:: - - sudo apt-get install nfs-kernel-server - sudo mkdir -p /nfs/instances - sudo chown stack:stack /nfs/instances - -Add an entry to `/etc/exports` with the appropriate gateway and netmask -information:: - - /nfs/instances /(rw,fsid=0,insecure,no_subtree_check,async,no_root_squash) - -Export the NFS directories:: - - sudo exportfs -ra - -Make sure the NFS server is running:: - - sudo service nfs-kernel-server status - -If the server is not running, then start it:: - - sudo service nfs-kernel-server start - -Configuring NFS on Compute Node -------------------------------- - -Each compute node needs to use the NFS server to hold the instance data:: - - sudo apt-get install rpcbind nfs-common - mkdir -p /opt/stack/data/instances - sudo mount :/nfs/instances /opt/stack/data/instances - -If you would like to have the NFS directory automatically mounted on reboot, -then add the following to `/etc/fstab`:: - - :/nfs/instances /opt/stack/data/instances nfs auto 0 0 - -Edit `/etc/libvirt/libvirtd.conf` to make sure the following values are set:: - - listen_tls = 0 - listen_tcp = 1 - auth_tcp = "none" - -Edit `/etc/default/libvirt-bin`:: - - libvirtd_opts="-d -l" - 
-Restart the libvirt service:: - - sudo service libvirt-bin restart - -Setting up SSH keys between compute nodes to enable live migration ------------------------------------------------------------------- - -In order for live migration to work, SSH keys need to be exchanged between -each compute node: - -1. The SOURCE root user's public RSA key (likely in /root/.ssh/id_rsa.pub) - needs to be in the DESTINATION stack user's authorized_keys file - (~stack/.ssh/authorized_keys). This can be accomplished by manually - copying the contents from the file on the SOURCE to the DESTINATION. If - you have a password configured for the stack user, then you can use the - following command to accomplish the same thing:: - - ssh-copy-id -i /root/.ssh/id_rsa.pub stack@DESTINATION - -2. The DESTINATION host's public ECDSA key (/etc/ssh/ssh_host_ecdsa_key.pub) - needs to be in the SOURCE root user's known_hosts file - (/root/.ssh/known_hosts). This can be accomplished by running the - following on the SOURCE machine (hostname must be used):: - - ssh-keyscan -H DEST_HOSTNAME | sudo tee -a /root/.ssh/known_hosts - -In essence, this means that every compute node's root user's public RSA key -must exist in every other compute node's stack user's authorized_keys file and -every compute node's public ECDSA key needs to be in every other compute -node's root user's known_hosts file. - -Disable serial console ----------------------- - -Serial console needs to be disabled for live migration to work. - -On both the controller and compute node, in /etc/nova/nova.conf - -[serial_console] -enabled = False - -Alternatively, in devstack's local.conf: - -[[post-config|$NOVA_CONF]] -[serial_console] -#enabled=false - - -VNC server configuration ------------------------- - -The VNC server listening parameter needs to be set to any address so -that the server can accept connections from all of the compute nodes. 
- -On both the controller and compute node, in /etc/nova/nova.conf - -vncserver_listen = 0.0.0.0 - -Alternatively, in devstack's local.conf: - -VNCSERVER_LISTEN=0.0.0.0 - - -Environment final checkup -------------------------- - -If you are willing to make sure everything is in order in your DevStack -environment, you can run the Watcher Tempest tests which will validate its API -but also that you can perform the typical Watcher workflows. To do so, have a -look at the :ref:`Tempest tests ` section which will explain to -you how to run them. diff --git a/doc/source/contributor/environment.rst b/doc/source/contributor/environment.rst deleted file mode 100644 index 7404a7b..0000000 --- a/doc/source/contributor/environment.rst +++ /dev/null @@ -1,275 +0,0 @@ -.. - Except where otherwise noted, this document is licensed under Creative - Commons Attribution 3.0 License. You can view the license at: - - https://creativecommons.org/licenses/by/3.0/ - -.. _watcher_developement_environment: - -========================================= -Set up a development environment manually -========================================= - -This document describes getting the source from watcher `Git repository`_ -for development purposes. - -To install Watcher from packaging, refer instead to Watcher `User -Documentation`_. - -.. _`Git Repository`: https://git.openstack.org/cgit/openstack/watcher -.. _`User Documentation`: https://docs.openstack.org/watcher/latest/ - -Prerequisites -============= - -This document assumes you are using Ubuntu or Fedora, and that you have the -following tools available on your system: - -- Python_ 2.7 and 3.4 -- git_ -- setuptools_ -- pip_ -- msgfmt (part of the gettext package) -- virtualenv and virtualenvwrapper_ - -**Reminder**: If you're successfully using a different platform, or a -different version of the above, please document your configuration here! - -.. _Python: https://www.python.org/ -.. _git: https://git-scm.com/ -.. 
_setuptools: https://pypi.python.org/pypi/setuptools -.. _virtualenvwrapper: https://virtualenvwrapper.readthedocs.io/en/latest/install.html - -Getting the latest code -======================= - -Make a clone of the code from our `Git repository`: - -.. code-block:: bash - - $ git clone https://git.openstack.org/openstack/watcher.git - -When that is complete, you can: - -.. code-block:: bash - - $ cd watcher - -Installing dependencies -======================= - -Watcher maintains two lists of dependencies:: - - requirements.txt - test-requirements.txt - -The first is the list of dependencies needed for running Watcher, the second -list includes dependencies used for active development and testing of Watcher -itself. - -These dependencies can be installed from PyPi_ using the Python tool pip_. - -.. _PyPi: http://pypi.python.org/ -.. _pip: http://pypi.python.org/pypi/pip - -However, your system *may* need additional dependencies that `pip` (and by -extension, PyPi) cannot satisfy. These dependencies should be installed -prior to using `pip`, and the installation method may vary depending on -your platform. - -* Ubuntu 14.04:: - - $ sudo apt-get install python-dev libssl-dev libmysqlclient-dev libffi-dev - -* Fedora 19+:: - - $ sudo yum install openssl-devel libffi-devel mysql-devel - -* CentOS 7:: - - $ sudo yum install gcc python-devel libxml2-devel libxslt-devel mariadb-devel - -PyPi Packages and VirtualEnv ----------------------------- - -We recommend establishing a virtualenv to run Watcher within. virtualenv -limits the Python environment to just what you're installing as dependencies, -useful to keep a clean environment for working on Watcher. - -.. 
code-block:: bash - - $ mkvirtualenv watcher - $ git clone https://git.openstack.org/openstack/watcher - - # Use 'python setup.py' to link Watcher into Python's site-packages - $ cd watcher && python setup.py install - - # Install the dependencies for running Watcher - $ pip install -r ./requirements.txt - - # Install the dependencies for developing, testing, and running Watcher - $ pip install -r ./test-requirements.txt - -This will create a local virtual environment in the directory ``$WORKON_HOME``. -The virtual environment can be disabled using the command: - -.. code-block:: bash - - $ deactivate - -You can re-activate this virtualenv for your current shell using: - -.. code-block:: bash - - $ workon watcher - -For more information on virtual environments, see virtualenv_. - -.. _virtualenv: http://www.virtualenv.org/ - - - -Verifying Watcher is set up -=========================== - -Once set up, either directly or within a virtualenv, you should be able to -invoke Python and import the libraries. If you're using a virtualenv, don't -forget to activate it: - -.. code-block:: bash - - $ workon watcher - -You should then be able to `import watcher` using Python without issue: - -.. code-block:: bash - - $ python -c "import watcher" - -If you can import watcher without a traceback, you should be ready to develop. - -Run Watcher tests -================= - -Watcher provides both :ref:`unit tests ` and -:ref:`functional/tempest tests `. Please refer to :doc:`testing` -to understand how to run them. - - -Build the Watcher documentation -=============================== - -You can easily build the HTML documentation from ``doc/source`` files, by using -``tox``: - -.. code-block:: bash - - $ workon watcher - - (watcher) $ cd watcher - (watcher) $ tox -edocs - -The HTML files are available into ``doc/build`` directory. - - -Configure the Watcher services -============================== - -Watcher services require a configuration file. 
Use tox to generate -a sample configuration file that can be used to get started: - -.. code-block:: bash - - $ tox -e genconfig - $ cp etc/watcher.conf.sample etc/watcher.conf - -Most of the default configuration should be enough to get you going, but you -still need to configure the following sections: - -- The ``[database]`` section to configure the - :ref:`Watcher database ` -- The ``[keystone_authtoken]`` section to configure the - :ref:`Identity service ` i.e. Keystone -- The ``[watcher_messaging]`` section to configure the OpenStack AMQP-based - message bus - -So if you need some more details on how to configure one or more of these -sections, please do have a look at :doc:`../deploy/configuration` before -continuing. - - -Create Watcher SQL database -=========================== - -When initially getting set up, after you've configured which databases to use, -you're probably going to need to run the following to your database schema in -place: - -.. code-block:: bash - - $ workon watcher - - (watcher) $ watcher-db-manage create_schema - - -Running Watcher services -======================== - -To run the Watcher API service, use: - -.. code-block:: bash - - $ workon watcher - - (watcher) $ watcher-api - -To run the Watcher Decision Engine service, use: - -.. code-block:: bash - - $ workon watcher - - (watcher) $ watcher-decision-engine - -To run the Watcher Applier service, use: - -.. code-block:: bash - - $ workon watcher - - (watcher) $ watcher-applier - -Default configuration of these services are available into ``/etc/watcher`` -directory. See :doc:`../deploy/configuration` for details on how Watcher is -configured. By default, Watcher is configured with SQL backends. - - -Interact with Watcher -===================== - -You can also interact with Watcher through its REST API. 
There is a Python -Watcher client library `python-watcherclient`_ which interacts exclusively -through the REST API, and which Watcher itself uses to provide its command-line -interface. - -.. _`python-watcherclient`: https://github.com/openstack/python-watcherclient - -There is also an Horizon plugin for Watcher `watcher-dashboard`_ which -allows to interact with Watcher through a web-based interface. - -.. _`watcher-dashboard`: https://github.com/openstack/watcher-dashboard - - -Exercising the Watcher Services locally -======================================= - -If you would like to exercise the Watcher services in isolation within a local -virtual environment, you can do this without starting any other OpenStack -services. For example, this is useful for rapidly prototyping and debugging -interactions over the RPC channel, testing database migrations, and so forth. - -You will find in the `watcher-tools`_ project, Ansible playbooks and Docker -template files to easily play with Watcher services within a minimal OpenStack -isolated environment (Identity, Message Bus, SQL database, Horizon, ...). - -.. _`watcher-tools`: https://github.com/b-com/watcher-tools diff --git a/doc/source/contributor/index.rst b/doc/source/contributor/index.rst deleted file mode 100644 index eddf972..0000000 --- a/doc/source/contributor/index.rst +++ /dev/null @@ -1,8 +0,0 @@ -.. toctree:: - :maxdepth: 1 - - environment - devstack - notifications - testing - rally_link diff --git a/doc/source/contributor/notifications.rst b/doc/source/contributor/notifications.rst deleted file mode 100644 index 6ee7339..0000000 --- a/doc/source/contributor/notifications.rst +++ /dev/null @@ -1,13 +0,0 @@ -.. - Except where otherwise noted, this document is licensed under Creative - Commons Attribution 3.0 License. You can view the license at: - - https://creativecommons.org/licenses/by/3.0/ - -.. _watcher_notifications: - -======================== -Notifications in Watcher -======================== - -.. 
versioned_notifications:: diff --git a/doc/source/contributor/plugin/action-plugin.rst b/doc/source/contributor/plugin/action-plugin.rst deleted file mode 100644 index 29e9bcc..0000000 --- a/doc/source/contributor/plugin/action-plugin.rst +++ /dev/null @@ -1,219 +0,0 @@ -.. - Except where otherwise noted, this document is licensed under Creative - Commons Attribution 3.0 License. You can view the license at: - - https://creativecommons.org/licenses/by/3.0/ - -.. _implement_action_plugin: - -================== -Build a new action -================== - -Watcher Applier has an external :ref:`action ` plugin -interface which gives anyone the ability to integrate an external -:ref:`action ` in order to extend the initial set of actions -Watcher provides. - -This section gives some guidelines on how to implement and integrate custom -actions with Watcher. - - -Creating a new plugin -===================== - -First of all you have to extend the base :py:class:`BaseAction` class which -defines a set of abstract methods and/or properties that you will have to -implement: - - - The :py:attr:`~.BaseAction.schema` is an abstract property that you have to - implement. This is the first function to be called by the - :ref:`applier ` before any further processing - and its role is to validate the input parameters that were provided to it. - - The :py:meth:`~.BaseAction.pre_condition` is called before the execution of - an action. This method is a hook that can be used to perform some - initializations or to make some more advanced validation on its input - parameters. If you wish to block the execution based on this factor, you - simply have to ``raise`` an exception. - - The :py:meth:`~.BaseAction.post_condition` is called after the execution of - an action. As this function is called regardless of whether an action - succeeded or not, this can prove itself useful to perform cleanup - operations. - - The :py:meth:`~.BaseAction.execute` is the main component of an action. 
- This is where you should implement the logic of your action. - - The :py:meth:`~.BaseAction.revert` allows you to roll back the targeted - resource to its original state following a faulty execution. Indeed, this - method is called by the workflow engine whenever an action raises an - exception. - -Here is an example showing how you can write a plugin called ``DummyAction``: - -.. code-block:: python - - # Filepath = /thirdparty/dummy.py - # Import path = thirdparty.dummy - import voluptuous - - from watcher.applier.actions import base - - - class DummyAction(base.BaseAction): - - @property - def schema(self): - return voluptuous.Schema({}) - - def execute(self): - # Does nothing - pass # Only returning False is considered as a failure - - def revert(self): - # Does nothing - pass - - def pre_condition(self): - # No pre-checks are done here - pass - - def post_condition(self): - # Nothing done here - pass - - -This implementation is the most basic one. So in order to get a better -understanding on how to implement a more advanced action, have a look at the -:py:class:`~watcher.applier.actions.migration.Migrate` class. - -Input validation ----------------- - -As you can see in the previous example, we are using `Voluptuous`_ to validate -the input parameters of an action. So if you want to learn more about how to -work with `Voluptuous`_, you can have a look at their `documentation`_: - -.. _Voluptuous: https://github.com/alecthomas/voluptuous -.. _documentation: https://github.com/alecthomas/voluptuous/blob/master/README.md - - -Define configuration parameters -=============================== - -At this point, you have a fully functional action. However, in more complex -implementation, you may want to define some configuration options so one can -tune the action to its needs. To do so, you can implement the -:py:meth:`~.Loadable.get_config_opts` class method as followed: - -.. 
code-block:: python - - from oslo_config import cfg - - class DummyAction(base.BaseAction): - - # [...] - - def execute(self): - assert self.config.test_opt == 0 - - @classmethod - def get_config_opts(cls): - return super( - DummyAction, cls).get_config_opts() + [ - cfg.StrOpt('test_opt', help="Demo Option.", default=0), - # Some more options ... - ] - - -The configuration options defined within this class method will be included -within the global ``watcher.conf`` configuration file under a section named by -convention: ``{namespace}.{plugin_name}``. In our case, the ``watcher.conf`` -configuration would have to be modified as followed: - -.. code-block:: ini - - [watcher_actions.dummy] - # Option used for testing. - test_opt = test_value - -Then, the configuration options you define within this method will then be -injected in each instantiated object via the ``config`` parameter of the -:py:meth:`~.BaseAction.__init__` method. - - -Abstract Plugin Class -===================== - -Here below is the abstract ``BaseAction`` class that every single action -should implement: - -.. autoclass:: watcher.applier.actions.base.BaseAction - :members: - :special-members: __init__ - :noindex: - - .. py:attribute:: schema - - Defines a Schema that the input parameters shall comply to - - :returns: A schema declaring the input parameters this action should be - provided along with their respective constraints - (e.g. type, value range, ...) - :rtype: :py:class:`voluptuous.Schema` instance - - -Register a new entry point -========================== - -In order for the Watcher Applier to load your new action, the -action must be registered as a named entry point under the -``watcher_actions`` entry point of your ``setup.py`` file. If you are using -pbr_, this entry point should be placed in your ``setup.cfg`` file. - -The name you give to your entry point has to be unique. - -Here below is how you would proceed to register ``DummyAction`` using pbr_: - -.. 
code-block:: ini - - [entry_points] - watcher_actions = - dummy = thirdparty.dummy:DummyAction - -.. _pbr: http://docs.openstack.org/developer/pbr/ - - -Using action plugins -==================== - -The Watcher Applier service will automatically discover any installed plugins -when it is restarted. If a Python package containing a custom plugin is -installed within the same environment as Watcher, Watcher will automatically -make that plugin available for use. - -At this point, you can use your new action plugin in your :ref:`strategy plugin -` if you reference it via the use of the -:py:meth:`~.Solution.add_action` method: - -.. code-block:: python - - # [...] - self.solution.add_action( - action_type="dummy", # Name of the entry point we registered earlier - applies_to="", - input_parameters={}) - -By doing so, your action will be saved within the Watcher Database, ready to be -processed by the planner for creating an action plan which can then be executed -by the Watcher Applier via its workflow engine. - -At the last, remember to add the action into the weights in ``watcher.conf``, -otherwise you will get an error when the action be referenced in a strategy. - - -Scheduling of an action plugin -============================== - -Watcher provides a basic built-in :ref:`planner ` -which is only able to process the Watcher built-in actions. Therefore, you will -either have to use an existing third-party planner or :ref:`implement another -planner ` that will be able to take into account your -new action plugin. diff --git a/doc/source/contributor/plugin/base-setup.rst b/doc/source/contributor/plugin/base-setup.rst deleted file mode 100644 index 3e829f5..0000000 --- a/doc/source/contributor/plugin/base-setup.rst +++ /dev/null @@ -1,100 +0,0 @@ -.. - Except where otherwise noted, this document is licensed under Creative - Commons Attribution 3.0 License. You can view the license at: - - https://creativecommons.org/licenses/by/3.0/ - -.. 
_plugin-base_setup: - -======================================= -Create a third-party plugin for Watcher -======================================= - -Watcher provides a plugin architecture which allows anyone to extend the -existing functionalities by implementing third-party plugins. This process can -be cumbersome so this documentation is there to help you get going as quickly -as possible. - - -Pre-requisites -============== - -We assume that you have set up a working Watcher development environment. So if -this not already the case, you can check out our documentation which explains -how to set up a :ref:`development environment -`. - -.. _development environment: - -Third party project scaffolding -=============================== - -First off, we need to create the project structure. To do so, we can use -`cookiecutter`_ and the `OpenStack cookiecutter`_ project scaffolder to -generate the skeleton of our project:: - - $ virtualenv thirdparty - $ source thirdparty/bin/activate - $ pip install cookiecutter - $ cookiecutter https://github.com/openstack-dev/cookiecutter - -The last command will ask you for many information, and If you set -``module_name`` and ``repo_name`` as ``thirdparty``, you should end up with a -structure that looks like this:: - - $ cd thirdparty - $ tree . - . - ├── babel.cfg - ├── CONTRIBUTING.rst - ├── doc - │   └── source - │   ├── conf.py - │   ├── contributing.rst - │   ├── index.rst - │   ├── installation.rst - │   ├── readme.rst - │   └── usage.rst - ├── HACKING.rst - ├── LICENSE - ├── MANIFEST.in - ├── README.rst - ├── requirements.txt - ├── setup.cfg - ├── setup.py - ├── test-requirements.txt - ├── thirdparty - │   ├── __init__.py - │   └── tests - │   ├── base.py - │   ├── __init__.py - │   └── test_thirdparty.py - └── tox.ini - -**Note:** You should add `python-watcher`_ as a dependency in the -requirements.txt file:: - - # Watcher-specific requirements - python-watcher - -.. 
_cookiecutter: https://github.com/audreyr/cookiecutter -.. _OpenStack cookiecutter: https://github.com/openstack-dev/cookiecutter -.. _python-watcher: https://pypi.python.org/pypi/python-watcher - -Implementing a plugin for Watcher -================================= - -Now that the project skeleton has been created, you can start the -implementation of your plugin. As of now, you can implement the following -plugins for Watcher: - -- A :ref:`goal plugin ` -- A :ref:`strategy plugin ` -- An :ref:`action plugin ` -- A :ref:`planner plugin ` -- A workflow engine plugin -- A :ref:`cluster data model collector plugin - ` - -If you want to learn more on how to implement them, you can refer to their -dedicated documentation. diff --git a/doc/source/contributor/plugin/cdmc-plugin.rst b/doc/source/contributor/plugin/cdmc-plugin.rst deleted file mode 100644 index 179c627..0000000 --- a/doc/source/contributor/plugin/cdmc-plugin.rst +++ /dev/null @@ -1,272 +0,0 @@ -.. - Except where otherwise noted, this document is licensed under Creative - Commons Attribution 3.0 License. You can view the license at: - - https://creativecommons.org/licenses/by/3.0/ - -.. _implement_cluster_data_model_collector_plugin: - -======================================== -Build a new cluster data model collector -======================================== - -Watcher Decision Engine has an external cluster data model (CDM) plugin -interface which gives anyone the ability to integrate an external cluster data -model collector (CDMC) in order to extend the initial set of cluster data model -collectors Watcher provides. - -This section gives some guidelines on how to implement and integrate custom -cluster data model collectors within Watcher. - - -Creating a new plugin -===================== - -In order to create a new cluster data model collector, you have to: - -- Extend the :py:class:`~.base.BaseClusterDataModelCollector` class. 
-- Implement its :py:meth:`~.BaseClusterDataModelCollector.execute` abstract - method to return your entire cluster data model that this method should - build. -- Implement its :py:meth:`~.Goal.notification_endpoints` abstract property to - return the list of all the :py:class:`~.base.NotificationEndpoint` instances - that will be responsible for handling incoming notifications in order to - incrementally update your cluster data model. - -First of all, you have to extend the :class:`~.BaseClusterDataModelCollector` -base class which defines the :py:meth:`~.BaseClusterDataModelCollector.execute` -abstract method you will have to implement. This method is responsible for -building an entire cluster data model. - -Here is an example showing how you can write a plugin called -``DummyClusterDataModelCollector``: - -.. code-block:: python - - # Filepath = /thirdparty/dummy.py - # Import path = thirdparty.dummy - - from watcher.decision_engine.model import model_root - from watcher.decision_engine.model.collector import base - - - class DummyClusterDataModelCollector(base.BaseClusterDataModelCollector): - - def execute(self): - model = model_root.ModelRoot() - # Do something here... - return model - - @property - def notification_endpoints(self): - return [] - -This implementation is the most basic one. So in order to get a better -understanding on how to implement a more advanced cluster data model collector, -have a look at the :py:class:`~.NovaClusterDataModelCollector` class. - -Define a custom model -===================== - -As you may have noticed in the above example, we are reusing an existing model -provided by Watcher. However, this model can be easily customized by -implementing a new class that would implement the :py:class:`~.Model` abstract -base class. Here below is simple example on how to proceed in implementing a -custom Model: - -.. 
code-block:: python - - # Filepath = /thirdparty/dummy.py - # Import path = thirdparty.dummy - - from watcher.decision_engine.model import base as modelbase - from watcher.decision_engine.model.collector import base - - - class MyModel(modelbase.Model): - - def to_string(self): - return 'MyModel' - - - class DummyClusterDataModelCollector(base.BaseClusterDataModelCollector): - - def execute(self): - model = MyModel() - # Do something here... - return model - - @property - def notification_endpoints(self): - return [] - -Here below is the abstract ``Model`` class that every single cluster data model -should implement: - -.. autoclass:: watcher.decision_engine.model.base.Model - :members: - :special-members: __init__ - :noindex: - -Define configuration parameters -=============================== - -At this point, you have a fully functional cluster data model collector. -By default, cluster data model collectors define a ``period`` option (see -:py:meth:`~.BaseClusterDataModelCollector.get_config_opts`) that corresponds -to the interval of time between each synchronization of the in-memory model. - -However, in more complex implementation, you may want to define some -configuration options so one can tune the cluster data model collector to your -needs. To do so, you can implement the :py:meth:`~.Loadable.get_config_opts` -class method as followed: - -.. code-block:: python - - from oslo_config import cfg - from watcher.decision_engine.model import model_root - from watcher.decision_engine.model.collector import base - - - class DummyClusterDataModelCollector(base.BaseClusterDataModelCollector): - - def execute(self): - model = model_root.ModelRoot() - # Do something here... - return model - - @property - def notification_endpoints(self): - return [] - - @classmethod - def get_config_opts(cls): - return super( - DummyClusterDataModelCollector, cls).get_config_opts() + [ - cfg.StrOpt('test_opt', help="Demo Option.", default=0), - # Some more options ... 
- ] - -The configuration options defined within this class method will be included -within the global ``watcher.conf`` configuration file under a section named by -convention: ``{namespace}.{plugin_name}`` (see section :ref:`Register a new -entry point `). The namespace for CDMC plugins is -``watcher_cluster_data_model_collectors``, so in our case, the ``watcher.conf`` -configuration would have to be modified as followed: - -.. code-block:: ini - - [watcher_cluster_data_model_collectors.dummy] - # Option used for testing. - test_opt = test_value - -Then, the configuration options you define within this method will then be -injected in each instantiated object via the ``config`` parameter of the -:py:meth:`~.BaseClusterDataModelCollector.__init__` method. - - -Abstract Plugin Class -===================== - -Here below is the abstract ``BaseClusterDataModelCollector`` class that every -single cluster data model collector should implement: - -.. autoclass:: watcher.decision_engine.model.collector.base.BaseClusterDataModelCollector - :members: - :special-members: __init__ - :noindex: - - -.. _register_new_cdmc_entrypoint: - -Register a new entry point -========================== - -In order for the Watcher Decision Engine to load your new cluster data model -collector, the latter must be registered as a named entry point under the -``watcher_cluster_data_model_collectors`` entry point namespace of your -``setup.py`` file. If you are using pbr_, this entry point should be placed in -your ``setup.cfg`` file. - -The name you give to your entry point has to be unique. - -Here below is how to register ``DummyClusterDataModelCollector`` using pbr_: - -.. code-block:: ini - - [entry_points] - watcher_cluster_data_model_collectors = - dummy = thirdparty.dummy:DummyClusterDataModelCollector - -.. 
_pbr: http://docs.openstack.org/developer/pbr/ - - -Add new notification endpoints -============================== - -At this point, you have a fully functional cluster data model collector. -However, this CDMC is only refreshed periodically via a background scheduler. -As you may sometimes execute a strategy with a stale CDM due to a high activity -on your infrastructure, you can define some notification endpoints that will be -responsible for incrementally updating the CDM based on notifications emitted -by other services such as Nova. To do so, you can implement and register a new -``DummyEndpoint`` notification endpoint regarding a ``dummy`` event as shown -below: - -.. code-block:: python - - from watcher.decision_engine.model import model_root - from watcher.decision_engine.model.collector import base - - - class DummyNotification(base.NotificationEndpoint): - - @property - def filter_rule(self): - return filtering.NotificationFilter( - publisher_id=r'.*', - event_type=r'^dummy$', - ) - - def info(self, ctxt, publisher_id, event_type, payload, metadata): - # Do some CDM modifications here... - pass - - - class DummyClusterDataModelCollector(base.BaseClusterDataModelCollector): - - def execute(self): - model = model_root.ModelRoot() - # Do something here... - return model - - @property - def notification_endpoints(self): - return [DummyNotification(self)] - - -Note that if the event you are trying to listen to is published by a new -service, you may have to also add a new topic Watcher will have to subscribe to -in the ``notification_topics`` option of the ``[watcher_decision_engine]`` -section. - - -Using cluster data model collector plugins -========================================== - -The Watcher Decision Engine service will automatically discover any installed -plugins when it is restarted. If a Python package containing a custom plugin is -installed within the same environment as Watcher, Watcher will automatically -make that plugin available for use. 
- -At this point, you can use your new cluster data model plugin in your -:ref:`strategy plugin ` by using the -:py:attr:`~.BaseStrategy.collector_manager` property as followed: - -.. code-block:: python - - # [...] - dummy_collector = self.collector_manager.get_cluster_model_collector( - "dummy") # "dummy" is the name of the entry point we declared earlier - dummy_model = dummy_collector.get_latest_cluster_data_model() - # Do some stuff with this model diff --git a/doc/source/contributor/plugin/goal-plugin.rst b/doc/source/contributor/plugin/goal-plugin.rst deleted file mode 100644 index f0e7dd8..0000000 --- a/doc/source/contributor/plugin/goal-plugin.rst +++ /dev/null @@ -1,215 +0,0 @@ -.. - Except where otherwise noted, this document is licensed under Creative - Commons Attribution 3.0 License. You can view the license at: - - https://creativecommons.org/licenses/by/3.0/ - -.. _implement_goal_plugin: - -================ -Build a new goal -================ - -Watcher Decision Engine has an external :ref:`goal ` -plugin interface which gives anyone the ability to integrate an external -goal which can be achieved by a :ref:`strategy `. - -This section gives some guidelines on how to implement and integrate custom -goals with Watcher. If you wish to create a third-party package for your -plugin, you can refer to our :ref:`documentation for third-party package -creation `. - - -Pre-requisites -============== - -Before using any goal, please make sure that none of the existing goals fit -your needs. Indeed, the underlying value of defining a goal is to be able to -compare the efficacy of the action plans resulting from the various strategies -satisfying the same goal. By doing so, Watcher can assist the administrator -in his choices. - - -Create a new plugin -=================== - -In order to create a new goal, you have to: - -- Extend the :py:class:`~.base.Goal` class. 
-- Implement its :py:meth:`~.Goal.get_name` class method to return the - **unique** ID of the new goal you want to create. This unique ID should - be the same as the name of :ref:`the entry point you will declare later on - `. -- Implement its :py:meth:`~.Goal.get_display_name` class method to - return the translated display name of the goal you want to create. - Note: Do not use a variable to return the translated string so it can be - automatically collected by the translation tool. -- Implement its :py:meth:`~.Goal.get_translatable_display_name` - class method to return the translation key (actually the english display - name) of your new goal. The value return should be the same as the - string translated in :py:meth:`~.Goal.get_display_name`. -- Implement its :py:meth:`~.Goal.get_efficacy_specification` method to return - the :ref:`efficacy specification ` for - your goal. - -Here is an example showing how you can define a new ``NewGoal`` goal plugin: - -.. code-block:: python - - # filepath: thirdparty/new.py - # import path: thirdparty.new - - from watcher._i18n import _ - from watcher.decision_engine.goal import base - from watcher.decision_engine.goal.efficacy import specs - - class NewGoal(base.Goal): - - @classmethod - def get_name(cls): - return "new_goal" # Will be the name of the entry point - - @classmethod - def get_display_name(cls): - return _("New Goal") - - @classmethod - def get_translatable_display_name(cls): - return "New Goal" - - @classmethod - def get_efficacy_specification(cls): - return specs.Unclassified() - - -As you may have noticed, the :py:meth:`~.Goal.get_efficacy_specification` -method returns an :py:meth:`~.Unclassified` instance which -is provided by Watcher. This efficacy specification is useful during the -development process of your goal as it corresponds to an empty specification. 
-If you want to learn more about what efficacy specifications are used for or to -define your own efficacy specification, please refer to the :ref:`related -section below `. - - -Abstract Plugin Class -===================== - -Here below is the abstract :py:class:`~.base.Goal` class: - -.. autoclass:: watcher.decision_engine.goal.base.Goal - :members: - :noindex: - -.. _goal_plugin_add_entrypoint: - -Add a new entry point -===================== - -In order for the Watcher Decision Engine to load your new goal, the -goal must be registered as a named entry point under the ``watcher_goals`` -entry point namespace of your ``setup.py`` file. If you are using pbr_, this -entry point should be placed in your ``setup.cfg`` file. - -The name you give to your entry point has to be unique and should be the same -as the value returned by the :py:meth:`~.base.Goal.get_name` class method of -your goal. - -Here below is how you would proceed to register ``NewGoal`` using pbr_: - -.. code-block:: ini - - [entry_points] - watcher_goals = - new_goal = thirdparty.new:NewGoal - - -To get a better understanding on how to implement a more advanced goal, -have a look at the :py:class:`~.ServerConsolidation` class. - -.. _pbr: http://docs.openstack.org/developer/pbr/ - -.. _implement_efficacy_specification: - -Implement a customized efficacy specification -============================================= - -What is it for? ---------------- - -Efficacy specifications define a set of specifications for a given goal. -These specifications actually define a list of indicators which are to be used -to compute a global efficacy that outlines how well a strategy performed when -trying to achieve the goal it is associated to. - -The idea behind such specification is to give the administrator the possibility -to run an audit using different strategies satisfying the same goal and be able -to judge how they performed at a glance. 
- - -Implementation --------------- - -In order to create a new efficacy specification, you have to: - -- Extend the :py:class:`~.EfficacySpecification` class. -- Implement :py:meth:`~.EfficacySpecification.get_indicators_specifications` - by returning a list of :py:class:`~.IndicatorSpecification` instances. - - * Each :py:class:`~.IndicatorSpecification` instance should actually extend - the latter. - * Each indicator specification should have a **unique name** which should be - a valid Python variable name. - * They should implement the :py:attr:`~.EfficacySpecification.schema` - abstract property by returning a :py:class:`~.voluptuous.Schema` instance. - This schema is the contract the strategy will have to comply with when - setting the value associated to the indicator specification within its - solution (see the :ref:`architecture of Watcher - ` for more information on - the audit execution workflow). - -- Implement the :py:meth:`~.EfficacySpecification.get_global_efficacy` method: - it should compute the global efficacy for the goal it achieves based on the - efficacy indicators you just defined. - -Here below is an example of an efficacy specification containing one indicator -specification: - -.. 
code-block:: python - - from watcher._i18n import _ - from watcher.decision_engine.goal.efficacy import base as efficacy_base - from watcher.decision_engine.goal.efficacy import indicators - from watcher.decision_engine.solution import efficacy - - - class IndicatorExample(IndicatorSpecification): - def __init__(self): - super(IndicatorExample, self).__init__( - name="indicator_example", - description=_("Example of indicator specification."), - unit=None, - ) - - @property - def schema(self): - return voluptuous.Schema(voluptuous.Range(min=0), required=True) - - - class UnclassifiedStrategySpecification(efficacy_base.EfficacySpecification): - - def get_indicators_specifications(self): - return [IndicatorExample()] - - def get_global_efficacy(self, indicators_map): - return efficacy.Indicator( - name="global_efficacy_indicator", - description="Example of global efficacy indicator", - unit="%", - value=indicators_map.indicator_example % 100) - - -To get a better understanding on how to implement an efficacy specification, -have a look at :py:class:`~.ServerConsolidationSpecification`. - -Also, if you want to see a concrete example of an indicator specification, -have a look at :py:class:`~.ReleasedComputeNodesCount`. diff --git a/doc/source/contributor/plugin/index.rst b/doc/source/contributor/plugin/index.rst deleted file mode 100644 index 4fedb01..0000000 --- a/doc/source/contributor/plugin/index.rst +++ /dev/null @@ -1,11 +0,0 @@ -.. toctree:: - :maxdepth: 1 - - base-setup - action-plugin - cdmc-plugin - goal-plugin - planner-plugin - scoring-engine-plugin - strategy-plugin - plugins diff --git a/doc/source/contributor/plugin/planner-plugin.rst b/doc/source/contributor/plugin/planner-plugin.rst deleted file mode 100644 index de2e7b1..0000000 --- a/doc/source/contributor/plugin/planner-plugin.rst +++ /dev/null @@ -1,174 +0,0 @@ -.. - Except where otherwise noted, this document is licensed under Creative - Commons Attribution 3.0 License. 
You can view the license at: - - https://creativecommons.org/licenses/by/3.0/ - -.. _implement_planner_plugin: - -=================== -Build a new planner -=================== - -Watcher :ref:`Decision Engine ` has an -external :ref:`planner ` plugin interface which -gives anyone the ability to integrate an external :ref:`planner -` in order to extend the initial set of planners -Watcher provides. - -This section gives some guidelines on how to implement and integrate custom -planners with Watcher. - -.. _Decision Engine: watcher_decision_engine_definition - -Creating a new plugin -===================== - -First of all you have to extend the base :py:class:`~.BasePlanner` class which -defines an abstract method that you will have to implement. The -:py:meth:`~.BasePlanner.schedule` is the method being called by the Decision -Engine to schedule a given solution (:py:class:`~.BaseSolution`) into an -:ref:`action plan ` by ordering/sequencing an unordered -set of actions contained in the proposed solution (for more details, see -:ref:`definition of a solution `). - -Here is an example showing how you can write a planner plugin called -``DummyPlanner``: - -.. 
code-block:: python - - # Filepath = third-party/third_party/dummy.py - # Import path = third_party.dummy - from oslo_utils import uuidutils - from watcher.decision_engine.planner import base - - - class DummyPlanner(base.BasePlanner): - - def _create_action_plan(self, context, audit_id): - action_plan_dict = { - 'uuid': uuidutils.generate_uuid(), - 'audit_id': audit_id, - 'first_action_id': None, - 'state': objects.action_plan.State.RECOMMENDED - } - - new_action_plan = objects.ActionPlan(context, **action_plan_dict) - new_action_plan.create(context) - new_action_plan.save() - return new_action_plan - - def schedule(self, context, audit_id, solution): - # Empty action plan - action_plan = self._create_action_plan(context, audit_id) - # todo: You need to create the workflow of actions here - # and attach it to the action plan - return action_plan - -This implementation is the most basic one. So if you want to have more advanced -examples, have a look at the implementation of planners already provided by -Watcher like :py:class:`~.DefaultPlanner`. A list with all available planner -plugins can be found :ref:`here `. - - -Define configuration parameters -=============================== - -At this point, you have a fully functional planner. However, in more complex -implementation, you may want to define some configuration options so one can -tune the planner to its needs. To do so, you can implement the -:py:meth:`~.Loadable.get_config_opts` class method as followed: - -.. code-block:: python - - from oslo_config import cfg - - class DummyPlanner(base.BasePlanner): - - # [...] - - def schedule(self, context, audit_uuid, solution): - assert self.config.test_opt == 0 - # [...] - - @classmethod - def get_config_opts(cls): - return super( - DummyPlanner, cls).get_config_opts() + [ - cfg.StrOpt('test_opt', help="Demo Option.", default=0), - # Some more options ... 
- ] - -The configuration options defined within this class method will be included -within the global ``watcher.conf`` configuration file under a section named by -convention: ``{namespace}.{plugin_name}``. In our case, the ``watcher.conf`` -configuration would have to be modified as followed: - -.. code-block:: ini - - [watcher_planners.dummy] - # Option used for testing. - test_opt = test_value - -Then, the configuration options you define within this method will then be -injected in each instantiated object via the ``config`` parameter of the -:py:meth:`~.BasePlanner.__init__` method. - - -Abstract Plugin Class -===================== - -Here below is the abstract ``BasePlanner`` class that every single planner -should implement: - -.. autoclass:: watcher.decision_engine.planner.base.BasePlanner - :members: - :special-members: __init__ - :noindex: - - -Register a new entry point -========================== - -In order for the Watcher Decision Engine to load your new planner, the -latter must be registered as a new entry point under the -``watcher_planners`` entry point namespace of your ``setup.py`` file. If you -are using pbr_, this entry point should be placed in your ``setup.cfg`` file. - -The name you give to your entry point has to be unique. - -Here below is how you would proceed to register ``DummyPlanner`` using pbr_: - -.. code-block:: ini - - [entry_points] - watcher_planners = - dummy = third_party.dummy:DummyPlanner - -.. _pbr: http://docs.openstack.org/developer/pbr/ - - -Using planner plugins -===================== - -The :ref:`Watcher Decision Engine ` service -will automatically discover any installed plugins when it is started. This -means that if Watcher is already running when you install your plugin, you will -have to restart the related Watcher services. If a Python package containing a -custom plugin is installed within the same environment as Watcher, Watcher will -automatically make that plugin available for use. 
- -At this point, Watcher will use your new planner if you referenced it in the -``planner`` option under the ``[watcher_planner]`` section of your -``watcher.conf`` configuration file when you started it. For example, if you -want to use the ``dummy`` planner you just installed, you would have to -select it as followed: - -.. code-block:: ini - - [watcher_planner] - planner = dummy - -As you may have noticed, only a single planner implementation can be activated -at a time, so make sure it is generic enough to support all your strategies -and actions. diff --git a/doc/source/contributor/plugin/plugins.rst b/doc/source/contributor/plugin/plugins.rst deleted file mode 100644 index 6eeb7a1..0000000 --- a/doc/source/contributor/plugin/plugins.rst +++ /dev/null @@ -1,76 +0,0 @@ -.. - Except where otherwise noted, this document is licensed under Creative - Commons Attribution 3.0 License. You can view the license at: - - https://creativecommons.org/licenses/by/3.0/ - - -================= -Available Plugins -================= - -In this section we present all the plugins that are shipped along with Watcher. -If you want to know which plugins your Watcher services have access to, you can -use the :ref:`Guru Meditation Reports ` to display them. - -.. _watcher_goals: - -Goals -===== - -.. list-plugins:: watcher_goals - :detailed: - -.. _watcher_scoring_engines: - -Scoring Engines -=============== - -.. list-plugins:: watcher_scoring_engines - :detailed: - -.. _watcher_scoring_engine_containers: - -Scoring Engine Containers -========================= - -.. list-plugins:: watcher_scoring_engine_containers - :detailed: - -.. _watcher_strategies: - -Strategies -========== - -.. list-plugins:: watcher_strategies - :detailed: - -.. _watcher_actions: - -Actions -======= - -.. list-plugins:: watcher_actions - :detailed: - -.. _watcher_workflow_engines: - -Workflow Engines -================ - -.. list-plugins:: watcher_workflow_engines - :detailed: - -.. 
_watcher_planners: - -Planners -======== - -.. list-plugins:: watcher_planners - :detailed: - -Cluster Data Model Collectors -============================= - -.. list-plugins:: watcher_cluster_data_model_collectors - :detailed: diff --git a/doc/source/contributor/plugin/scoring-engine-plugin.rst b/doc/source/contributor/plugin/scoring-engine-plugin.rst deleted file mode 100644 index 728fbcf..0000000 --- a/doc/source/contributor/plugin/scoring-engine-plugin.rst +++ /dev/null @@ -1,210 +0,0 @@ -.. - Except where otherwise noted, this document is licensed under Creative - Commons Attribution 3.0 License. You can view the license at: - - https://creativecommons.org/licenses/by/3.0/ - -.. _implement_scoring_engine_plugin: - -========================== -Build a new scoring engine -========================== - -Watcher Decision Engine has an external :ref:`scoring engine -` plugin interface which gives anyone the ability -to integrate an external scoring engine in order to make use of it in a -:ref:`strategy `. - -This section gives some guidelines on how to implement and integrate custom -scoring engines with Watcher. If you wish to create a third-party package for -your plugin, you can refer to our :ref:`documentation for third-party package -creation `. - - -Pre-requisites -============== - -Because scoring engines execute a purely mathematical tasks, they typically do -not have any additional dependencies. Additional requirements might be defined -by specific scoring engine implementations. For example, some scoring engines -might require to prepare learning data, which has to be loaded during the -scoring engine startup. Some other might require some external services to be -available (e.g. if the scoring infrastructure is running in the cloud). 
- - -Create a new scoring engine plugin -================================== - -In order to create a new scoring engine you have to: - -- Extend the :py:class:`~.ScoringEngine` class -- Implement its :py:meth:`~.ScoringEngine.get_name` method to return the - **unique** ID of the new scoring engine you want to create. This unique ID - should be the same as the name of :ref:`the entry point we will declare later - on `. -- Implement its :py:meth:`~.ScoringEngine.get_description` method to return the - user-friendly description of the implemented scoring engine. It might contain - information about algorithm used, learning data etc. -- Implement its :py:meth:`~.ScoringEngine.get_metainfo` method to return the - machine-friendly metadata about this scoring engine. For example, it could be - a JSON formatted text with information about the data model used, its input - and output data format, column names, etc. -- Implement its :py:meth:`~.ScoringEngine.calculate_score` method to return the - result calculated by this scoring engine. - -Here is an example showing how you can write a plugin called ``NewScorer``: - -.. code-block:: python - - # filepath: thirdparty/new.py - # import path: thirdparty.new - from watcher.decision_engine.scoring import base - - - class NewScorer(base.ScoringEngine): - - def get_name(self): - return 'new_scorer' - - def get_description(self): - return '' - - def get_metainfo(self): - return """{ - "feature_columns": [ - "column1", - "column2", - "column3"], - "result_columns": [ - "value", - "probability"] - }""" - - def calculate_score(self, features): - return '[12, 0.83]' - -As you can see in the above example, the -:py:meth:`~.ScoringEngine.calculate_score` method returns a string. Both this -class and the client (caller) should perform all the necessary serialization -or deserialization. 
- - -(Optional) Create a new scoring engine container plugin -======================================================= - -Optionally, it's possible to implement a container plugin, which can return a -list of scoring engines. This list can be re-evaluated multiple times during -the lifecycle of :ref:`Watcher Decision Engine -` and synchronized with :ref:`Watcher -Database ` using the ``watcher-sync`` command line -tool. - -Below is an example of a container using some scoring engine implementation -that is simply made of a client responsible for communicating with a real -scoring engine deployed as a web service on external servers: - -.. code-block:: python - - class NewScoringContainer(base.ScoringEngineContainer): - - @classmethod - def get_scoring_engine_list(self): - return [ - RemoteScoringEngine( - name='scoring_engine1', - description='Some remote Scoring Engine 1', - remote_url='http://engine1.example.com/score'), - RemoteScoringEngine( - name='scoring_engine2', - description='Some remote Scoring Engine 2', - remote_url='http://engine2.example.com/score'), - ] - - -Abstract Plugin Class -===================== - -Here below is the abstract :py:class:`~.ScoringEngine` class: - -.. autoclass:: watcher.decision_engine.scoring.base.ScoringEngine - :members: - :special-members: __init__ - :noindex: - - -Abstract Plugin Container Class -=============================== - -Here below is the abstract :py:class:`~.ScoringContainer` class: - -.. autoclass:: watcher.decision_engine.scoring.base.ScoringEngineContainer - :members: - :special-members: __init__ - :noindex: - - -.. _scoring_engine_plugin_add_entrypoint: - -Add a new entry point -===================== - -In order for the Watcher Decision Engine to load your new scoring engine, it -must be registered as a named entry point under the ``watcher_scoring_engines`` -entry point of your ``setup.py`` file. If you are using pbr_, this entry point -should be placed in your ``setup.cfg`` file. 
- -The name you give to your entry point has to be unique and should be the same -as the value returned by the :py:meth:`~.ScoringEngine.get_name` method of your -strategy. - -Here below is how you would proceed to register ``NewScorer`` using pbr_: - -.. code-block:: ini - - [entry_points] - watcher_scoring_engines = - new_scorer = thirdparty.new:NewScorer - - -To get a better understanding on how to implement a more advanced scoring -engine, have a look at the :py:class:`~.DummyScorer` class. This implementation -is not really using machine learning, but other than that it contains all the -pieces which the "real" implementation would have. - -In addition, for some use cases there is a need to register a list (possibly -dynamic, depending on the implementation and configuration) of scoring engines -in a single plugin, so there is no need to restart :ref:`Watcher Decision -Engine ` every time such list changes. For -these cases, an additional ``watcher_scoring_engine_containers`` entry point -can be used. - -For the example how to use scoring engine containers, please have a look at -the :py:class:`~.DummyScoringContainer` and the way it is configured in -``setup.cfg``. For new containers it could be done like this: - -.. code-block:: ini - - [entry_points] - watcher_scoring_engine_containers = - new_scoring_container = thirdparty.new:NewContainer - -.. _pbr: http://docs.openstack.org/developer/pbr/ - - -Using scoring engine plugins -============================ - -The Watcher Decision Engine service will automatically discover any installed -plugins when it is restarted. If a Python package containing a custom plugin is -installed within the same environment as Watcher, Watcher will automatically -make that plugin available for use. - -At this point, Watcher will scan and register inside the :ref:`Watcher Database -` all the scoring engines you implemented upon -restarting the :ref:`Watcher Decision Engine -`. 
- -In addition, ``watcher-sync`` tool can be used to trigger :ref:`Watcher -Database ` synchronization. This might be used for -"dynamic" scoring containers, which can return different scoring engines based -on some external configuration (if they support that). diff --git a/doc/source/contributor/plugin/strategy-plugin.rst b/doc/source/contributor/plugin/strategy-plugin.rst deleted file mode 100644 index b285dff..0000000 --- a/doc/source/contributor/plugin/strategy-plugin.rst +++ /dev/null @@ -1,314 +0,0 @@ -.. - Except where otherwise noted, this document is licensed under Creative - Commons Attribution 3.0 License. You can view the license at: - - https://creativecommons.org/licenses/by/3.0/ - -.. _implement_strategy_plugin: - -================================= -Build a new optimization strategy -================================= - -Watcher Decision Engine has an external :ref:`strategy ` -plugin interface which gives anyone the ability to integrate an external -strategy in order to make use of placement algorithms. - -This section gives some guidelines on how to implement and integrate custom -strategies with Watcher. If you wish to create a third-party package for your -plugin, you can refer to our :ref:`documentation for third-party package -creation `. - - -Pre-requisites -============== - -Before using any strategy, you should make sure you have your Telemetry service -configured so that it would provide you all the metrics you need to be able to -use your strategy. - - -Create a new strategy plugin -============================ - -In order to create a new strategy, you have to: - -- Extend the :py:class:`~.UnclassifiedStrategy` class -- Implement its :py:meth:`~.BaseStrategy.get_name` class method to return the - **unique** ID of the new strategy you want to create. This unique ID should - be the same as the name of :ref:`the entry point we will declare later on - `. 
-- Implement its :py:meth:`~.BaseStrategy.get_display_name` class method to - return the translated display name of the strategy you want to create. - Note: Do not use a variable to return the translated string so it can be - automatically collected by the translation tool. -- Implement its :py:meth:`~.BaseStrategy.get_translatable_display_name` - class method to return the translation key (actually the English display - name) of your new strategy. The value return should be the same as the - string translated in :py:meth:`~.BaseStrategy.get_display_name`. -- Implement its :py:meth:`~.BaseStrategy.execute` method to return the - solution you computed within your strategy. - -Here is an example showing how you can write a plugin called ``NewStrategy``: - -.. code-block:: python - - # filepath: thirdparty/new.py - # import path: thirdparty.new - import abc - - import six - - from watcher._i18n import _ - from watcher.decision_engine.strategy.strategies import base - - - class NewStrategy(base.UnclassifiedStrategy): - - def __init__(self, osc=None): - super(NewStrategy, self).__init__(osc) - - def execute(self, original_model): - self.solution.add_action(action_type="nop", - input_parameters=parameters) - # Do some more stuff here ... - return self.solution - - @classmethod - def get_name(cls): - return "new_strategy" - - @classmethod - def get_display_name(cls): - return _("New strategy") - - @classmethod - def get_translatable_display_name(cls): - return "New strategy" - - -As you can see in the above example, the :py:meth:`~.BaseStrategy.execute` -method returns a :py:class:`~.BaseSolution` instance as required. This solution -is what wraps the abstract set of actions the strategy recommends to you. This -solution is then processed by a :ref:`planner ` to -produce an action plan which contains the sequenced flow of actions to be -executed by the :ref:`Watcher Applier `. 
This -solution also contains the various :ref:`efficacy indicators -` alongside its computed :ref:`global efficacy -`. - -Please note that your strategy class will expect to find the same constructor -signature as BaseStrategy to instantiate you strategy. Therefore, you should -ensure that your ``__init__`` signature is identical to the -:py:class:`~.BaseStrategy` one. - - -Strategy efficacy -================= - -As stated before, the ``NewStrategy`` class extends a class called -:py:class:`~.UnclassifiedStrategy`. This class actually implements a set of -abstract methods which are defined within the :py:class:`~.BaseStrategy` parent -class. - -One thing this :py:class:`~.UnclassifiedStrategy` class defines is that our -``NewStrategy`` achieves the ``unclassified`` goal. This goal is a peculiar one -as it does not contain any indicator nor does it calculate a global efficacy. -This proves itself to be quite useful during the development of a new strategy -for which the goal has yet to be defined or in case a :ref:`new goal -` has yet to be implemented. - - -Define Strategy Parameters -========================== - -For each new added strategy, you can add parameters spec so that an operator -can input strategy parameters when creating an audit to control the -:py:meth:`~.BaseStrategy.execute` behavior of strategy. This is useful to -define some threshold for your strategy, and tune them at runtime. - -To define parameters, just implements :py:meth:`~.BaseStrategy.get_schema` to -return parameters spec with `jsonschema -`_ format. -It is strongly encouraged that provide default value for each parameter, or -else reference fails if operator specify no parameters. - -Here is an example showing how you can define 2 parameters for -``DummyStrategy``: - -.. 
code-block:: python - - class DummyStrategy(base.DummyBaseStrategy): - - @classmethod - def get_schema(cls): - return { - "properties": { - "para1": { - "description": "number parameter example", - "type": "number", - "default": 3.2, - "minimum": 1.0, - "maximum": 10.2, - }, - "para2": { - "description": "string parameter example", - "type": "string", - "default": "hello", - }, - }, - } - - -You can reference parameters in :py:meth:`~.BaseStrategy.execute`: - -.. code-block:: python - - class DummyStrategy(base.DummyBaseStrategy): - - def execute(self): - para1 = self.input_parameters.para1 - para2 = self.input_parameters.para2 - - if para1 > 5: - ... - - -Operator can specify parameters with following commands: - -.. code:: bash - - $ watcher audit create -a -p para1=6.0 -p para2=hi - -Pls. check user-guide for details. - - -Abstract Plugin Class -===================== - -Here below is the abstract :py:class:`~.BaseStrategy` class: - -.. autoclass:: watcher.decision_engine.strategy.strategies.base.BaseStrategy - :members: - :special-members: __init__ - :noindex: - -.. _strategy_plugin_add_entrypoint: - -Add a new entry point -===================== - -In order for the Watcher Decision Engine to load your new strategy, the -strategy must be registered as a named entry point under the -``watcher_strategies`` entry point of your ``setup.py`` file. If you are using -pbr_, this entry point should be placed in your ``setup.cfg`` file. - -The name you give to your entry point has to be unique and should be the same -as the value returned by the :py:meth:`~.BaseStrategy.get_name` class method of -your strategy. - -Here below is how you would proceed to register ``NewStrategy`` using pbr_: - -.. code-block:: ini - - [entry_points] - watcher_strategies = - new_strategy = thirdparty.new:NewStrategy - - -To get a better understanding on how to implement a more advanced strategy, -have a look at the :py:class:`~.BasicConsolidation` class. - -.. 
_pbr: http://docs.openstack.org/developer/pbr/ - -Using strategy plugins -====================== - -The Watcher Decision Engine service will automatically discover any installed -plugins when it is restarted. If a Python package containing a custom plugin is -installed within the same environment as Watcher, Watcher will automatically -make that plugin available for use. - -At this point, Watcher will scan and register inside the :ref:`Watcher Database -` all the strategies (alongside the goals they -should satisfy) you implemented upon restarting the :ref:`Watcher Decision -Engine `. - -You should take care when installing strategy plugins. By their very nature, -there are no guarantees that utilizing them as is will be supported, as -they may require a set of metrics which is not yet available within the -Telemetry service. In such a case, please do make sure that you first -check/configure the latter so your new strategy can be fully functional. - -Querying metrics ----------------- - -A large set of metrics, generated by OpenStack modules, can be used in your -strategy implementation. To collect these metrics, Watcher provides a -`Helper`_ for two data sources which are `Ceilometer`_ and `Monasca`_. If you -wish to query metrics from a different data source, you can implement your own -and directly use it from within your new strategy. Indeed, strategies in -Watcher have the cluster data models decoupled from the data sources which -means that you may keep the former while changing the latter. -The recommended way for you to support a new data source is to implement a new -helper that would encapsulate within separate methods the queries you need to -perform. To then use it, you would just have to instantiate it within your -strategy. - -If you want to use Ceilometer but with your own metrics database backend, -please refer to the `Ceilometer developer guide`_. The list of the available -Ceilometer backends is located here_. 
The `Ceilosca`_ project is a good example -of how to create your own pluggable backend. Moreover, if your strategy -requires new metrics not covered by Ceilometer, you can add them through a -`Ceilometer plugin`_. - - -.. _`Helper`: https://github.com/openstack/watcher/blob/master/watcher/decision_engine/cluster/history/ceilometer.py -.. _`Ceilometer developer guide`: http://docs.openstack.org/developer/ceilometer/architecture.html#storing-the-data -.. _`Ceilometer`: http://docs.openstack.org/developer/ceilometer/ -.. _`Monasca`: https://github.com/openstack/monasca-api/blob/master/docs/monasca-api-spec.md -.. _`here`: http://docs.openstack.org/developer/ceilometer/install/dbreco.html#choosing-a-database-backend -.. _`Ceilometer plugin`: http://docs.openstack.org/developer/ceilometer/plugins.html -.. _`Ceilosca`: https://github.com/openstack/monasca-ceilometer/blob/master/ceilosca/ceilometer/storage/impl_monasca.py - -Read usage metrics using the Watcher Datasource Helper ------------------------------------------------------- - -The following code snippet shows how to invoke a Datasource Helper class: - -.. code-block:: py - - from watcher.datasource import ceilometer as ceil - from watcher.datasource import monasca as mon - - @property - def ceilometer(self): - if self._ceilometer is None: - self._ceilometer = ceil.CeilometerHelper(osc=self.osc) - return self._ceilometer - - @property - def monasca(self): - if self._monasca is None: - self._monasca = mon.MonascaHelper(osc=self.osc) - return self._monasca - -Using that you can now query the values for that specific metric: - -.. 
code-block:: py - - if self.config.datasource == "ceilometer": - resource_id = "%s_%s" % (node.uuid, node.hostname) - return self.ceilometer.statistic_aggregation( - resource_id=resource_id, - meter_name='compute.node.cpu.percent', - period="7200", - aggregate='avg', - ) - elif self.config.datasource == "monasca": - statistics = self.monasca.statistic_aggregation( - meter_name='compute.node.cpu.percent', - dimensions=dict(hostname=node.uuid), - period=7200, - aggregate='avg' - ) diff --git a/doc/source/contributor/rally_link.rst b/doc/source/contributor/rally_link.rst deleted file mode 100644 index 7fd02e3..0000000 --- a/doc/source/contributor/rally_link.rst +++ /dev/null @@ -1 +0,0 @@ -.. include:: ../../../rally-jobs/README.rst diff --git a/doc/source/contributor/testing.rst b/doc/source/contributor/testing.rst deleted file mode 100644 index ab0675f..0000000 --- a/doc/source/contributor/testing.rst +++ /dev/null @@ -1,50 +0,0 @@ -.. - Except where otherwise noted, this document is licensed under Creative - Commons Attribution 3.0 License. You can view the license at: - - https://creativecommons.org/licenses/by/3.0/ - -======= -Testing -======= - -.. _unit_tests: - -Unit tests -========== - -All unit tests should be run using `tox`_. To run the same unit tests that are -executing onto `Gerrit`_ which includes ``py35``, ``py27`` and ``pep8``, you -can issue the following command:: - - $ workon watcher - (watcher) $ pip install tox - (watcher) $ cd watcher - (watcher) $ tox - -If you want to only run one of the aforementioned, you can then issue one of -the following:: - - $ workon watcher - (watcher) $ tox -e py35 - (watcher) $ tox -e py27 - (watcher) $ tox -e pep8 - -.. _tox: https://tox.readthedocs.org/ -.. _Gerrit: http://review.openstack.org/ - -You may pass options to the test programs using positional arguments. To run a -specific unit test, you can pass extra options to `os-testr`_ after putting -the ``--`` separator. 
So using the ``-r`` option followed by a regex string, -you can run the desired test:: - - $ workon watcher - (watcher) $ tox -e py27 -- -r watcher.tests.api - -.. _os-testr: http://docs.openstack.org/developer/os-testr/ - -When you're done, deactivate the virtualenv:: - - $ deactivate - -.. include:: ../../../watcher_tempest_plugin/README.rst diff --git a/doc/source/glossary.rst b/doc/source/glossary.rst deleted file mode 100644 index 7ab0602..0000000 --- a/doc/source/glossary.rst +++ /dev/null @@ -1,386 +0,0 @@ -.. - Except where otherwise noted, this document is licensed under Creative - Commons Attribution 3.0 License. You can view the license at: - - https://creativecommons.org/licenses/by/3.0/ - -======== -Glossary -======== - -.. glossary:: - :sorted: - -This page explains the different terms used in the Watcher system. - -They are sorted in alphabetical order. - -.. _action_definition: - -Action -====== - -.. watcher-term:: watcher.api.controllers.v1.action - -.. _action_plan_definition: - -Action Plan -=========== - -.. watcher-term:: watcher.api.controllers.v1.action_plan - -.. _administrator_definition: - -Administrator -============= - -The :ref:`Administrator ` is any user who has admin -access on the OpenStack cluster. This user is allowed to create new projects -for tenants, create new users and assign roles to each user. - -The :ref:`Administrator ` usually has remote access -to any host of the cluster in order to change the configuration and restart any -OpenStack service, including Watcher. 
- -In the context of Watcher, the :ref:`Administrator ` -is a role for users which allows them to run any Watcher commands, such as: - -- Create/Delete an :ref:`Audit Template ` -- Launch an :ref:`Audit ` -- Get the :ref:`Action Plan ` -- Launch a recommended :ref:`Action Plan ` manually -- Archive previous :ref:`Audits ` and - :ref:`Action Plans ` - - -The :ref:`Administrator ` is also allowed to modify -any Watcher configuration files and to restart Watcher services. - -.. _audit_definition: - -Audit -===== - -.. watcher-term:: watcher.api.controllers.v1.audit - -.. _audit_template_definition: - -Audit Template -============== - -.. watcher-term:: watcher.api.controllers.v1.audit_template - -.. _availability_zone_definition: - -Availability Zone -================= - -Please, read `the official OpenStack definition of an Availability Zone `_. - -.. _cluster_definition: - -Cluster -======= - -A :ref:`Cluster ` is a set of physical machines which -provide compute, storage and networking resources and are managed by the same -OpenStack Controller node. -A :ref:`Cluster ` represents a set of resources that a -cloud provider is able to offer to his/her -:ref:`customers `. - -A data center may contain several clusters. - -The :ref:`Cluster ` may be divided in one or several -:ref:`Availability Zone(s) `. - -.. _cluster_data_model_definition: - -Cluster Data Model (CDM) -======================== - -.. watcher-term:: watcher.decision_engine.model.collector.base - - -.. _controller_node_definition: - -Controller Node -=============== - -A controller node is a machine that typically runs the following core OpenStack -services: - -- Keystone: for identity and service management -- Cinder scheduler: for volumes management -- Glance controller: for image management -- Neutron controller: for network management -- Nova controller: for global compute resources management with services - such as nova-scheduler, nova-conductor and nova-network. 
- -In many configurations, Watcher will reside on a controller node even if it -can potentially be hosted on a dedicated machine. - -.. _compute_node_definition: - -Compute node -============ - -Please, read `the official OpenStack definition of a Compute Node -`_. - -.. _customer_definition: - -Customer -======== - -A :ref:`Customer ` is the person or company which -subscribes to the cloud provider offering. A customer may have several -:ref:`Project(s) ` -hosted on the same :ref:`Cluster ` or dispatched on -different clusters. - -In the private cloud context, the :ref:`Customers ` are -different groups within the same organization (different departments, project -teams, branch offices and so on). Cloud infrastructure includes the ability to -precisely track each customer's service usage so that it can be charged back to -them, or at least reported to them. - -.. _goal_definition: - -Goal -==== - -.. watcher-term:: watcher.api.controllers.v1.goal - - -.. _host_aggregates_definition: - -Host Aggregate -============== - -Please, read `the official OpenStack definition of a Host Aggregate -`_. - -.. _instance_definition: - -Instance -======== - -A running virtual machine, or a virtual machine in a known state such as -suspended, that can be used like a hardware server. - -.. _managed_resource_definition: - -Managed resource -================ - -A :ref:`Managed resource ` is one instance of -:ref:`Managed resource type ` in a topology -with particular properties and dependencies on other -:ref:`Managed resources ` (relationships). - -For example, a :ref:`Managed resource ` can be one -virtual machine (i.e., an :ref:`instance `) hosted on a -:ref:`compute node ` and connected to another virtual -machine through a network link (represented also as a -:ref:`Managed resource ` in the -:ref:`Cluster Data Model `). - -.. 
_managed_resource_type_definition: - -Managed resource type -===================== - -A :ref:`Managed resource type ` is a type of -hardware or software element of the :ref:`Cluster ` that -the Watcher system can act on. - -Here are some examples of -:ref:`Managed resource types `: - -- `Nova Host Aggregates `_ -- `Nova Servers `_ -- `Cinder Volumes `_ -- `Neutron Routers `_ -- `Neutron Networks `_ -- `Neutron load-balancers `_ -- `Sahara Hadoop Cluster `_ -- ... - -It can be any of the `the official list of available resource types defined in -OpenStack for HEAT -`_. - -.. _efficacy_indicator_definition: - -Efficacy Indicator -================== - -.. watcher-term:: watcher.api.controllers.v1.efficacy_indicator - -.. _efficacy_specification_definition: - -Efficacy Specification -====================== - -.. watcher-term:: watcher.decision_engine.goal.efficacy.base - -.. _efficacy_definition: - -Optimization Efficacy -===================== - -The :ref:`Optimization Efficacy ` is the objective -measure of how much of the :ref:`Goal ` has been achieved in -respect with constraints and :ref:`SLAs ` defined by the -:ref:`Customer `. - -The way efficacy is evaluated will depend on the :ref:`Goal ` -to achieve. - -Of course, the efficacy will be relevant only as long as the -:ref:`Action Plan ` is relevant -(i.e., the current state of the :ref:`Cluster ` -has not changed in a way that a new :ref:`Audit ` would need -to be launched). - -For example, if the :ref:`Goal ` is to lower the energy -consumption, the :ref:`Efficacy ` will be computed -using several :ref:`efficacy indicators ` -(KPIs): - -- the percentage of energy gain (which must be the highest possible) -- the number of :ref:`SLA violations ` - (which must be the lowest possible) -- the number of virtual machine migrations (which must be the lowest possible) - -All those indicators are computed within a given timeframe, which is the -time taken to execute the whole :ref:`Action Plan `. 
- -The efficacy also enables the :ref:`Administrator ` -to objectively compare different :ref:`Strategies ` for -the same goal and same workload of the :ref:`Cluster `. - -.. _project_definition: - -Project -======= - -:ref:`Projects ` represent the base unit of “ownership” -in OpenStack, in that all :ref:`resources ` in -OpenStack should be owned by a specific :ref:`project `. -In OpenStack Identity, a :ref:`project ` must be owned by a -specific domain. - -Please, read `the official OpenStack definition of a Project -`_. - -.. _scoring_engine_definition: - -Scoring Engine -============== - -.. watcher-term:: watcher.api.controllers.v1.scoring_engine - -.. _sla_definition: - -SLA -=== - -:ref:`SLA ` means Service Level Agreement. - -The resources are negotiated between the :ref:`Customer ` -and the Cloud Provider in a contract. - -Most of the time, this contract is composed of two documents: - -- :ref:`SLA ` : Service Level Agreement -- :ref:`SLO ` : Service Level Objectives - -Note that the :ref:`SLA ` is more general than the -:ref:`SLO ` in the sense that the former specifies what service -is to be provided, how it is supported, times, locations, costs, performance, -and responsibilities of the parties involved while the -:ref:`SLO ` focuses on more measurable characteristics such as -availability, throughput, frequency, response time or quality. - -You can also read `the Wikipedia page for SLA `_ -which provides a good definition. - -.. _sla_violation_definition: - -SLA violation -============= - -A :ref:`SLA violation ` happens when a -:ref:`SLA ` defined with a given -:ref:`Customer ` could not be respected by the -cloud provider within the timeframe defined by the official contract document. - -.. _slo_definition: - -SLO -=== - -A Service Level Objective (SLO) is a key element of a -:ref:`SLA ` between a service provider and a -:ref:`Customer `. 
SLOs are agreed as a means of measuring -the performance of the Service Provider and are outlined as a way of avoiding -disputes between the two parties based on misunderstanding. - -You can also read `the Wikipedia page for SLO `_ -which provides a good definition. - -.. _solution_definition: - -Solution -======== - -.. watcher-term:: watcher.decision_engine.solution.base - -.. _strategy_definition: - -Strategy -======== - -.. watcher-term:: watcher.api.controllers.v1.strategy - -.. _watcher_applier_definition: - -Watcher Applier -=============== - -.. watcher-term:: watcher.applier.base - -.. _watcher_database_definition: - -Watcher Database -================ - -This database stores all the Watcher domain objects which can be requested -by the Watcher API or the Watcher CLI: - -- Audit templates -- Audits -- Action plans -- Actions -- Goals - -The Watcher domain being here "*optimization of some resources provided by an -OpenStack system*". - -See :doc:`architecture` for more details on this component. - -.. _watcher_decision_engine_definition: - -Watcher Decision Engine -======================= - -.. watcher-term:: watcher.decision_engine.manager - -.. _watcher_planner_definition: - -Watcher Planner -=============== - -.. 
watcher-term:: watcher.decision_engine.planner.base diff --git a/doc/source/image_src/dia/architecture.dia b/doc/source/image_src/dia/architecture.dia deleted file mode 100644 index cf98ea93f816eb20a19a734148d19041b8d8a82d..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 3198 zcmV-^41x0>iwFP!000023+-K7Z`(K)e($d^Jg*7E`(-@spq=hu2AIxvF}qk`UIN)x z9PKDlAj?U!5BuBqP*NJ-WXqDMHVqB5s9c(#4n^|&4tWms!;hbC;?Z50Eu%DfJthc` zM`1Ec=TWkFJ^t4}e?HgaAK$$CVIBqF!=FVK+>GEC$<%zx9Kv1 zBKgDZPLY~=?B8o;s!~fRlq}wS_m+QG-ct3&Q(gORv=$`439?0$?BYP%#AQQJq$#D) z7{X|S6<6f3O-_fK4ht6#3zrQG7cDPu(=5+|DBp#grfD1oNr{?gtFS%Jau&qU#SWR( z!HTOW&(kCKe-*^bCI*yWe;#e8j@(5S%?}UUjS@Ar+(+~L`tp;b<|=D{a8&zUw2Y>4 zxbNIhkmNxemceM{q*yWy`ikFmLBFh6rnhs9u});9xjh`v&}L!HC^jKu(fR0I`f2n<##O`w5cec(lwP?8FT-aQV|;!uT%Cqgk-dFQ?zZ&Y<7> zOf=s7@NS@++^}x~rGUvfOw>qnfk`*G?QvP_PGZ~*2z2-&xe50_(=b`x?2%B1MASYV zP^V=so@o3@8x%TVuPXRGydRaJv`4k^Sng5SW$Poj1MqMY7H>Jfrd|jehmhfnj*>=Wo;+MB)JTe zd121!M%IY}D5W%Mxi}7eW_G)MEBiLzPN_78hksejD%&;h?V~4ASPnAsGRU&@zP@fs zJ6C=ghsolwwh-Mxj_Q}TQZJdiYNO9pyKA8*2v_Yh50=-*Z=Y?to}}SUiXG-Ko4~xc z+|d$`|1-LVnX+16&(mTlbM*ATFgoz=_Uu-z{WxB{C&x=6Gl1Kr5f&4mZUg^jXb6X^ zyod>5Cfh+CkPi2 z%Rt1+X=rp01~y@OsD9JkcFFV%aUq_9l_~MtsGjOyG}Zs~o2UALsQw)Mc`RT?UGQ?QnWY|DW>7CMH zh~0E0c1G>OX|WTFl|yC+9AZiwLMKckQlK(Kou=aUV4n9#y+=AQh5er11>PMU#vNq> z5oJSB_~xROcSv3G7U_JX6eoxBmCqx(Nl+uQa2_$vfNSTw;DR&I4&G>2G4o-3@eJWqS z!-e*hfedm~*=Hg9Eaa~)3t8?A z`#rxU)o=OcZ_Pjn%%t;?5)>RIZU;FNTv}+O ziR+cLIz#J+I7Y@(fE_K+gi4UVg)Wtw zZkmI|l^#rN7UU-}S<5nK)M=yy_woE4nfqz%#Xk)i2cB%|autx|e4g-cD!AP+M433*}JYMRo-1}PR z?hWFH&NCP~os$%(LAXjP)?BzeSHU7tW^9@Q(Mqw2J@EjDrif^eoCw#;F?E*iJ&S?x zswgGcbI&svK;?BDJcLWkk%yS4DV3XT0(=fC8Vv6 z*EUn)3!I}idjHO+@DG&2&yiu=?z~E&8S|@3$5xf+3EUS5}{`sP7=aI(1y;$_R( zrhoyNVmqZXkmYK+gzCd_>q+a>J+`Adtou7lZ3Z<}ppBd!CJQj6gb{2)Ng0nu7}D(d zDzffP*DQP~Z10Z2$g&M1;vGCASIz7tsjFvrIEu~Kzr?tROL kBDy%eqWvMHJ|^;u#YC1D@Mn<)H*a43ADb%Mb-1|z0IJ$K9{>OV diff --git a/doc/source/image_src/dia/functional_data_model.dia 
b/doc/source/image_src/dia/functional_data_model.dia deleted file mode 100644 index ae7654dba36ad7849ce8c6e2457c4251e4e64631..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 3212 zcmaLLc{~#i1IO{4xzFavnOu>ao65}$llwO1&ai}xa^^lp%zcwFjm=GNk8Aja%Azt? zBIhPiLU>-!f4@I^J>NgRulMUChGM$(ztPfB?l0%8f_S2r2jVxhzH!ul za}~}zoz(Tgd>R!eE2{ZpCHi-C`5D_@=&*Z%`J)|)=x*jjsr<=h8PMGQHrb^GC9aAwaP~%l{K|A+QDIAV1{~?mig+wuC@n~ zw5^2ds=&Ta-KjVb_f?7g^L67jWkEfR`?U3h|FyW4@;^B`rUf~T#UJuA`)AcoFcrT^ zy23r-I`<-kqRDT+Cx^+jE-AY1{Z?DU*xlk(HBuU!b`|9lijmId&2+NDdQ`8Q?^1K; zIM<8iq%Stzny1BS`vLm_zbjnxw+5+e?Fgm=fu3i4R=kTtGRFD{6w^LEJpMYVDuY~4c87)yGx|4FTb06BSmk~ z?x*XiW;IV_dA6bx;TpHOQ)Wj{t4+INhw8O#lB$yH&nOuw8TW?L9dxSc=B$6Cyq(#T z&|7>l^RXhG_Y9MLRS!smKF!XTH33FtwNEOuZAT=mEdl{BF&3y?>PV_W5u;Iaq#&0M)wn$cBAfv(ol<1P-$&pV~Kg~W@~xI z(8dpjvZtmGS}(WzeG)tBRfrO8=ojku*d323(K@e`VIu;`zk-KHI)oBG5Ws+6E} z3poMy*#UWPrS4-RQ!L&~|1wlxo@5Du%t$-|RkX*@*3Npv5!b`Ks1&Y(}7E=>tI9)=X(Vp(-NMO)L+Z6q<1z^b7 zkLYwnMo>6m|!!tDj(Z9(6c$w7-8>hA9P8U_#EQdE*yz=tZr zw|E|$C;6X04C$2aH^JPV-;Dejs=W>=u{qui--&;vkn7k;=PDVm*3Gcw&391CocVPV zS9@s|HDp;V(L)7lrrhH8zKeT~}FS1}LuQ3E`Id&}`1qqAE%F zm0-$C?6@WmJJNDPD8Bu43tTMA5kyr4b^R2Y{0e6{d}MPnx2wA>YB?~+unC((vRAU* zxsE~N{I8!XDkHGE#1DE{vrkOd9b_WNLDo~9FvvKe;N$t#{Y#t9mHx2}4BdQx!F>6a zbN%IRJ{TAo5xhoGLfXSjBuf*was{zkSdhmLx>=vJ4oE-G(yvaIdP6hYH$>>HK)$V+ zfpWTwf!|?|WB_?gd0DT9@cM8RWBmM8AGl1RC$fyN_)LdkO3qrO581&YMJHIoSOq4~ zG%P{M(+7S-b1GhFea>N+Pk_!h&R+v`qO`HB^tZS=byxldvTE$kq3vUG_>NyIQ#p>Q ze7rIo|NSPw8K&;)3w!5ObH(r$ZZE~{+GO#Y_Qz5*z>eix4yW}@c{Ow`9hr5`j}q$1 zpPg+Ij7Grs{9WnN*z#Ydn`y}35aA}=_vx9gVV&M!`fE0Ui=f6R# z0)AE~iQe#OeG2HwISW#jc@a)P%|^CgI0fZ;fKt9K9`ou43uhq$z)mrLgjA0ab4 ztq<26Cy!NP^EbMPXcphL#Nv9htP}s`0}=s}doz7M9wV-JRYhZAH9bZ1hz^@{*dI2yAbr6Nro|hoEedqsTBvz2_&X?Eb=TMk1fI^VCS> zlEGXU21tq6X4cNa;f2yr$A5xjXAtYj{m^V<*5D6X_^uN_DFAO+pO*^R#c1oAjyh(R zu?=onpCM{8&csffY*m)_g&A?bXPI`H(%^CQQ?ZEs>X}iiF!(?_gIomKW*L8B>xKkXY?_;C+nf1IRf&Z(J&MU6&T3g<% z3eTA4EG#X{j}nOP4A9@g+8_U|QT@Cv`S!R-*LYQf_g35445ec>_OjQcK#QHcfL^H81w| 
zCG{(<%i4Einbuah$7Qy79^0$Xry5Eq4#xL$`Mk5l)3GO_mPh&Fa2N~GN+rQ%F*1Hs;s9E$$Q?w z^Um&HF#unmK#E3PKSV#*=4oo?27&s|h>)8FN$3m&0m%76t0hZ=c&F8;!>}jG*22lv zNM6GDdJ)Hng5_gm5=6XCbg7LJ}XXhA=^)i zlKhPG>8v8~x(5djKbt+yk@atYPHMI9OpTjc!xfl9#diBuhy(j(n+k(eS9nY*;jyBj zojcE9nVGg>VGG(X{uODkKq;wYPyjJ0>?PRP1y-7S1=fA${5+;9X$|@YL%=wqCFAf>Ka~OOR0! zY&Z~44E7VV@2m6xLxY#CB7`af{ACNxXxkn=na{UVX{>dE#Vv2|vU>~#`B-9Eyz4Bn zCuX8y-!>Pv%iF8l4@Qm(jm0?XZ3bkk(r+)1nE040j+K7F(=6X=?iOAOyCOM9>T`^vJR8icj&3 zF~$8DnA+$c>!YkxymS$k%XEKUeMfTYnlFdR6XRlo8R6`2S!&7uN(1Bz(eH#5v8DBa W@fcKLqv+C^n3j{5K^{HLrT+qi5H(}~ diff --git a/doc/source/image_src/plantuml/README.rst b/doc/source/image_src/plantuml/README.rst deleted file mode 100644 index e73c9c0..0000000 --- a/doc/source/image_src/plantuml/README.rst +++ /dev/null @@ -1,14 +0,0 @@ -plantuml -======== - - -To build an image from a source file, you have to upload the plantuml JAR file -available on http://plantuml.com/download.html. -After, just run this command to build your image: - -.. code-block:: shell - - $ cd doc/source/images - $ java -jar /path/to/plantuml.jar doc/source/image_src/plantuml/my_image.txt - $ ls doc/source/images/ - my_image.png diff --git a/doc/source/image_src/plantuml/action_plan_state_machine.txt b/doc/source/image_src/plantuml/action_plan_state_machine.txt deleted file mode 100644 index 0eab6d0..0000000 --- a/doc/source/image_src/plantuml/action_plan_state_machine.txt +++ /dev/null @@ -1,18 +0,0 @@ -@startuml - -[*] --> RECOMMENDED: The Watcher Planner\ncreates the Action Plan -RECOMMENDED --> PENDING: Adminisrator launches\nthe Action Plan -PENDING --> ONGOING: The Watcher Applier receives the request\nto launch the Action Plan -ONGOING --> FAILED: Something failed while executing\nthe Action Plan in the Watcher Applier -ONGOING --> SUCCEEDED: The Watcher Applier executed\nthe Action Plan successfully -FAILED --> DELETED : Administrator removes\nAction Plan -SUCCEEDED --> DELETED : Administrator removes\nAction Plan -ONGOING --> CANCELLED : 
Administrator cancels\nAction Plan -RECOMMENDED --> CANCELLED : Administrator cancels\nAction Plan -RECOMMENDED --> SUPERSEDED : The Watcher Decision Engine supersedes\nAction Plan -PENDING --> CANCELLED : Administrator cancels\nAction Plan -CANCELLED --> DELETED -SUPERSEDED --> DELETED -DELETED --> [*] - -@enduml diff --git a/doc/source/image_src/plantuml/audit_state_machine.txt b/doc/source/image_src/plantuml/audit_state_machine.txt deleted file mode 100644 index 860a202..0000000 --- a/doc/source/image_src/plantuml/audit_state_machine.txt +++ /dev/null @@ -1,17 +0,0 @@ -@startuml - -[*] --> PENDING: Audit requested by Administrator -PENDING --> ONGOING: Audit request is received\nby the Watcher Decision Engine -ONGOING --> FAILED: Audit fails\n(no solution found, technical error, ...) -ONGOING --> SUCCEEDED: The Watcher Decision Engine\ncould find at least one Solution -ONGOING --> SUSPENDED: Administrator wants to\nsuspend the Audit -SUSPENDED --> ONGOING: Administrator wants to\nresume the Audit -FAILED --> DELETED : Administrator wants to\narchive/delete the Audit -SUCCEEDED --> DELETED : Administrator wants to\narchive/delete the Audit -PENDING --> CANCELLED : Administrator cancels\nthe Audit -ONGOING --> CANCELLED : Administrator cancels\nthe Audit -CANCELLED --> DELETED : Administrator wants to\narchive/delete the Audit -SUSPENDED --> DELETED: Administrator wants to\narchive/delete the Audit -DELETED --> [*] - -@enduml diff --git a/doc/source/image_src/plantuml/sequence_architecture_cdmc_sync.txt b/doc/source/image_src/plantuml/sequence_architecture_cdmc_sync.txt deleted file mode 100644 index 6792b1c..0000000 --- a/doc/source/image_src/plantuml/sequence_architecture_cdmc_sync.txt +++ /dev/null @@ -1,41 +0,0 @@ -@startuml -skinparam maxMessageSize 100 - -actor "Administrator" - -== Initialization == - -"Administrator" -> "Decision Engine" : Start all services -"Decision Engine" -> "Background Task Scheduler" : Start - -activate "Background Task Scheduler" 
-"Background Task Scheduler" -> "Cluster Model Collector Loader"\ -: List available cluster data models -"Cluster Model Collector Loader" --> "Background Task Scheduler"\ -: list of BaseClusterModelCollector instances - -loop for every available cluster data model collector - "Background Task Scheduler" -> "Background Task Scheduler"\ - : add periodic synchronization job - create "Jobs Pool" - "Background Task Scheduler" -> "Jobs Pool" : Create sync job -end -deactivate "Background Task Scheduler" - -hnote over "Background Task Scheduler" : Idle - -== Job workflow == - -"Background Task Scheduler" -> "Jobs Pool" : Trigger synchronization job -"Jobs Pool" -> "Nova Cluster Data Model Collector" : synchronize - -activate "Nova Cluster Data Model Collector" - "Nova Cluster Data Model Collector" -> "Nova API"\ - : Fetch needed data to build the cluster data model - "Nova API" --> "Nova Cluster Data Model Collector" : Needed data - "Nova Cluster Data Model Collector" -> "Nova Cluster Data Model Collector"\ - : Build an in-memory cluster data model - ]o<-- "Nova Cluster Data Model Collector" : Done -deactivate "Nova Cluster Data Model Collector" - -@enduml diff --git a/doc/source/image_src/plantuml/sequence_create_and_launch_audit.txt b/doc/source/image_src/plantuml/sequence_create_and_launch_audit.txt deleted file mode 100644 index fd6035f..0000000 --- a/doc/source/image_src/plantuml/sequence_create_and_launch_audit.txt +++ /dev/null @@ -1,24 +0,0 @@ -@startuml - - -actor Administrator - -Administrator -> "Watcher CLI" : watcher audit create -a - -"Watcher CLI" -> "Watcher API" : POST audit(parameters) -"Watcher API" -> "Watcher Database" : create new audit in database (status=PENDING) - -"Watcher API" <-- "Watcher Database" : new audit uuid -"Watcher CLI" <-- "Watcher API" : return new audit URL - -Administrator <-- "Watcher CLI" : new audit uuid - -"Watcher API" -> "AMQP Bus" : trigger_audit(new_audit.uuid) -"AMQP Bus" -> "Watcher Decision Engine" : 
trigger_audit(new_audit.uuid) (status=ONGOING) - -ref over "Watcher Decision Engine" - Trigger audit in the - Watcher Decision Engine -end ref - -@enduml diff --git a/doc/source/image_src/plantuml/sequence_create_audit_template.txt b/doc/source/image_src/plantuml/sequence_create_audit_template.txt deleted file mode 100644 index ef422a5..0000000 --- a/doc/source/image_src/plantuml/sequence_create_audit_template.txt +++ /dev/null @@ -1,22 +0,0 @@ -@startuml - -actor Administrator - -Administrator -> "Watcher CLI" : watcher audittemplate create \ -[--strategy-uuid ] -"Watcher CLI" -> "Watcher API" : POST audit_template(parameters) - -"Watcher API" -> "Watcher Database" : Request if goal exists in database -"Watcher API" <-- "Watcher Database" : OK - -"Watcher API" -> "Watcher Database" : Request if strategy exists in database (if provided) -"Watcher API" <-- "Watcher Database" : OK - -"Watcher API" -> "Watcher Database" : Create new audit_template in database -"Watcher API" <-- "Watcher Database" : New audit template UUID - -"Watcher CLI" <-- "Watcher API" : Return new audit template URL in HTTP Location Header -Administrator <-- "Watcher CLI" : New audit template UUID - -@enduml - diff --git a/doc/source/image_src/plantuml/sequence_from_audit_execution_to_actionplan_creation.txt b/doc/source/image_src/plantuml/sequence_from_audit_execution_to_actionplan_creation.txt deleted file mode 100644 index d36274c..0000000 --- a/doc/source/image_src/plantuml/sequence_from_audit_execution_to_actionplan_creation.txt +++ /dev/null @@ -1,44 +0,0 @@ -@startuml - -skinparam maxMessageSize 200 - -"Decision Engine" -> "Decision Engine" : Execute audit -activate "Decision Engine" -"Decision Engine" -> "Decision Engine" : Set the audit state to ONGOING - -"Decision Engine" -> "Strategy selector" : Select strategy -activate "Strategy selector" -alt A specific strategy is provided -"Strategy selector" -> "Strategy selector" : Load strategy and inject the \ -cluster data model -else Only a 
goal is specified -"Strategy selector" -> "Strategy selector" : select strategy -"Strategy selector" -> "Strategy selector" : Load strategy and inject the \ -cluster data model -end -"Strategy selector" -> "Decision Engine" : Return loaded Strategy -deactivate "Strategy selector" - -"Decision Engine" -> "Strategy" : Execute the strategy -activate "Strategy" -"Strategy" -> "Strategy" : **pre_execute()**Checks if the strategy \ -pre-requisites are all set. -"Strategy" -> "Strategy" : **do_execute()**Contains the logic of the strategy -"Strategy" -> "Strategy" : **post_execute()** Set the efficacy indicators -"Strategy" -> "Strategy" : Compute the global efficacy of the solution \ -based on the provided efficacy indicators -"Strategy" -> "Decision Engine" : Return the solution -deactivate "Strategy" - -"Decision Engine" -> "Planner" : Plan the solution that was computed by the \ -strategy -activate "Planner" -"Planner" -> "Planner" : Store the planned solution as an action plan with its \ -related actions and efficacy indicators -"Planner" --> "Decision Engine" : Done -deactivate "Planner" -"Decision Engine" -> "Decision Engine" : Update the audit state to SUCCEEDED - -deactivate "Decision Engine" - -@enduml diff --git a/doc/source/image_src/plantuml/sequence_launch_action_plan.txt b/doc/source/image_src/plantuml/sequence_launch_action_plan.txt deleted file mode 100644 index 8c60727..0000000 --- a/doc/source/image_src/plantuml/sequence_launch_action_plan.txt +++ /dev/null @@ -1,23 +0,0 @@ -@startuml - -actor Administrator - -Administrator -> "Watcher CLI" : watcher actionplan start - -"Watcher CLI" -> "Watcher API" : PATCH action_plan(state=PENDING) -"Watcher API" -> "Watcher Database" : action_plan.state=PENDING - -"Watcher CLI" <-- "Watcher API" : HTTP 200 - -Administrator <-- "Watcher CLI" : OK - -"Watcher API" -> "AMQP Bus" : launch_action_plan(action_plan.uuid) -"AMQP Bus" -> "Watcher Applier" : launch_action_plan(action_plan.uuid) - -ref over "Watcher Applier" - 
Launch Action Plan in the - Watcher Applier -end ref - -@enduml - diff --git a/doc/source/image_src/plantuml/sequence_launch_action_plan_in_applier.txt b/doc/source/image_src/plantuml/sequence_launch_action_plan_in_applier.txt deleted file mode 100644 index fe9caab..0000000 --- a/doc/source/image_src/plantuml/sequence_launch_action_plan_in_applier.txt +++ /dev/null @@ -1,31 +0,0 @@ -@startuml - -"AMQP Bus" -> "Watcher Applier" : launch_action_plan(action_plan.uuid) -"Watcher Applier" -> "Watcher Database" : action_plan.state=ONGOING -"Watcher Applier" -[#blue]> "AMQP Bus" : notify action plan state = ONGOING -"Watcher Applier" -> "Watcher Database" : get_action_list(action_plan.uuid) -"Watcher Applier" <-- "Watcher Database" : actions -loop for each action of the action flow -create Action -"Watcher Applier" -> Action : instantiate Action object with target resource id\n and input parameters -"Watcher Applier" -> Action : validate_parameters() -"Watcher Applier" <-- Action : OK -"Watcher Applier" -[#blue]> "AMQP Bus" : notify action state = ONGOING -"Watcher Applier" -> Action : preconditions() -"Watcher Applier" <-- Action : OK -"Watcher Applier" -> Action : execute() -alt action is "migrate instance" -Action -> "Nova API" : migrate(instance_id, dest_host_id) -Action <-- "Nova API" : OK -else action is "disable hypervisor" -Action -> "Nova API" : host-update(host_id, maintenance=true) -Action <-- "Nova API" : OK -end -"Watcher Applier" <-- Action : OK -"Watcher Applier" -> "Watcher Database" : action.state=SUCCEEDED -"Watcher Applier" -[#blue]> "AMQP Bus" : notify action state = SUCCEEDED -end -"Watcher Applier" -> "Watcher Database" : action_plan.state=SUCCEEDED -"Watcher Applier" -[#blue]> "AMQP Bus" : notify action plan state = SUCCEEDED - -@enduml diff --git a/doc/source/image_src/plantuml/sequence_overview_watcher_usage.txt b/doc/source/image_src/plantuml/sequence_overview_watcher_usage.txt deleted file mode 100644 index 36c526f..0000000 --- 
a/doc/source/image_src/plantuml/sequence_overview_watcher_usage.txt +++ /dev/null @@ -1,37 +0,0 @@ -@startuml - -actor Administrator - -== Create some Audit settings == - -Administrator -> Watcher : create new Audit Template (i.e. Audit settings : goal, scope, ...) -Watcher -> Watcher : save Audit Template in database -Administrator <-- Watcher : Audit Template UUID - -== Launch a new Audit == - -Administrator -> Watcher : launch new Audit of the Openstack infrastructure resources\nwith a previously created Audit Template -Administrator <-- Watcher : Audit UUID -Administrator -> Watcher : get the Audit state -Administrator <-- Watcher : ONGOING -Watcher -> Watcher : compute a solution to achieve optimization goal -Administrator -> Watcher : get the Audit state -Administrator <-- Watcher : SUCCEEDED - -== Get the result of the Audit == - -Administrator -> Watcher : get Action Plan -Administrator <-- Watcher : recommended Action Plan and estimated efficacy -Administrator -> Administrator : verify the recommended actions\nand evaluate the estimated gain vs aggressiveness of the solution - -== Launch the recommended Action Plan == - -Administrator -> Watcher : launch the Action Plan -Administrator <-- Watcher : Action Plan has been launched -Watcher -> Watcher : trigger Actions on Openstack services -Administrator -> Watcher : get the Action Plan state -Administrator <-- Watcher : ONGOING -Administrator -> Watcher : get the Action Plan state -Administrator <-- Watcher : SUCCEEDED - -@enduml diff --git a/doc/source/image_src/plantuml/sequence_trigger_audit_in_decision_engine.txt b/doc/source/image_src/plantuml/sequence_trigger_audit_in_decision_engine.txt deleted file mode 100644 index 3bfc815..0000000 --- a/doc/source/image_src/plantuml/sequence_trigger_audit_in_decision_engine.txt +++ /dev/null @@ -1,50 +0,0 @@ -@startuml - -skinparam maxMessageSize 100 - -"AMQP Bus" -> "Decision Engine" : trigger audit - -activate "Decision Engine" - -"Decision Engine" -> "Database" 
: update audit.state = ONGOING -"AMQP Bus" <[#blue]- "Decision Engine" : notify new audit state = ONGOING -"Decision Engine" -> "Database" : get audit parameters (goal, strategy, ...) -"Decision Engine" <-- "Database" : audit parameters (goal, strategy, ...) -"Decision Engine" --> "Decision Engine"\ -: select appropriate optimization strategy (via the Strategy Selector) -create Strategy -"Decision Engine" -> "Strategy" : execute strategy -activate "Strategy" - "Strategy" -> "Cluster Data Model Collector" : get cluster data model - "Cluster Data Model Collector" --> "Strategy"\ - : copy of the in-memory cluster data model - loop while enough history data for the strategy - "Strategy" -> "Ceilometer API" : get necessary metrics - "Strategy" <-- "Ceilometer API" : aggregated metrics - end - "Strategy" -> "Strategy"\ - : compute/set needed actions for the solution so it achieves its goal - "Strategy" -> "Strategy" : compute/set efficacy indicators for the solution - "Strategy" -> "Strategy" : compute/set the solution global efficacy - "Decision Engine" <-- "Strategy"\ - : solution (unordered actions, efficacy indicators and global efficacy) -deactivate "Strategy" - -create "Planner" -"Decision Engine" -> "Planner" : load actions scheduler -"Planner" --> "Decision Engine" : planner plugin -"Decision Engine" -> "Planner" : schedule actions -activate "Planner" - "Planner" -> "Planner"\ - : schedule actions according to scheduling rules/policies - "Decision Engine" <-- "Planner" : new action plan -deactivate "Planner" -"Decision Engine" -> "Database" : save new action plan in database -"Decision Engine" -> "Database" : update audit.state = SUCCEEDED -"AMQP Bus" <[#blue]- "Decision Engine" : notify new audit state = SUCCEEDED - -deactivate "Decision Engine" - -hnote over "Decision Engine" : Idle - -@enduml diff --git a/doc/source/image_src/plantuml/watcher_db_schema_diagram.txt b/doc/source/image_src/plantuml/watcher_db_schema_diagram.txt deleted file mode 100644 index 
8b56c4a..0000000 --- a/doc/source/image_src/plantuml/watcher_db_schema_diagram.txt +++ /dev/null @@ -1,153 +0,0 @@ -@startuml -!define table(x) class x << (T,#FFAAAA) >> -!define primary_key(x) x -!define foreign_key(x) x -hide methods -hide stereotypes - -table(goals) { - primary_key(id: Integer) - uuid : String[36] - name : String[63] - display_name : String[63] - efficacy_specification : JSONEncodedList, nullable - - created_at : DateTime - updated_at : DateTime - deleted_at : DateTime - deleted : Integer -} - - -table(strategies) { - primary_key(id: Integer) - foreign_key(goal_id : Integer) - uuid : String[36] - name : String[63] - display_name : String[63] - parameters_spec : JSONEncodedDict, nullable - - created_at : DateTime - updated_at : DateTime - deleted_at : DateTime - deleted : Integer -} - - -table(audit_templates) { - primary_key(id: Integer) - foreign_key("goal_id : Integer") - foreign_key("strategy_id : Integer, nullable") - uuid : String[36] - name : String[63], nullable - description : String[255], nullable - scope : JSONEncodedList - - created_at : DateTime - updated_at : DateTime - deleted_at : DateTime - deleted : Integer -} - - -table(audits) { - primary_key(id: Integer) - foreign_key("goal_id : Integer") - foreign_key("strategy_id : Integer, nullable") - uuid : String[36] - audit_type : String[20] - state : String[20], nullable - interval : Integer, nullable - parameters : JSONEncodedDict, nullable - scope : JSONEncodedList, nullable - auto_trigger: Boolean - - created_at : DateTime - updated_at : DateTime - deleted_at : DateTime - deleted : Integer -} - - -table(action_plans) { - primary_key(id: Integer) - foreign_key("audit_id : Integer, nullable") - foreign_key("strategy_id : Integer") - uuid : String[36] - state : String[20], nullable - global_efficacy : JSONEncodedDict, nullable - - created_at : DateTime - updated_at : DateTime - deleted_at : DateTime - deleted : Integer -} - - -table(actions) { - primary_key(id: Integer) - 
foreign_key("action_plan_id : Integer") - uuid : String[36] - action_type : String[255] - input_parameters : JSONEncodedDict, nullable - state : String[20], nullable - parents : JSONEncodedList, nullable - - created_at : DateTime - updated_at : DateTime - deleted_at : DateTime - deleted : Integer -} - - -table(efficacy_indicators) { - primary_key(id: Integer) - foreign_key("action_plan_id : Integer") - uuid : String[36] - name : String[63] - description : String[255], nullable - unit : String[63], nullable - value : Numeric - - created_at : DateTime - updated_at : DateTime - deleted_at : DateTime - deleted : Integer -} - -table(scoring_engines) { - primary_key(id: Integer) - uuid : String[36] - name : String[63] - description : String[255], nullable - metainfo : Text, nullable - - created_at : DateTime - updated_at : DateTime - deleted_at : DateTime - deleted : Integer -} - -table(service) { - primary_key(id: Integer) - name: String[255] - host: String[255] - last_seen_up: DateTime - - created_at : DateTime - updated_at : DateTime - deleted_at : DateTime - deleted : Integer -} - - "goals" <.. "strategies" : Foreign Key - "goals" <.. "audit_templates" : Foreign Key - "strategies" <.. "audit_templates" : Foreign Key - "goals" <.. "audits" : Foreign Key - "strategies" <.. "audits" : Foreign Key - "action_plans" <.. "actions" : Foreign Key - "action_plans" <.. "efficacy_indicators" : Foreign Key - "strategies" <.. "action_plans" : Foreign Key - "audits" <.. 
"action_plans" : Foreign Key - -@enduml diff --git a/doc/source/images/action_plan_state_machine.png b/doc/source/images/action_plan_state_machine.png deleted file mode 100644 index 41018530a8d781d61600fcc7bcda4ccd497b049d..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 48927 zcmce;Wk8i%*EWh>lwctx(o#w{(hULv(gvN<(x^1jCEX>G(u+_*=}zhH?&gfOa6ixc zyx)7i@0|1F>>qAr-S=E`jydwW#=!fTj3~zS``6LX&@jZsgyhlCF4Cc)T@b%|9)6Ne zD;EdO*Zm!KSlW~ zpSpD~ehw~v=&tdd8E-aERU)R9_}s}cK}lZw(&ZfE{XM6ns=8SL{IpNmjjk7`R+?n* z`L&KmQTWm(Z&Js~yetyg`+=^N{6VK7MaY_=OhEV9NAq8w@#Q47ZitC4$Vrw|Mzv93 z-wg{CpwTtFMEQ1u%O@wKO8u2 zJI$y%+$HWgL5wyK1oo{hC*rT#G%`H9<2)vw#muI19plk(QtQu}Nq$pu?WIdUNxft! z#~a9e9whEx$E=yt7Q6gS=X!au59zIJ>h+gu#DW{KEXH|_9)Sv_#hqT)-#3b6s47wU z<<^u8OhpguIZRG=q&^NzwzxdTO4{+`5C5JKHxZ9eI@#?`-qTBCm=vNHIW@J=&|J~P zh4`Pp8(12~Qh&Z1yXxXX9{Q|kA-r6jp(((+#W3f9iItu_CqGR-%*R*1a3M8FN8H!1 z*BD1x`8e~*qJ%)`eIu!Ep<54KZY6#?cj;0PyJmuqo7W}3WxlrW)(Ks&t`_Fn3G|`i z#YL{~pZGss78VjJEG&FR0ZF; z=ZJ{(u6eRRW)>DXoT1n~Hgdv`4|9WGB*znhhuCLJ9f8k!<<)5aV15}MKSXtm4J z^wP32VNua6IX6pT)Hz*?K79Om#Y#y?NJvUbNSK7?D65~sk5V_0aFw_L(HbV-W%4gJ#vznCDOujJ1NV26` zM(%>N$32PtiTOs<&)aP@P=5UIfy|mpRGW~~YVP~{3qy6-XHUk-q<0w>QBzaHE}~hC zdPh%>M`;kEmoW+UJ`x|jobOC+G{x4D^}-S8jwRU;uq)HdP5*Ru`U{4JhE;nLzDr9> zDJdy)bNUVUf|(~Szmr_>VoMJ zea<>J63mu_zT_fhuYHllt^UR}lC!0mw(KkK3(L#PPt>BVe!PN3^!Nc5`TKe%#t-)e z*dN|NYAc$Ryn;f=ec?jYWp7+cGEXuJ3W|Gyp21J2sPXtQNS4sgq0B==nv{iwg)>&> zwZ~K->{Q|M)GJNRIBs&%w=!4HB-u)p)VD~kD~1_VA!qoM2#la!Vd?VO?eGX#Topbu ztG^cr@1RZs8%1ry;%pT#M%32f>QE{N&mg}a?i_WQ@K4lR|L+!o75>{Is5AZV7lCd3 z$4>rk?4O++C}QG0{w{*ATlx46D`M&IqxF9UufU=0N*!zJySRNYUgOF4ZtW>yfp!B< zk(LJnfpFRGQP#MIHhd4Kd)nICTBA_E12kLI;woxal|h2dMXOxHceL_e^1pta)Hymd zv@|u<-QSPR z&H4EFbVS`cF!6pZ^kjLW<1xNl8w1O=G5xLWXqOG!Z}xTFDSZ6BB8azCMof|HSvY_9`B~ zfg{6&w+L7@~jq#?RNLOd)!{d!Ub*_=2lR(Ut&(j32?MlNo|E#RZH2-@UbS$iRCOsKD zyRt26nN&%aQ61(V=~qGGCl zS`6{S$$PQHor!ph5|~fe;t5+wVn4I}wK13NXR1A#nwk(&a4Dq3#KazR6y;VOH2(Rc 
z!m*!NP#}MRVTKvbJZN$+Y3E{9_UZXDHFCpskt~fs0Dj-G`NiWAK$$jK`t5I-PcER zp{c1U^~v(W!ot$h*P*c^{Eu447}0$cT5)iA=c9LcbJNq?>vhB>CPnztqV{J7NNvV; zW`9jxY{8L{9CjPp^Xu#Daw{G=j9*+EN|fyHQlTN@9bZJ84K%0b_h5w|QufVxSC zMiL7AA0}$!9^D^{AU<_xDrjhE_<5-9Z~XlE^U{qwV#fD)?5$K)qgcMZ+DI&$$5*X( zuI$h^>dPs@zCc?MG5(eX=Y{1X--BLydU}dBg_5c`M~4PvhPM+d`1KF|*!XxNYm}dd z5pz^{_?`KP>Y4kCGnzS^mNTCa2#0h_F+^8{UC4aap;VP!g@ZQivT-HIe+5e}{Y@cX zZ4|-l@>HX|2ff`PXIhnaNRSjmNV5FyhMuUKr)OIv6Vk07{9+!Tbpr(KT|t1IVq`G@%xZk(^c^K z_LWstRlE-#d|x{X%$dcu+$Qrx*(w2@r{m_{kdc86z6L{+zP7=lgPk1*2Z!|M$0Q^q z{CJ!D!Mk#Ib{0n{m=$I*XhSmtIpFvg1V$D&HtZ5GAQ|f3(5a~ z#q~MjtqAKo7G53S&QgitF$*CncZxb~%*<9!EgU>a@3UN=&4?;1d`YgH=?9gk`e1I* z38Gv=;v!1wi^*E| z+WA~Sy=O16G1@9|v~t5+(3y69a?LjFw@cIP-X3}ArL#bmz-N3&wsN}O`w89Aiak7} zEr_45oG4a8_Whmqdt7ZaqI%*H+>UAJiL@vwCg@FjR%W{e){-#XGV@D%aQ4Hx z;amjn>ef*5reIvm!{b|LiPe@z*EeT64zkm`YSfodF4rb^UCBhc_1D7JP1X^O01gt4 zDf?#g!T6p78Xd=6)FY1*8KoU|ezD1OA9Zh5&zI+OG0yGNj%sNRo+5gG>sA?~w(qDA zXT@pV?Axg%v;H|#V=HUIcBCroL|jpqK*QErdE`{ubS*Hay%ih{~jW%m7jlI~5!F!aB^4!k$T-Be=9w4Ex82WNHF`ab=W4RBWy&=MfYApW)rL@3YnYAsKy((NR%*tf;Ew;D(N@*mxAi#g8Ma zD~fT*?>NHc-9N-se*QdA1a-V)0lQ8E9oefv3AyDnxxA9mkNHNdB}URItvNNIMw7dU z)bz12`MQeFW*)38YBCQ=1lNx2Ru7H$PZ-pTQ28EcD)sbX>58~v(YHq{#2Ho$8%$DqeM zbjXL#c4D!m&!9I;sji`++v>Q|pTFC!SAD`cyWQtP-xaDiJX}qA%vk%jQ6Y`H%f>lP z=J@@=9VP!arkJU9gAVG0(=7IhzD*@M6tivdM1;p$6f|!v8pE}WR_4B5jA+Pru9gxP zC-Fg5QUodmhslR-FZ2oUXRgPtbX~gPywNFnLJ`lh5EW%XDI1sf`d3nxGRwx=$??(F z*6_J=UEN+AOW$OxHyVEy6ijYBJ8ArkOX(jpz7SgHf9yFlR6QUU5`WsTx1Nk1ec1cy z&8v=Z+Vw$%?Z+~LjfjtFD~>i>bFAKMRaJbU(*?T7ZRK#`?llS4IAICbrKF|JOi#~S zaaL-!mE1xJ{@MM;FO!$w-L2j%4&x9iN7xqSx>@X^8|aSXEo>1`E`D~* zF3Xv!T;A{&GC4I-S092p==$~Rj~+ehCr2shTPhZgyQw`6G{?A$mi4h*@hXvmzXOQ3 z_h+x&iK~1|(yKrA%Xgh|=xyUhUg=&2qmX^5o?p@jmdO_{3U^1;Dvke~r?K>E-t5mm zZ7(Buy{Bra@uwqT8kb{Dw-#bHE-vo3Z{H*(CI9^Mbv-JxsXc08czVa?D(~MjK;AMO zWl0tj8=v=BC8Vvm_(T(fqtVKrXgF=<`Tj448A6T?)4`1P74|!=6VL1^r1nHvZmG1A 
zFnTV;s4?kS<`%tG6WkYi-RrSBT7BA{D43ahPaM0YFiGWWd?d)PP>hZ+qe8p`RFA*?h!_$F0H&f-j0`~M+uv_q(6(`}SBGO)-p0<#5Dn3ua!r#%T$x^jm;ofE& zRo8ttOqUu$;xU_5KH}Oy9JM%tpb?d zqos6@9xXt@e(l<|beY&kbac~y_t^&;m5>ncy!PY3U$dm7XnR+rZ#kCTyqPAKHuPn3 zZFF(pvWabYxe-AssBlez&JC%mwH1x!j^sfARhTVjEO%FDZS@-|v?MW6rE;JkXSkqD z>_K~-UPajM6UWUK9a+BP-PTsw(o)C4w$*qA1s9`t^Vxk`r)7x4-Kjq}NvlS8cs+*0 z1i$h{tA}ST-_Y3^8uDn3=Q|FOJwYVzK4IQfwYZA(e69pIdeXR&Id?h#`L}@R*|)kP zQ;@Sq{!c$b#@z%4MSY8uo$lGyikt;Uadi_UM z!&y!1sc62$WUw7BB=TJESH~X#%J@r%f++Wi>Axw+&WCc65T}rtI#L53c(whAKb2Nw4>#U~g4AfbZ7d+hQ+(NRqR@;qdl!SKF0$ zRdr>W+)+eI*dKIb%|F}xXP5zNW)H!2Vn7xMBlFZR{C zRxLv?idfq2Dh;xn!1@(@c6$HSPBmil?GfJ94VP`=g;wmdtNXrQ#L8+^ao!(2(q?yt zdgCcEQPAz(kccpBoV#~RTux6QIR_|4S5I%zinA&e6@{l3`kltY5QHNZh)*QCTVfcubS z>glI)CLtWJDQt1le|rT!lvnT<&+Us3;EZ}a<-TVo+jF5n`1j!FSo$YpYPRtEPyyTojiVc4SCmZrKF{$0iv?2V+xQt6^;p!Jt~Pm+^Umw zsh+RSsdan-?yWm4U8sn#zYXCdn>=EaYRwg!3YW1Kug^u}p1sw$CF8@TE~mMB za_7uRPsyEsMF%+7r!&o^u$vP(uaBH^IcvE6=_szcyL-Z0vzJJQOE=R9BY>qc*ApqY3jr7x>HNLw@m_39sSfV zY*O;b%io{ZYA!HT^*X%{r5h@uADG?hn70BCpU&$ey*G#_I+G*zNzyfJLfO>W3a9?J zN$)4xbyC18K@ePAYXAA-;`MfQKY`>PWAJJVDN2_0wMC-1KAHGQM3QHpOVYCQ78ReKQUdrc= zLcS?X@&0U&MY+B?L@pF!7G5bgzjWNj(CL{E3BO}9w3(M0W{7&^rIKCeytZvM!!Ct_ zkEwa}ioa0MW)rE`S*G>b#@aT?EwGW9E_F_XZhMz8-F^D9S=ud>p&W&I(q-?z8aJ4{ z)2Bt9e1ABnD1Uz{G=ia`JXRjH_*w+E#qZN1+RNC>-?y`T!q*^jXW z+8yYt@=0qb-I^|-_DP^6sIr|ew|SL>bz;fas5F=XrTy!IVhf+$-LMBcUI}b#IBAnyeOWrab8E34_R3$XF|DxP68sf7`#467T<(-+Lv(wwNzpuE32N& zjYG%dE1^GGd5jHO)~=Ni+!UlnIb;7x^>&f@Qu+`&D2HS$qRk--Ka(KAkJJ+0#?%O}=FlMN;J3D|hxhj{#Ly@4HKL42x9mu^20Nl+lYo#oh0iRCgDaYpGi= zTxQojOtB5m@*J54w?5sh@vG!M^7>ycw(eE$Opi=y>Wi0fRry`SN3@b)sj0GPp1U1I zP}Pr8%RxmPyy$V9ZGSOH#M&(u0QzX1>+D=Vv~H=F;a&UxpL9jkcXxLY(v;LxKVm-3k@}`4Ns3fngypP^t*tF3C8ga9O-){app})C zBfa9|N0BJD4nf$)m*2mC>okR9I7-=1B{pP6N?oNGO)_!Qw=Jvv7pDBuuFFPL6JfKQ z_o_yFJ%Uz6UQ5d?k>U=HZUI6IO6IDmGm&jrcl3~z;B~$3 z9+66)-lkE?Xsg=llr%)BpAS&V3Zn*pR{u%-DI*>ZKw!7Au@hRqaWiwUgp~bkEP;I^ zvA8E99+H6}^Dl_(y3(W>YZ%o>n34=m!GC8GR1z7z*buLS_$8TlyU 
zx7XI%^_dxK8jYi)y~Zc*En^w5d46`d(QsLUU&zVH3DKlu>f%+#rYI})q#=)DIXEM7 z9QXCcx64SklAx@ZX3(P1#{y~5$qTZgKNE0pb33-ia7<56ce+eB-ri2R1^4`0Lqo^M^VF~3s%jspo~~7)JVvQy z_;0AB-)W}NeZfn72%2GLwl4FwQz zbOJ5e)n(b2qp|SqFEH2FqzZ^`)0^Ci4IkPM+sIAcx^qV+hFuP?%3;^kXyL)W==tD| z?Sf`MTDe*<$TYrJKQB_dyGOmq%t;VRx*E}T9+}{8MSl6DZfenoC4Q^p-KF|;*)wT8QcKPd zbV;lJ8bYgDXpU7kgQ7<41t+ryV3k8_+k-S}rteSinRZImA)tajSbk*triYG-jFUdB zD;(9_NyNo{4qMsF%v$2-g9t2+A|HsS9~qr}M`KNRFDz94A-Kx$*tW8=3M>}9%Kjkb zpQn{S%9?AyzPr^h2DY8!vJkkS&+L{cw7*@UF+?U^R&^lDBx0qNBv~}zWUU4(-hLso zs*yw@KrF!Og;PVLbur?|Md*49>B zN*P*O+UNiHC3SW6@H59`@gXuYGK5k#oCqM%X`WXgmFUY?kvn7&g7I?N!Ko6K4c8sm}JTUN6Iv5ztOija|zFx@= zi&M-}YH*k*zJZP&>KQ(pEy$sbq6w7J9^80ni9vh~KdMIEM0wq` z*Jn$9d)bKk}|zG98N0yH! z-Xza#^AG+iCzt?~zj$8S(7?dl)>e5p(sA?MZvEPLP1rCg;V^NMOr5@q7Yj%2#_Prp z`powu)pX)o+=&tFxf(7i5#zYQyat($c6(STtFjUqkJ1pv-`zSn#3W$PgNQS1j7>zu z3tY;^hQ)lf#k7yTs{11f3K5>G64SG@cW-ac{a$O_CkfU@Z^L@B+si2M@@0Z%F8qge z_3C48zLZ4{?f$%1jn}pqnSh|Pi50x1wIVG;z{$f$Tamlsges---cS>rXx-IVKHl~F z^vTNHJU@+t9%=kd4GjiA-2l?KCejtjZMQMsmF{<0!gsW;PINwko#cmLrDnK$iK(%% z`oDaNP=skS2~bC*qW+<&$XHWk6mek+yW^Qswnm{$mB6%k`o8i*WLL(Etek|YJtqF{ za~g87*1XouKbcN)hkF#|F_1q0=^^B)$z+V-oC&^A0XN>IZZP@nu3TdI`3?$SAcRb2 zG2~;xSRiCE8v-R__s1X8AzyS=34Yp1NjJTaHK$oL>jP;RheL1UNcBBtROX1O?(a1p zLZg$&S4tEcwX|Zp?nemOea2MXL!?DUOF8oC$r* zp^8l<^d>_SgZsapqr&GwCdw<}EVybag#|encUSymL9kN+W+-Vt<~qb<0Pu_hrI`e#@WL_AmN& zj9M825-{_qA@kSXb3^*I)Dxl}RQ7Zwl=x0-14$k~WTKk_B?K-M8S6@PXP=2$S~NC+ zR;6O*NrO13Oom>!s#79w-qWIbMaIXI&)SuaW;imCQ}@+z7^4JAkeEqeui>i}3uySZ zP=5&pAxV)lv>*ZUowqIY3uuVF9K(w9>`#H|MMERtH(i^F_LP;o(toL;8IP9)3dK+q zF2AI9kdk`P#0f8~<)yNK5`f#GzJ$Eze!QB)w9#CIrFMHj#YlICpXGEnNc4` z0#N~gOxOwB5-cb}%57|rDv_rHXOXtqos6+YbD^N?vm-^Gm5UVczuw<7zGh2Q1Er#PpQ~xrFI(hXT@s!t%!5#qc>(pEenVp| zI2;=SBb?yDbfSuP)H_yBW{vUWfiE(D$*_|~Xgw_*%!Soy9q z<$JP|Yw@=N{*-L`?k99up7MJOrn8NFNkwr9?e8hN@lLfQua@ooESH4vD<0LtD5klFH^VpNm*K(s_ z)V36Q+?YR@Dp#;99Bo`ZyACC4*+%&tH}tZdxT@ zmzIHQ`S0W6giL%jvwE0z>`EwN$!%4Nn0D~nhW1re?8|iGqpu$ySrL> 
za?rLqe8?vvizgqyu|w2nBQL^WWxl_&HtxdYa)6>j)*2{XhAPwd8Bke$rB_vwd$~% ztyygRK8$1Pr+a_?o6{dtpJ&pT%qKp#@yX5!VU{`Xk*cg+FcTr*e)kxvRxU0?VPUAo z!?|(*G9(_pIrAXJ3B`&U6f+trf>SWPa@IgcI}kvTul@~CrAhA zz-B)K3(keT*wXAo+IP(ZYsb*u%V|Z$!cvrwkdT(8C5I>nX z%&k7z1l~*udKY#qq+-~AHxf)tY?xTSiHeaKBzS$xlXf;xaB(z<0Edt;78*r+3~3%c z+LJ)B3TSA>!Gh&`&-L}k4p(!R8_iYVh|WhDzIP|NnBRE(2Wz8sEl>~@$N4S9q@{c3 zIy5&*hBv!M?H6xD5xHztY!?&K0`_!`Vru zs%0niFfOcr=#*hDzrt959N`hCT^6%db6^r>WJWi(tX$^FUA)PW{0D0P{+3>XHdEMJ zfe`Nj6nciTC?t{5?ob*)g4`tX!8wxw;YCv-USX8}A*IE?+7NSx;#BF(z1KsJ#8w={ zuI&ix|N0XEG@uapFFy%}T);CvMgX-Ad)jZcA^;k}gOLkA*O?melCCb+vgCLuQfB^R zfBGf>t*K3sh)0&HpF30i)V{7?e}*9a=uDRVw&<{C$2L`CB$rcVN|Zxenw{g~uo$GV zoVp|^^?i9obzxOJBcq}0MPynSyBrQ)R;~9@-rEKoy(6uVTjo%ck@G<_Lrh^D)L1Ee zPMz|0kD6@DY&tmcHYf18LhpvJNCnP|DaTopZ(y&q-5ni5>4zp$O{=|e%cAV{t zr74Zc&sKgjn*KBZb-<^@Miq)`4mp5zCSUy2*5~rzJ%1FnM4{OFT7yKje5?I#xHmpRPlP4w$^62O;awE8S6$I>z$3brp# z@s%JG4rB;S(1SW4%=m$LGNZIwITQaM?;s}Iu&5`1pUK7-GCTB2coR!}-?K2=0A$ho z4oG&oh|#@D<6BFOUA(bN-cb)q@q$nbA^y)dr8Iw)cW^|=5+PIBc=jie z1i3>aQSpyRUC9+E;EeU%35FQ_I6LucImPV6_FQ*o?6te2{pLHrSOBj5Vkd~u*MO=+ zqQ70M*ZOe(9#X*x!oEIX);x#W@b~0RwuS2wrnPBW_KTzq_^=-zmx;2INH&!A6+*SI zPK(UN<^mv15zn9+xgUmKp%N?th!rFia5l-N1n?Q+q>zpun2ey5ybE$`hQDh6{l`4R zB9z(4<-PG1-YZh3SI|no53cVAh{2mY1ruqsl_E+)@IK1#j3Ej8u$nB3nH&kiZ_mn} zDeim?b2qYaDq~Z*-vw#vGjilBPN}IUBY;p(lps;0?^Vg_z{b;;DwP4*y#{@LOH5;@sYY1-*5Uk+g}II6u)N4E9^^QW+_G~6jDL61?2wiO9^owkE| zscs)qR(M;<;A0@chd!k;!28)*7eAfmvS>PEt(;b!x&?+i*sbR%OgJBzK_4P46>GtW zkMalhih?RdWPV3^YlY^%Ic@PK}_D$lzS$o{boszcP@E*;4K@J0#bpVoo|+ zOYB4LnXm}GS5O%yBd_AAY-Y8*QlYslXa^2As5AIdG9NJdKpukcsUi`0P}!k(isCAMUO~7xup<40|Z;PO;leE335+9nTWKa8s%G+4{ZDqVH z*n5+)Egpyr<2}441bkRfcX#)P4T=xSb7GA)&7+X_EVkw(>F88 zA{E0n0V|(*>ByXqxfV>ZYp|1ug2%AEv z3YxsEhMLYV-a#*!jZ*@y92pCX;=l?FT@0P-$?=fxtR7<-J5qk}%#Vl90MHFG>|%Kn&xKPZbTwz@8ai(Zv>QOsLLmg8$`C!SOVC%8YfRJu z5<%t+_+`emayFm2?E{zu>g<#^C^9NiHK>YTz>Cr{l~CC`$o%j7o9))3mc%dljC3Cf 
zF5svtwt7Wj9_PK-QD6%R3%_i75a_%?>7rG6XIv1z-CFIER>aQZNEemsDtB9Ry!95#?O#!0JNDxmoiw>szbk+HC?4$9Sf6cO=}PvP)6cGRF1*B+Gts zIhRp7n#V{7VhZ4|!(P&6nbo2yV=v)Q?2VUplDOxs0FFTEkpB=PV(+&<06*yQDC?W% zAb6o5tpP?2SViD8;>h0W5z>9SCIiJt@BNAAM5-NR0x0+RlI0dIKL;A*XeBs#dq8VnX=Q39{_d`C~7^FCeNISG&zv?ULVtKpz>5U5&zT%ry1io>sniZgv<(T`ccpsP`2lBXp!|cMLdO4# z;`&@UpmiJxH;ZQ=m!PlQzKPNaX}~S*0VixWcm-X9;t!tM0{=1uu<4b4%zXiz0BApd zi5r@nNA~_L6L!KNb)qWwEvNGwCsKi1&oB}wwfE_B8|wk1y9ot@ z=;dQCCo7~eGC_-=EI_A7(C)%H`{w|%Az7VstV!>TLLDhl(m8kv-3JzY*arAPPhH~x z?)iK9PkZ+ZHX^dokUd3DbIY}CDFM0sRZ|UM?#1(4{&4vz$(VVU3O3pz5k4qe-Gvqb zEq^aKh?s-d;d>a^+7bjr5lRHZw%c+@AaX1C44{Eqy+TQGz7pB<1e$bV!Ijt6q?*D~ z^&V0a zzDC)fnNNR=%U-~&)5}iCkU_s!+9Tf|r4Noh3JtjEnw`a?Y;_cw05b0-_7cXNA5#A_ zu(aKJ)Xq5{2;2rF78w@~l)%6VD`lUHo^}lq#w%Xoxadmi01ee(L*k}P%^D%LFtVjd zxA1>EvZZp@_Rz)%hIrU(SxI=E8*wh*J8M z9h2s{j%2>0wHkmdd?0(BFLG&y^Xgu5=OrVVni*Fh)W8i!)~M}O(BlHU9AvSdikS%w zRaOQ|+Pb>B{`|3npbNzPf&vUe5CRYC>)RAeF1fI{=$94(^8k9S1gTod9Z+LeHwxmN z%%{go$lBTMAMJ0#bcN1@v$M0y(Ij#8{vr?4Lbp<`CXaS~AhfBj4wZegWTKdCG8!Hp z){z;Yx7O92nQ910t*fh}BqgnRyIumMo#WoyNY%G*^X{k8qMNDx_HUIPOm7Y=H&LOj zh}}Yzl|k2*wG`e%-vsn0`;YdqVfKLGLS;n-@5$a|@utC+xb?x{>tlJzw6G6NPa`+I zyslba`B|iPTI{R4;oFzIvhWBH|2o6#PQt}EGT+>}zk>dxgracGr388!rnQ`b;EH$J zi9BNSBjAu%xu9NdmAUNGG4)7*@JE!3RI;^u*x*eSNd~oI(ck@-9H01(B~4${x;Rhfmz0McQY4T*~zF|4heqSZN>KBo(Q@+IZa- z==1{maAy^xrnl39bbm|@id5$#yz9Re#4??YwDnX8nuk zf0@O>xXTCgH>aPs?o}bm#zlByKNBaXH%tG}Mhfvrqs$_;kffU}_@qzEW%y?nSqYWN z&He5H=i1>tlYHk)9dQhrQB%zM_N46%tc+kmNUXnRwkwle)*8&s*Ok^EKQ--05>`}H z++e8wcdta@YvwYaXQFvC#9jItucmFZd%h?ZJbHGQpQsq|h_%eWXw8P=ZZ=el-r%FTzpppb z90_yFFvc><&Aazv813G4IB3=XI)X>xd<%$1%~T=M>7m=(%TtOA-P#G z4Fp&axD+L&Ib5K(rWf@}-{<8awI;oI4B36r!{R!=N1=Rk;cD;E8`T$fs{Q`==>50E zv<+TAS-%!GX(^8Ya~6SEHtXpwd3AnhdyI~F-K>=3iMB<+PO4-?MP;Rq z-x17HHVN+298cB-L`WrlO6W0!j$;@hGi1JL)XF(G_en+nb&gi`$ebeBC(HhqVl`>Y z8U!Ps$=#L%>O!s!n8rdTW}+t^Zf-A}6<@s?|8VJsHa>^_)*N)H5FHm=nPv{QkPxaC zj_@ULpLox{&`I1CKVoM!9O)VMj#a}|wd77I+fJOm`E|dJ*}lJQ*{dwf4iO#aU!^gt 
z)V-X89Dy?K09yx#2uwW?PF!r4Xi8|{;ug&F&*l>42N2LV#AUnU2n|9QF1Vfg%rL^) z$>7qGUD~pYHQ)Xza1E~SmYif`+|Ay#6dOV_>s6g|0tS?o zlc{ziDlhqvgbOCM_UnuhIeEHVuS=~&4h5SP8-_T0qe>KrZA^uSS5TKS127q6dwWpH z6Y|{2!li3kTU&#fe=5;!b>wWMNH5Grs*FGSXEu_FTh0u4*9D-){$nhXK#S(@SR_+P zK2%GQd@fZTPwlw8Wk-+pxdRXC2I7ccTfDnFo4$Oeh`klVZr5;O@Q)3SYBMTeA4->D zd}F%CkSj|h{<(4$of3VKMNGN;zN;(RwqmfE?NUF92?cdMIOG8*qYz)+LiQ+f&9FJN%VCEyu@S&2aXt#oTs4(!vwd!F~Id57jW6OjWhkVM1tr;MCnaHgK zk8}*mS#x?)8Obv+FhEO|K_3Mf+2@!6Wi6*^;Oz;V)KpZck3C8-VDJ~8!y-lM$KQsN zT2fL{4Rv+&QORPw0=XyUno6F!%|pL+|26#;4Ga(QwBpU1HyahD1q20k1e0W1hSum? z0~*de`d2%yYb1<=1$IWebSbkCM#5!Hj`r3m0x%vQq?B)%LQ`ubId#>zwZrur zHz;LdG8`jpR!4?c)S}f3i?u4^YycbsM(}G@doPHDXZ{k`FZ``wD$6lWiA!P~DzQLli!U z(}BG!+6>_R07-T9pJQo^B#Gs6z-FyvOu2|=4{eo2#?Mj2G?G2$g(~6YkAFyFSkp=o z=DG^s_>_pq`4CM^OmrrW=LKOwI4|sh4n@ekk$sTJIz|$ZF#upl_a&*(xF9!O2S_CI zg1zWp`WMOGX~XhfH+CNv1PKhygwF1kU|LXa6S(lO8vK<*f` zfT)Id{>i0wNkHKK!~_Z&XHy^%KVubfZDZ3lU8N;t`j-Om5s=FO^%;K2Bz@j52^zDr zMp*E#t{LHHtLJDp^Yl9&Zkwv3-tUEC9?~4+**+Sj*RaM;2?;VOuAxk+cE`5x3i@}0 z;d3pMpJ7Cmn#4FbNh61!kO*2!Bi48;619^XJcR1zfs(xl!;2 z10I*nSEVecwK0B4NnhhB_>C9fPH`?68W~NKLw2{hv!mnz-hrQ0jIjb{&(q%6Y2RQn zTOo&8_F(tmamM(+lk6%!;9-E-d1ep4&_cH@E*q1L&4FUJR{b1TIXea0_?@#|0yY68 zeZ~d*10z7Y0+ySx&h}^yU6Y_Mvx|IlZNiTSZj~VrTGh@P^AU0ID+5Xnaoml$F9Uc0 zgE%g#A?wg07u8oDT2@GG8a%O@2k|M4x1%uNG*==2duMz#;)nwjLdD$)D@{PpYC6m=XS9Xp2` zXTq$u%nc~WJO0rU9(U~M0k?iJOk~%MEny*ouePQ}Fu4O_m{|3ZqGwZ@@V>MROj(yi z3vzC$X;4~tHK&(zFpKMsy!}yachr%8*) zJ2DS$T2{7A17VX{bK}e_qK{TOl(4AX|I^(d(gFQ~%ckL=o;NL(<^7c_w50OpJA1UU~Y#wsN?fGY}(FGTJk3{pyX6juNyhT3oA z@cBcBPl#m@%GWwUxzqWZ&h$%W@-5_-Of?}x}wo?Wq$10U_Dj5do0C4T}T zL=IqX)Rk|-lEV+3Iy-@fKtV*|KmdfWnm%EZzVZ~ffxwGDi|nBbroC!Og{Dl41kL>` z#vUHN2Ke+C;Kz}EAK4>=bm`FU!|rz1^~#mw-!qmg?>SxrjWrOI0hUZ)M5CPw1d_A+TyIuGS$*GS_+}S! 
z1}oq38t{)I0I-N1;kL{t>m--&Eci%-W#8PemcSHHWsO`2v#d!eVN3gsvX>KwwDzUBXiEfgc29Oiyem*pa44kK4_l--Uc9iVPtpinlhBPxK+n(5)14HY*qJj zU28@oA05|;v}VyzCGw(4eE|rOgYFY&f<~j6TK73P%Wq))h@C39cuix2cK~zKlF6Kmv!*@P%ygb2$=RnEamBnODnx>>x?lr+A<87N1;%< z0%*W0Bc}(cOL6UVABiIPtGKfFs*}YYpnbKnww-|XK7}hUo`nTP^BPdA-2oF<>v)${ zOfmZ!R8?)xG`2nr+hSv1>$$!K=azu!EFrZ<=W>~}Sl&$PXF0?MIh0`>&C>OJ(+)U7FfSx`8DLKEVKyNL; zgGI>k5U>R&!KfKMEk`R-fAsix#S{^~($hmvEC%)$yFKos;t#xCep3E=vjaVLyjBVE z^>IZ7-1E%VPt^dT0<>*k#F!H)%DHdI*`Yoh0ce3-If=FOk9dlX_|jaeZ97_XA^!BW zY?5ZKnW5pt4K(&*$nF5D%e*k~XrOc!5TKYipiA5JLU~Bsjab=j&dVVj!qU53_P()G z#veI(Lf3^Dsp}gqR>QcH0)Ni$^-!gOe}d!^6yyN9bq$|@}*DG6%7^$D?_`mr2&OogD_w9>> zq>Cc^O7@DZ>=oHU_TDplldP=ly&`+3$jpk!CM#J*$X+3`J*WHr{eRE@dGoxyhwJ+p z=RS_}{AlmY`m%l7PJtG+M>kukfOIMsks#1I1Ok|K*0RxjU5n82)!)}1*8(G*#!$(d z0@QU>jExeML-fw)d#mJg|%EA7M+ z>@lw9)R5lA?nZBa_)t-tY!vNLEB)~g<%Cz6g^kwPs|yz%V`G{y(PvE6L-_CB_lwfy z3OZbO#1IvNB;NH4S=ekjx7(`kLD9IOVTI*jZF2IxW4SEvzsIb)OXAbobQPwd<41ea zYQ2FuIps}FGql3WYKda&)soBu^0G(m(+(F7n=$1H1SPta&Av`!GkZ_PbCntIv9SSN zcVlZysuR&_8ynu|qIbm@D?61==9Ngwek&nD00R*?JU)_k+8e$WbCFG@5cBe|pyfg- zi3bzo6v|)1Ww%x~njSc0E+jD2N%sXRiSYm4YMe*eRm(P@oC!S^ygK z_}JK75;np@;z!BX`Rs*-2rh$%`5X_>GH#O8&T(%Sp0GvgBC;hA?Ch_-3J70#{GzwP z9)0`P5Y_BtwZZ}`q@$}a>uISBfAOPucbh=)oK`8{SVw|We15JQ*jjHi%W;BX6ai<6 zTKEQoiZE0tKSR1UI^|nL%3oi=<_5}w}?#FFh(%@lItUx z@C=SJckcMbGmMl;ybJMD5TK=^3K(3Lr;V?zEK`6)CteM#rV6Zkyh+TG5FMvsdI z(v}*JJX*>7k$2)Znml~gokmrG&CL_o`Zt}J+_n^ zDb{o_DQ|dqcxvj|TwAcm*7thX)%dC^E+73`TWxjqBo)ucEW~VjMh8VG7>`AG)_gNB94nzAxX__1Ny@ z+APP!3?CWIn?zNMg5Y4F(EZJ+9d}pXIv9WBwViN;<{lkc1qD4A9b`hgxxPLvjQ*BU z4qa5VB|179_LE?aC3j{>I!YfI4<3_3a8+A4obu_pzM%l(8HW4xi2~ifbO1<^}{Hr=V#0%gOeIVCWu=&AR4t z|B%SDL zTKgYzPM1)GBy}8*Wtem{-ocTlBjVVW3D7=JC92^Fl?hOAHc~mD5 zRFIwRKaw|~O+i6{fsR&?RgdG=rPd@+@Rw>WGH**ggr~@~rBo1m`u(G--6z%Ugik13 zPq&iU57GT0lo!B&FJ+5PNyh8N{7j)iiSszjl>K>=l0wXX-nV@c`X~D5!UC6@0PWqo z{$W#0=a&Lq<#haK6@@~reGzYbug`RPb~b64bl&0aWX$&Pz28fL1$KSGm5OkE!LP3s ze-xAm$(Wh_f~FoFU0rLK{^aYLMU>>8iUamIAOTOu3=MXfEdfRo@vLA=?dg-&Cpu0p 
zE{#4XwIk((jv%EUDM;_LFfw*eO}#QPDcGrWeIrpEq+MZz_S$M^zQfl0a?&I@Ik_8F z&X0`OuiuuH!EKwGnqIDHY<9NAW7{D8=E26KgeG?((u;qj@%5j`d1cKt#iCDyd3bFy zHzA7^_yT3j6~O5Nzr_6&A!xAmCSmZWxip6Vvog+kilw(zTFh-G*PHN~V(~ux|F{u) zd{yl8<%BHdd-m_I>z`L3uSEnQ(OOSsmObAmkf8VFgrujg-;5;EzLbT90-k4w>WE}x z)GfzLP2iH4Ik^YQfFF7O!1q}PHFdEA3%O^NMqlq!r!F-3>a&6ir}5EF_mU4yG({`M z8QE~O=AY|N<}AYlqlvFl3>enkAE(i(wrXtc2@SWV$0(0la$lm;-4IWS`3tH_r$(Zp zja6dwL?Fm)`SD+Uk_-fR(QK~FzGozE!-;pxq7-v)9n733TW=96 z6}dE733ok+ttCE|PQ;MrZ0``_%Tlb&*QZ_WXF02_42YKMdUS#R2M?_^GKV@EMR^`W1;7-0O9NbqNMj8^0ifhEd5vPgAhf&psaKUoo-{tF ziBal#m%fr*>9pi64buwrK!>`JHpAom{q(KIE&FRUM^2)r3U;oyO0LJ#(cY5D6|%F< zYFHNd_fs6Vjo&}`{^%@=mzQ^ZU?55ZsN8AwJ=#c=01RDGaPMu=Bvg*aIXgRR^w`c6 zL4P}>=^lEWn2k?B05k?5zA~6kP*uCSzFq|Y>lDnt~}_GzxJo5 zQKLjDNv(SbUa%{Qjb)aVCDVrQ7?S;?HWmusR4%q&NeO3F z|7+~$J^yR$Pr1R^yXzNND4&)dO6$C@tF!blnvELT0h9k=1>bJUeE6Y7e`*e#&gI3I z#&Jjy!7b8X>j@b)p#fo$vQ;+M`00~P%U8KJ(Sfx*D(DG05(=7l zul`_)`RuPI5)aRB(SiT@VFe~>wqhuwoh4mB!&wp!=LAeL-6&#~l=+&=Wd$fMQaibVi^$j8UlJF9y>&sJZ4u_>u zV^rn*h5Q7{mtTMg6uio-;gNY`rV?{XA3ueniwjp{@HfD0)5d8Qd??%QNU!?5Fxc z3Jy1pYxHTU%uO&Ic(Ntvo2q@%*o9(xOD_&jXMHn;HV1zB{rS~$`wznwT`x)T7=m3N zY+F_G@%m9dph>ngj6^UN`S5T~dSBrp7D)Lz@BO-=Z&*-3Kq~aSIG=KK94D3tCPgkQ&uK<%_#L*QA^;t=Spxp}S3G%68(RkJz)o zc}bkI9NugR*D#;0vY_?{bIT1oHkOmMMEl?W8LC>%^L`lvPCNEBM*+ z6Cvtg4y2s(!B860N{_vdAViKJ2hD{m#>^m3V#4stIIE{yk1ec@XczePx#-nNNdkQi z&V9chAHl%V`ZH0AL=wcVNV1`~@-h7rb6BH;%^$f$3&+}5-ZoIL&e%0^K8;<^3V_#*G$5fvCU49j2DCev`)E4FLP;MpyfzQ4<45rASuS|gI1o}-)v1putBF?qtzXLZwg&M`>Rul?3w&+&>N#sr1$t$>xX903)*-V znlC?GXWhVT=PTUlyav0VF+?-cD!wJV?3okwj#SlyIf)u80%d^--si0+$cflnriU_| zfofQ?(Qq97pZ)R$rX+8vpeo|gn0}wvwDznwE*olZd}wB>-Rg0;pr)fUkKYSbc9Yqk z|G{dU*u%AK&x{l}XJjHb#z$bzP4|XuED9Q9xq!bUPO4UM;=epUf9F-DIZe14xwCF? z**Zsc!6+ICva2PABLfQ~xSo0+ri6+IbM_^di?kub=Tc2o!9#i5AVMH0SQ^Dw}lhVeKdE4FgiT_5vuU$(KuCC z(bIl#>ya7`1TTkcQ2tER<1{3J=4o^gyl^htw~<@Gk3MuA=dRs#&*~nQI7U@v*xOZn^Z6j0GP^}@AT$&Jdci! 
zVCbf*PK*d&7TXMo+)QGNz2T1%wQ5ZavAW-z-;65mRnZP|1@$USpoxX1dKt-)<$+Cx zQ|#}~CuCKZ%CI4p{f#kxFD_}!ApI!WF~{Lz{3;Q(xQgwp!a1*Kp(K&h{+jv-6*Lb& z7cr!cgD89T)5fpI`M3-`dakr`a3h<%ZOwcd+VM*lH<}6 z-Jvs|#S=|fznKAhthVyD#;$`a$^5E=J(j|yV)y;ysUf-f->XYM7=o;$AKVjXW2n3@ zArlabqVfd^*y2&82;Br>G798V74JGI$gA5Aj;~M zN=1hqUh8+Y?O)K+`Ajasgm}6*+nFCgBm>6M`gH$mnnN5}y~knuCx@-nS^Q?d^NpA$ zew*_ViuDs-Dq3+EqI8?wd~9qi?g6eFjZADJWn@oABNLcG4Y1BsG6<4c)g*!)P{4}7 zG^(8wgee}xuzSOIo;N-~XUGB>!dt*--e-O3b}s{FvDVhs&X;{(e*?(5^W&pWG5rFB+-3GB*lBwn7VZ{_0~DZ3E873K^3f(D^oq_ zn9oUbzxaX4h+7_U7Cptq3`Y~=qWvLT*N(K1xL9CgJ{yvwfG^zo^`4do=ZvQKk>awa z?5T+V0pb-Y2}!J60aU?mh0)T`1d8wVab)SAzfAY!+kJZ21+@%P--K*mS%Z%>=9~8) zY@7NoelF_C(pQEfM$dU;dK?ss%yDZlXn5M-Ug^)#g!?269IGq#SQqaPk1;6*<5XLO z6wS6zh~k=sRKVW=2q}yq&NtbpAmCpvpYE#Dm3XJ?!F(HK@ac2@OJ&A<&ocpr;kr3OUB2q%(&#O1}tNv)1W)E`m_YdRdMg5sc~&oHX(qIaeH(%e6%8bAEjtDD@N@@Q5Ws>*H zd@3+hGjw|xP=?_LBT26^u%84`v_L}B{G4Id{;{m1891t4_6jYTw7i4{FZQpZsWo3) z^l1Z_5UwwsIw-)UgX(Y!e}SbfjA5nXFpU6L@7p&GO-<>zhDX#nv1iG0l`bRalj~Cb z1-gY-^snpR)F;+Q>C@)|RVDIin-5{y@@GtKj7Wj#WKA)Lr;nJER4QgBCbWve8>LMg%C47iF|FvVgs(L8 zO9e2B(@8YSQ16`h^V+zbH~7TFS1vXW#-;_(4PAVQaHzvf$|D_0yxVW^k-(f7j0-#H z+GU#Jg8hBkPenr`f&vlA{OfjuZTqM+O;Vi7Bsk9cJD&*Cl0qCCZODuXjAp8L9iSA~ zhAc$h)<9>VjBEvOgd5WIcM{5ISMkRUbkzwR)tK3&@zjihxHFcFXG|8PH8gZ7Tr*%v z2fMHH6(B=1A}~t+jwh;0>V$M>w@$UEf`uAGMrwkPaaR7^8rd+-7BA+DFn!00p(p~u zX)7T;cydzp&HnW`?-QkYRw8y$MsHOVj$kS`gpN77*#5|2UIXu2M<9%wYs1JHvL)HjOW$+V?(ONcl>%PBly%W#7inJ}YtZ&xAqhtUG}o8sE8R@g z`-AB<4MU>2Zz^tWVJV>Z7U!pB@zk~x7Lr$wy_Or$S-bDJ%Opw?sC%tn%*gWo>@~qu zHYlfxr`VCK;E9E<1m>JSwBW-Y&fSEFdL`znLPV$&y2_V|K+?OnQy{f{oW5l6)5m`v zA%ufQFK6dE(tO5Xj76{rt{xZ50k`u?wnQOCJOu<{0SPgmw*#XysJ}lk3Xctv3N}_0 zO4(PF%E;rpYf~Z`rhuDrO#yjpB+(82p4IPBL~Y%!Lpx;tW>1CRQr}BX+ar1XC}36m znBVV*$i$~pPb$h>8Mdwm?5`KpeW>ry*q{--;ADOEC)Mh`%cu|WZ9ev^{>U_D%=}6? 
z({`U|DiD;rc7e$jXmTErV3SSbB z)lD2&d(d)aCYe1oR}6_j0AjCrJD6=+*;1>!*}`B>WhWSmu{E_sFzK0xJPBM2sB%}+ zDlAwoTP#XWseJ@zVtCwudm>RekVKOPc@dTCm72b7WC8N}E z5WBNe2{?z3@EdsMg2=n#TH}@GfOe|sE@+_b(R4ki;3_9{N7$ir$i_i96-(*=iOMcA zO+w)%BzQ0JVV$OsBtUKEmsg09j)~DbWqN5mx7Ie?-E@8rRfx>KVzb{>JF1PKtq7DA zg^7g@XBKejOvMY{e+o+Zt_m#RhTr@pd=AW#4!q zJ2XDR|7Az~zI(PM?1pjt z6B3^ap_PF&epQkf;-fgCl%vK4`GP$HIi zfZeHqIYcFp$5qbV{)xQzju!U6RfGvND*{Il(1ih=s7btlJNT?q(FfR_hV*9lXT88B zUIwqO5-b<)0$tZp3;v6D9d{pklV=Nq?e4|vj3uLb{DQaBDBMUuxc#4%#Ud{^B=H3; zBDb+H&J{bv7)nibAA{4BN+1N<#T(*>MHNu#O?Uf`RH*S}ziY?mgT9R3v^O(|yccz#0VdAezs>(W19QcZ#;TyMb`M zYCQ!f4K~EH>FQH{L|RjKBTic^sUMQW^LERWFF-|cA<>zg%zYT%fIxf zX_B%~)S^kdMLsB^&_Nl29&JMAW=cTys~bGrrQOe!w{O^fWoA4MQ;yPSPdopGMgXmV zp>7{tmZL@FPnq6gLu!nx)s?QTU=-A2>dKc;JFLJr@QV;Ym06#^s77-j8*K>KEj#kf z!YE)f+yWOYVfz`z%k>l|-JZ;U(nS5$59>o7qYR-Jwu`T=ok284aN+3Swl)g|dE`wF0)X-E!gUeezr4#ivgX@5C(6%^C9u znCW4KB01`RJ)py;^QBaf+?r}|V>v>vQuH-8H;3}>;PZk_pkgO*Cw_?n)a1X&lUew= z8x-ZBaIgAzS36x08)b4USeNDE0Fs`Up^<1yRN@e($x6vVS+h9y5CjACGC?T1rlw}z z#ZZ#*_f)g@-#<64f}`4tyQ8C`%nfqx>3#V9<+Wf6o4&KVdto{B^uH-BHHmSA*A$?* zF+DK>C9%A^y1$FFB6Suq?sIKdm#qH$g_ltZ!XN%X5~46MFor%s4X0jjYbE^AsvZ=p z;QVWAY6=!&j{$2gm(G%r=&j+Wk-P=NHTs`9J9Acv>!wbKr?ZS@An2sF0aeR{b_;hb zog&s(c%CamIcb1;q;f34dlF>gUP-+6@w`^MU}xS$>whhiP%E@kxE}E+_Xar+`#O4n zzbm#6r=j&pilJ`_n{*UUq5w?{i*dE+SxWnyvYga3)j#FQ%|#iB&lwpM9epw3;nXLi z!z%`i4HXTWIu?=xs~ompGAb^jWnHG;x<-$yMdlT3Axq_}l7lM_I_{Ytc0>@5hGk@A ztSGPP8SE6=q+LAtN<&sTw9;6fgV0=-%frj15`9zEj=^{>`Z)=Mn2?zxB>W)uTcKH> ztOyVSkN$#-i2Iy%>NMkfvf0`y7WRl!Cn_i;+vy-S91_SHgqLaxmag0Z5bUYzl%j0Gn^F8t>lu+ba_-5M0h$l` zw8Q_ofzfjyROg^lTg~A`twrlFZgMZZYk6m&Tl#L{iTT68(n~$`1#V(^ zN_nd3?ZJ2Ujmg?iB%a}BaxrYQOhIe|WgM2phMv6t9QyB6#on!dfxF&0vfig1>F>`^1^IKZg7Z$&prRs91+Oed@Ogq8GyqU7b{Ke;jlfWp9x}r3dSzw$gYv^mr-V zp{>20_`ZRHQ&nJK`^k38AkTBqbZD9F7kI0z-bFNNyZBJDWkeI;P>a{m-@bqEFZ;W{ z51^;2{*O)(D%I0QlN+VZSaJq7HY_K_whE|RLURyJ34QK7wnak!%ITiFd3bF2*s0MN zV0~ahT_k&)af9|G4#%FL;R3KJ>?q2#V&Z%MZD 
zl>I|P`a7x%n_}s&sK<*x#vKKcQVMtPntqc5n2wFn^jGeS}tLsW^;O`$Vt1?dE)wUZ&!--GDnSl)Qtkf33*sk&#RV>pvk76~{RCLnj>Wy@LE^ASQyJzaM~3?J z6nozy|GKVK#x6DGj35^-i$qbCxY(mC!ca!Wjk*fqD4+|qCFEChcf4xn^Y}WJwWibp z2>q&*gc*pKp3XenaS|+W>2D1lp~1%{krm4#Q_N>uoDxz3*61dfqMquAY^Bnk2@Rqt z!;%IJM&Sw7dOXQNK&*M~zh^5QIAw>hkezkMu36N^88|p>2knRO@LF*`*PRrLW`F;^ zAS(FU2o($i2ncB3LttQFWC*ye?T;Idi%46u@6t9)JvnARW-@*1UUC0*6CSDNvq&mS z#ZJF2I!V{vR`os9*WV&$%$Z9~6lpHjuD;5amUg=>$@|@@9)^9R?+wRLkNQr_tgtqr zKus0!v|-dMO#mV(aHha8@xNqx6vpxdTp}F$sLjo~*`>Xse(z17B@7KI*RJwRbElO) zhu(t@_hvW~n2QPTfEiH_12epqfBWBJIlW<^$}T{z-v!&gz18jdN(DCFg8cmW1y#5ECMFW6Sc!oti1tF=nCGpn@P|#aFj<%1`W-HD6 zQbm~8kloxl^nr{#IfcrC0Q$Gxo%#q3QatKw_TSCyhiFm8t|y@&J#;dj%0Jt1Gm?C0k{OKu-Y;M}}6xsL%ta zo54u$zgsn=#)Fx{3g5GRZg&$N!;@aFzuoQbXI%2UIKrPBfN#3UqEc6N_z zuBP;v8!i;fN6OGrtBZv>xw=M1MU6eDPGVxXj&b$sdrLL#7#O5Fj8vc`KhCF#ywfO^ z&Z3^?Ka7?&B9bUnSjTeZ8r4+pkv}1k^7m}Md<99#tHEKBIV;$nx8VZwPtK5v^P8ri#@?%}U zNhdXX#&P}ku%R@iS^sI-2y!Z+`T4RMZV_hZuTMv=yPtUiNG@{|)jk((C4nv*xuQ|d z9?q3dS4ttJiD-BTHTut5jHZdwPsN4%bKgopVh$;->2uoOcIFZgDS48VTQX9_ANs@s zT7eQr421%iq+~tMBTvuGT@8^5=acK;_c^t_Pjh_ zeP>PBV9RnX`ZIShVnsF-%;b3D-Jl#&cq|kGAK=j3UN*TxDX4Xi5DS8V-!cYSU|0)Wnq2u_@W6IVQ(Q?Rt{_5&6M)9m%Fb<5;s-cUY8xlS z{bH(Dn;$-jg*qd|;R0!rHz26ATkU2d+oo7snnJYy95=5HdGICZv1Dnyp|&xP$AF$o zo+2Y7qobqa(DSHQmQh1^pwaAZuR1J@N?$px?+2EZX+WJDlH!JWM79G83XKX-u2yB5 zE&B6Sfd9}8iH_oYueQqUMYsoRROsgG+(t@8ym_;}a(Ikf52mv$CXLYb=(y{!<) zLlMXIN$OJ>Pc>Ad%u`Cc_Xze#WRJnesy7wRdRLEs_5J-HHkOQjo8ohM>)(5Ord@>J z{F)00+TA@os<;w4F0oeqqD=~^7UBh4pLuHx(an*t9ahA*rq|9H9rq|Ekk*RGBkfsHGj3v^qOb=3~T~;O-qAW ze!!0K+FK$moiU*~gs98C?LNU6)n_nBX zE)~8GqYR?vC5LHLAOX`oCw1H$vIRhl6ja(5e<^xlSw^70rT&R%;5JG+A3{(s@|Ou>M_ znEEQ@4VGJBF!#E(>^cVe2-Tk+NEr-$j|v(h$Xzzfecm2^})>c13Y_j zdU|x2u&ZsP!spM8*J97KAeVO*eldp>aK-9PyQGSl3h?vmH@I3WQ1J?EmrNIr)G{ma zKFf=ztI4PsgzgY0xzAt=psS|1@vSNv*Yo7}HHN&~xVDU?(fYKDVJnv5o>LQwad=4w z32Cf^mzj{V54Ayho4u>>V>drd&2f)eA8EAt@Y{Z?Qz!6lD+Itp?)7?i2nnkJx=fPJ z>rPF(%Fn^!1o7g=0n9*ErhDi|NkNg1qu|ngi;PTedF>yd{0z0K*?mseL4MUzu*;ejQCfi$QkUoqL)xH 
z2kOi(>6Lh-|D4-P$k6R9?8x=KDhA&aW7O5wT6{xMP`pY{pFB1KG04M{6dbZwFza{K zw|e^2#5eb4nm#4hBjrX*e!#bhw)`BPWpuV9j@4gNsJWvWz5L(R^hI2IM^a)f1af}6dS89gJC_J>i0)G3|~! zp32u|k_7>_Agdmyg1F1*lm#AUd>@x80!1G}z9X5A#ZG1ZG#^YJ?8`dI=!>lfkOE#J z17tu=8lkct{a!;#q}ofJf2(oc~;daG$F zD0C0yd|DL(<_AFFtbJJD z(sJ>26_dO6Hb9~7?(X3+wqyEgz|x8sF63PxM^l8egp_>z z{D9lAYF9pTvL(gj<*gfb34L;7Zf&i%Om$!4cG7$+-$u&UjPNSOn8gI9QIG9RQ`+k{T_kN9@zj7Ge7L`aHr5TrcB4>P>TMqlV2#13 zu$RU+5RsapU!@r0%G*BvLy@$SaCWYoA8PJw1x_8%sqCf*&}oY5KGNB}5~M%Kqy>QG zUt)pC-K0-s2$CA)aODFb#Bl)X(6DcFh)QhmXmogZ=S}%X&f1_J@2tgV7%3RixmTNH zRms}MEWSHtM{m9A1kf9*p*5se2@yt~s9bX!PsBZp6?~&ys6x;DxFz)zW6=L`B2du) z&;4^vRRPg^kUtNRBe~*Ub|)G;DN1F9(zsP5`X8#E1e7w(zx?>BKZ>oIO40GKYgpl# zi82Y$j~gg`rDjfWzad%^U1c&YUmAz{U|f<6FC`^>tDYWflWSe1_3V;fk&P#8U?o;fl?!2{1my zvZ~)h7`i%TRbQ|}dg@wnCjq<)U=$I7(KJ?vPOCD#vG1*t#Qz;cFwy==6s$BgaJ&6*{MO) z1iiT=vBZ}3EA2NCLs6jhqVdG&t4(oO&o*-A&d{FQcdE)NCU4XmJ6){ z8znAj49v+1Vg%;W6AIW;CP)&L?4Q^d-PV#A{OU-+A(c!GC=l=_XzXyW0=;}SHvVUK z$f(fe-)L*UwHkJxK-0no6p-n#9fRgL|4l_au!L?uVDISLraWWc76UXHXf^lbLyZzu z5QJV+6KsyV0Vsb->uq4!z%?xFG~HRTS*4x3%nO)g&Xzu40%gE2F)_;HE<&pq3CLAA zeJP>>1irB87;^)0FAAWtcy#II{Sr0%?=x_O4$zbK{NXF zE3?%Ec)u(-e9W$sAx?yDijZJ1>)vxw^9KYROlp(%o9kyZ>U33a5=GphX_$Ya;tq)v(V@SDhK4sla7_3kb*bE>a9ie3`BpLTxe};` zXjS>?Bj)7HQT+sssM|#wFE|Fu&zCSPS;8Y`wiaL=Mv&c&2p=fKFn%ZP7!v3Kru+pF z{6>x0Kl^=+gO!N z=+3@yOkl*}d)h-C#8wL_k-me|kdrGqsM4di$dWmF=2{ijYtdzbNxfg`ddkaMWgfPU z+(jvVB05O||Cd)L&3KOCZ$6TES7F&x9o1N@;-DNXJ{BIFUR+*~5PrSjLxw}$l5s|B z)BTJ}4D%QO_Et=Q-$50GfG@n6ul%D)$+az2eEtE*cyC9g3QPzQ^8SgKi>LG1vmAGZ z>G6p@DgC7p*=kNNc|%GwOoOPO>=ItzmQ}e)^hQfBmWO81VrCTM@3%VT-?4K)UpGIu z1O;d!xB|bf3u;mPx!>_S6joJt9zGdwZq}Pp`4chw>02v$39y?_?;p1Kbkn4MDwB6V z%d9;;GP@Od1#!K^_2Un+aFL`t-U(IDKkZa%RRIU1;IU!H8vZRR+KGOxAY~pvqo}Tm z^8*>4>2f2wa=u>F151wsMmtaUTXK#Ujzv8SFBWvF`~q?P1?)ePy4oF=-d{RZYuc$B zKRz%Z?KKYeh!bt zVdr>9J*hwdyvzs0I>YmIB4%F6>u0G448R!(#v*SufuN_}{x6dM#gIAq!p*cjp7wi) zV$fg}U_9mcVQ_KGp8WoYjbzJ?!y+Ie66!ooEwFv4Qax|c~)~zRBC~D!sSU; 
zh;}Ieynkw33(tNtD0EB8PyJC(Wk%ED1S{>!v0PLpt#-C`%NE^VK*pI63w0^cIE~;C zd^jx6YAD~`o7wl4uL6|vpk|dNO`PWSg1KUzsgLfYvM+lG+Zs?f8x;N^F2B*mZE&+t zr;+2T)r@Y-i<|&D2?3!!L!JVy`gp-1hP`ZJM0UWtwqCF;4&~R34Hyi(2lCdHF6^EV zbvxZqIB%q^8oW=Ka_V*h;RbI#$6tVm0q6z08b)MS9?2KVaRm*b$`@mN-e9Qe!kbyaVuA=hT=^zVTq~SczW8$aa=;XMw!P|tZDtE#Ui3Rn^$P60D01rvXz`x< zGx6(PE|3I(o=J_?@D{?3VKfwiT^luW@j;%Mt*A{heeqKVF0<4$h6(Fm%JCN>c1E0$ zEDY;rRMK}nJQJU(W-$a4pd~B>KVJbCU%w0t6uK6nxVmS9DSLIhet#5uWoQaMCM)Xv z*=TxZFa=X0-X-yDdGMUqGVO%))UVYg-GDL@s59(Oo&0r>Gv+^EXKV2G=sO`r8mwii zk(vQ7L0WW6x$r6H9earY|s(ywXp6jrI!gs7gdFYwLx54|dnKhyPQ zwf}gN(S!q()a*UZcXHtmMH$y(nyym*Gw^< zRdN)c~a9iUgYdR5fofo9KE;BU%?l^Xbtq2DQo`TEw}O zqQR%|Zh=y0_3mEkau>ZOH=eJ{a$iTfitUexmZr&x3C1M3o08gxdujv&Jt1z?!|?-- z)J9^UcuX_5S$8)t8W3zH_Qo^eFO@nU$7RKjWK&mj2g&c!{8QCCWS|?1r9}8MjZ=xI z4Di1b(fGZMyJTozRbOs>>l^|t4k$XO7F`&SUWy&$z;6tZ}bK@wsR0O&xUj%PJhSb zomH0??D-y&ZVt@ok7~6)?gyKHL^VTl%*d3Eam|?q`hEn=%rawZ$U^A6%n8F=e2-Ui zCaW$oMUu`TEIjfLKjE5~IBlIdr{r-kcpdv&(2guQ|MX`RZMmU^nc3a&N-fZue0~Nz zT*BZ`EPla@s+#>bTowvN-|2`9M>3vVOvTJkW>Hi7^80-xI2-@=&0=$kmy=6$Y)RA3 z`2>T)m^p)Stn-E<#vi4lPpQ?`4k}2By!#1KlwXxep=@d;B( zrAcUf7Ei`poCv84E(2!2Lu%b zU`Yv#>1;=egfC2J~a;mCDCW99k|*UpLHOvE-oyn_xaZC))U(^1Zd9 zkK}ipPHK|;_D4go;{E4{&Ag9IWYP7@vkjOv+|G7w(nZJ{F~#Y4fRsT`@F z?NHJeN}Qjxlyk&Us3ZPv7p-Z8RzyG>CZ$ict_!RB&Q6yr)egf23qSu+XB4@)xjB@! 
zJzv@ITO}l1cKebYKSh7iv9mz+@ZrZb(@>IK)!}99@z*>M4RjJ;Y`p!-6@)vT zc=sMsAam{~GzB&T8@soK6=I^CZ`lB+%45=@_c7v zmRD_ct`7YX!VL9c4p`V{6cm1)CM zY5U!ZtUCgkA1>N|QJ&KtZR?E|mOoGB5SkN^jdPF7r@qGnfEjRTpp-P`NHDxgfiP+k z$mc3gl;dU|=Z2HA(+v&zs8jjNFb%68w~i&D}Hp}BoJ)WK0g<2_S|XLt^30Kk$jj!%<7BpE5Fx{7x)9w z2nO`zg?iIxX*@|cH^r#VKj`V=ok`IDERD>fXZVW=jzE(@z`Gx&`oHJA44~=ew zyJBnKUf4uO^h-mzvt-TL53Bn&+>Pi6tz_gRv`QCP3VrQux2|Z8WD(yAu6TJ@0lE?4f7Ru2D4Izhzjp# ze>o|k6s(m_$7iOaW)=}S-(Bp3x0dVdf0TjaxW|F!k68=J#)S9 z>_}16bWG++>+riaIWty-KmU5HlBh5t$x2>zTrIn4-TI?YAs^;?U*X<%)1#>x2j%7$ z_&5rjue?q+FCO!+O;(BC7+;KWnelc=$ePtzDvBT$1@?}iOgQ^MuXXSvY&$jn4-A`m z#@`JJ3>nHL)CABou(cAA!#~Y+G^F~S&rjoiG}e`p*LlQ$d6RXlSe5lL7EjpDmf=mV zw*aCw!>t>-ly~>;Gu!2}_D^h*J)3_z=`ucbtPX;g&Rc$dx;JaHXG=~+n48AoQmOUa zdi|Tnf0~*<&1KE_UCeqvI~Q=9TI}m${HxNLXFkbEd|b*MIweiHX;AU=BFE6q*7xV) zexdTA;nb->j~DJ3|Nma$#{61?mh~Np*L>wOYl4SNh}KHim$LU5U)>x?l~xQPK=WiH zf`1DY4@oXD?!ZG#MU0>Izg6LNBjm$jT>OF7FoY1XFOKL{sB&amVZOA$pG6=BgR|hB zD_87ri$`29)qXxgWL}5=0n$#w0mLwF1~$Tz?UWEAF=!mbA215I{K(}$1FSQJVZuCo z{(P5!c}n3~jywt5t9I%q+`9A@2?=xXGZ9yPaS%{{P$3sSHS4fVPV|>xe8oz@)z<@} zKiGi|UrDV;TK6?c(COH5*KysS%qMQZ5TdSJDJorx|9^hs#W_{0kQj>#-%bDJHYIqT z?B5+;VL<=yYW=M+uV-Mly37^9t5~QPxukbr!CL>_-9O#Dxul?ZRJNo{!c~ z_>2GScAgyZ|35G|p8xfUVO*mBSy3yA+e#d5Sw$;C!Yy8!LdJ-7fPXhHfp?L~CF{a`x^Mh-WRUK`0-d(uP zm1x8}-+sV1{eX(;QFn@iEM8P0tPtG~-2##DPg{va-!{CS(FEab(rR+wu4Tu6Tc@tV zkEr}SBUc@~jOB3be?Ahs0DjqMVe?T_5x#x#Ks>*P&sx0Qe||?=mKRV~s-)ivb$aJq zdzk{%IFeO#dyGue|HM-vE?I41{tqwtzD=4J+PL2x`xn!_4RLuhzn^9O=O>7FsS@m8 z?Kc(uZdhnATfV`hnV`(A%Lzg1Yw-(18m%;cf&#%aLPZSR4q)<;BG)ln>aSQ*G(P#J z5R>@%&A-2bpSw13bri+_q;lQofS)bg;(8HO#v;)K0t$XQIZIK>-$1-QzNV`aw z`g$LD^@&_6QJHJE@*}^Y4N@2t2OXkJ*S%@-MJT2UUIXhU~SX(Kk3Aaim``$ zm#1RqWru@VXK}oi1BN>B&oA&(dKUpY>G=BaXux>U-o^Xv>;;?O1opdgrVF<_my2{kwFyPT zKoppM^F;`iwb3lN>K84<@!*yDhlosRBe}z_4FbIm=QJPL$E!teTKA}~`&B&czs%|U zEd~n|QeFR;&Ffyoo#Hv@7S}m{%P%`I?`3#@Dbxiku>k>SC2BgFnhbF=@FI3`ej~-+ zZN|?hc#?&jlQ57f6qEd}}RG=VDMe+ZBGKl}^ zzUczlQ~n+6T}xuZMJ&i!>zC;zpF5B36lJw+r5sMrl72vrEboS@%Dubv6IwyYC#9G{ 
z;#9Z&psz3NB-69qY4I#iR`j$dshweh)wYx|I8jlKwb$IJpFho2rYY|KRQ8o&S#{mI ziVBEI2?8n|f=Gw*&<)ZZ5)w*Ehe#{(5DEw)C5?fgq#%v((hsSmbVzsi9t+>^J>R#l zbN02*A1^PSwbtx0#~kCn?|B-=cDCd=@kW#Geim_eRO<|ZcdJ{l8OMsN=vg9H62fD1UQ^u#IWl#**^HlQ2H5p)qkvq(&pj$t-G^Ym z!z!EiaoV45i?YBOPpw-VI_Ola6J-sy15gdY8wbvzL~2C)w8r^ZYlow#1R*o6=3o`8 z=q}03v_dsJmc-ZkuUo2Pow;WtH0jU4dJ2JaC*&+3@a?I-SuHnDgKVCPU-3<_zAY^+ z0Z*>96m@flf%ts1U-1jSTTA^veRUIE*RbB9)upY{Nz_p+Uzub4xu(s#Xp3K5yhFrE z3_x}u^*}}&LNqVS@t?x#uYNsGSQjq_1f%|5AQ)L(fBgmW z2u1%)5xOpL>bcj)CC!^aC~XdMh9n7-8qRlE;9}IMzbt`k_wdf+;!uWfnbn#!lvwnq zQsi_@9spq;Xkc)AARAKxAT~5?IHl6hk4V^PaW`!aWckOPzg#cnf5V#f(w>FPTW@(1#b7%shz z)__IPpoOZxNs}}RyNJg!OddZ?@iedzDO8xO3RppvL-CMv2ur$736Hr1Nkd$YT%JfX z0D0nYB_l&00;Yf+hio!ZqTkdUVH!mF0Md_%0zBlYSaleCHIIb0)tc;l_yPhB1z3!H zh$>~UC(zwbbqD}qAev|zDDE-9F4(t#hajyWxf#3z{3L`IAPQY6hK3yHIb{*zDgsUc z6g)CR*e!IonTe%KOE(w^=AVic8sd8itpsL&uoEd#*G6)eNfH1hfj?#r)E%tq0F(O3 zsb8NVQg5m#aZ-#vEo&Ad%i5?HYV9Vm6#GZx? zBF<9yL5Zdf5JZr=gESyAv*Ku=1yFM5(<|qoSNymDCXj)p0WTrE*-`imJVTNp#^6Ie z`3>9Dng~%mK|ufkI4D5*1^&u2$RU7tmc|pShtKsD=w+wx^Fr`A5{u=0*a9Rs_~2Kd zjKsp+hv@~Y6JbuB_DhAD7*+DL^PDYs`ll2Li0mTq$XNNqvtq-Fi$_X7iHw60uggNqCY-krU|Jjy@Mm~ujnC-%+x_gaiqYj}299&?0) zdYrBx{h--Hx+j~$^Jf^%5M>uQzmr1r9-qS5H=v5?lca-%KflZG=-)urXWO(_E<0y) z(mpPBfF411R4#&Ch*3>V4NTF7mC*O8zm(Cqy7K7JHmt)R$KSGe16Uo_zB}l`J1LSc zVm`cF1ZxJY8uT8{rPvp{>(j}?o^HeC9zoizipWdCtP#*)2<^Z$?4bBF&+`y%s%Z*| z(9}e|}WWKv9kj}PPpKu1`$w?`+sgJ3NyNK%?3A#2f zKR+$!|L`Epo_Kh8A8z*EJVu0j{swpfa+LSoW1ZO#fMW*yXF_DLSwqY zE}$!b8oEGXbwMw-KW)t*eKGwV{`W+_0zn%LKy5O_V#jhhadDt$W(YOXxW^4(p+YM2 zNTtH0HV{OaI_{d9j>Fa8{flUP%C*hSMJj)7k~6aZp_)aYI3dHa%yCcK0E>3>u-V6Zd&s^2R2Y_2u#^PO>z1k4$|0Y${!=yUs`3V)xw?p$ zS6$LlQfTTR&we)gd|$CWwe&Hx;qY*Kq}PExdaF|ghAF;3kxv<>6wFpg^4DFH+}B5` z#UeHbpG!ENH~_Mevk=dJWM}8*;E+Nd+A|wfJI2e$$0Z7Ps-t}l6$awtNzzsLW7Dgc zy}uR}QBi)*56{wC8l9PWJohsbu&VDqs?D-Kcf85rA0}W{e=+)4z5KdGa5*CK!}B$s#@Tm z3Yx?Q2Pkoy<5 ziQDSoy#dRk-Wf5y2Zf(Dw20VLGL{DV&t@A}CAClg!1Kn&#s#8)^4i+tp|+h(m)^RQ2C05A!l4x6dU^ 
z2P83GCouv#4|Ij@UV~df#wR9>eMUl4+|N=|CufFgpTWY4Wnw3J*$VTy|Bm3l(dz#{P&i_WC5^>e<%3jk)c|Muy>3mxTv zsX=<~I_U|lEw}!)`JfJlc3uJ0Gd~iC|^H%^=~0GdXvsLQqIZK2W(-o=HV4V0vC-i;9Z6g$c~S4scR1RLyc6 zT)Kn%is1Sv(eT%8{G0ZaW?^$SNFm^0W9MreiNn&D@JDLUL3`g8A|qhPm9o~K*x7wa zNVxVHZZhVMvMBf)4+|o8cUEKA6R8raq;w{ zm6alJaDBH3HkYIZkc3_0pr1-YofYokpwL76t)RYL_4q!3AO7yF`}tr(fyApcU)Kl< zZgBXOUFI{Ebsj}IGBBVo zS@=n5o)Xpgf`X?|^c&>n2lXa&XrLvY^Y>Km!&HH(RBA4Ev&FbLT;dYFZx;m>)lu_^ z)6R;?rSEB_mReSV*Y14!Wo$+N42~4d!x-XV^Qq=&*nTl~+S=M6ZDr~T0wN^fNF(N^ z1jwS}t@_?jS&wBNdc`CIc3@9Im3@^QoC_){gx3kZ%T-$YCzyr#dYSaK5*@2XmPSWu z2yvOeD#mv|Z|s1+KWR9`-*zsXQSS$>8c4fPjJco@xg`yXKXOYuF(cb_J3tjA^(0bE z7O6;2H*@im6WrO`1El!&))v5onYZX-y$VqPM|_{a$5=lyLXC8A;oVbz4{7y%sB5#s z3j3w8tKFeQfc^VbY|r{A<6B2}H^?WZMAc}xX)@BGPy_Y#GE*P#{fNt=CJYwHxxm+h{JQRyy;=>1eub+`+!E$ zM~j0*PbSY#(H&+ML4Xs3$-Q*D^G?G%ZN-E@;#TJVDT{pFadz% z=beTsGWly7u&?gZJc{=Ou|n|Iks}Opp5laY15Y1NM%IV}2i2bNG8bElnD$O1vsoG$fr=0$9O5V(2?N3vQgNUq-tv$+q+FdxKQ0^`fBNahAT z4r3b>0=-JI3N<+I<5VbdjO)^Yv>Ea%gb=BbdJDrh;MPj=

W*8KpbN@Bh-(UWP^InoAD2D z081kibipMj%eW1m4s;ZNHUL3{D@*`SilO~hxoDz?K(wcA^(NjOJU)m?*Gp+@D_|uV z$iVRwv;sK9kZ-~U;0O*qu&XsJwPvVSGcv9sO-}dDU=fd~<=h7aPB9PycoXO#?*kapML_)B_E-A*!;EkogL6?baBx7JBaMBAumvni z77)`7i(YYbGy}5B_Rq{X<=yd01JOM~HOzyaFTn0Nab1EKS0qHFPzC_ya5{o%)2T=V zABI5-6h9I4H~`@nI6%RnpoRtx$ame9%&c%@U@rqP19_ryz{em1;5SAQ(G1y42<^hm zp?<3(YsNN1Sm3{bJ+%m`4FD|Lu5G{n^A1vHP@|&Bb&2%q`0i0l_qtgv?tE1Ln2i;BmG$e3a5YX9+VcVTpZXJW zVNY_O-H+GS@rV4zT?sx+yRc%DY(j~gk!Jg~p8$+!9t zW&R;KUqwVekKgM_s3(CQ_rg*~w@3vvyszE%c;`oPa$MPHVPOf&Go7K$zTb1Mzcar6 z%$V+pzSaK#ZTm^L#I9LUiGLX>+_1q4yvyj7u@C|+_~@8l-iJQ4HvcEJDP5)d2TVN`edBXUO9G2l^AM^Urkw= z8*`nK%5k&tw$9@nX@DpU5Kxn^rKdD_?BZ;4 z2(q;nE_4O%=1tue;mm6AYZfoAic+H!h1Jv1FWqjzyzIMP?o&HLzWsXr>z2{tM8SHc zvv%h;jx>%89s%`^opPp?m9<7)vua7IYkF3@=>DjrGZsvuqxznm!0w4;_F2ydeGj~D zq-W6P^}pp^USAZ1R*93)n#3h=H_F?+ni;I>x}2lupg%Cs6ldpygbKFG?GttLt#@A% zkZe;CUlZs#b3MLzZA&&CH_lnpx^HZ0vLLVIiQ((~RIwV{Z}#NS<~|ErQHcOe{L+e# zz3*6MKiJ1oJ1ZaeKKfP8SoNy}dXf@FlkMInzhhZ9aJybuYxwy0?hRuyl#~xFCV%aY zh`xg@9%oi{*>)ug1DJ3y``SdTtyen)CD7U5T%9bg?Z6yFALu&h>+24`Iq<(Ro|U(q>{B9T-HthITWPzjp#A*z;FXr-hwK+A*X)*_^6xF9-%Sh( z(Hy))6JFZoI-Z>Wy?L$D$z>0w|2k^uCo_V`vfGox%E;wXGjhpKth79n>4Q z&bv#KtEuJugrT!b_6Q5}$u6DGBYaCMSQ|2p0qYQeZ*JpN`>P#eC^Q+CTYI(ACsZW_ zS8zr_c93$bw`Je>o=*IF@1q=_qg@*u%-VEs5`lQ+$Y)>2WmZ_AMT=|ndFI{ug{x!s zJgB`+H{UC;N>fO^@_jULBj3?4vCX+!Nzcs?GAw#Lk_?Kdlk}mWGw(JuD}JQsMnfw1 ztSW)srhL42>p8ccpg;lCvVfqB=0Muz?Y#?#DIYL!VOQ<`C|_5B*31a|BHuog zP*9{D?)$`j+h%~%*Q#&*3v27F5D_t3&f)K>n(ArcIEv6ZiI8mHiuaSUI;7XVmM$EA zvAas1kkwn^vpjJ@S%DQs2&Vb=L$6<-{falCuTqPn3N-Zr2Q)@JB!*U%!J{Y!hx z@&hx>W#i*cvi9>O*>PoNuDi{&=<8k>E?GnG%{l>r%kCxO8co!E>(ql*`|M@K4WjRM z-M^I;6s!@5So|J6W@cwMdVPe+uI0eY4^-`ZOYK}2CRWg{*vgWXTzJliS%g&@=7Y4; zt?k7?<~gl&rs$0dZj{{GCab*Boz&G#nr?TIq94m`N+rDfYtyX@Ikz7Lv)wa_m_@deCpMwr}`FHsm+NB4G_u3mcb@M;bG1 ze-_oNVkysu<+jCgzQ(Cr>kMd^SWxCly6xh-7nh^9j_L(=hdr9mIqipIQ}a|D4nqRS zs(4=XXn4&|8R_luVaYU$xeDHu>4GYEpCj=@h2z!unTuZt*-WPQpFMA1v_Y3|4PdB` zcVsC`B;zYS<=f)0BxBnoTK;#~uwK}i#{CkXxH!ji(j3Rnf 
znUl+CQtI1&w+UCN;ipEAv02mUDOUo2ZY=Fc=Jo{KFmf(xF$ndwTZzkSV9X1Y4PikBv7EL1ka> zykwEx%htH{vI`tS6ZtJ|{Y8(YaTX#BDEM4Vrg6g)RW_@6)e zSE=dbGb{uhuXf7TPkTDRk3@&_XWWI5vO0zhsK@sR5ziUFllGrdU2uVUinQXS)c^6vf9mUB6senkb0Nq# zccrAQpLV$|ez=b|>KLN$(1^w9sLh0$u$=)k_1s&^HJK+*$bE({|A2w+!iTEh(=YQu~#e+pfM@j{Jd>isIZmEb&>Qa*wH-5#5H^4rK$Fkkbw z?#nNSoGjMv2Z7|h3$E7%H?OUj$jW2&zV({V%WQKOiSn148m`($xi+z>1x!v&Juo*n zx3D0)as`gM^%IWN=b`7sJo^kP?6!!`DENAHTO60|_=QItNFIZf!>djTz3&iekU*cz zchWl3JV+QO;~*~y>Lq9yI5d`UyydlBzs86&$4A>atNW5DGCDzsoEQ&g_QzLUU0ra` zpcLI$EiwZvZvgj@m1mmdIv3~9Mw_bL9AxJ?Gkc}N*K2(Hx#+hI(59$F5fjrf!0CNf zUJr1*CRASJC;guRSNcIr#`?R0GkYapj?MAGv-Y^;ClfXVicdW2e_S(e{zV<*ZlUB$ zFML>+<MOU_gBw=Efm&sZZ0Rv zkI-$fK>yxMMD`Y`3{gF?3`sqy3|T$11kz2E%+lklj{%wh_QcA{3gT0JjvSWoO}qge za%pk}ay4=tazpZTTx7Pmq>yChFu#zy5si6@(4pQ|hUbide7~pbqda61RjC=HKfcX(l8! z;om=Y;bhVLFCRqA$p7k#|KATH75E?P_^&Vidrki|A&`XIp$twe~<)cU8?RN7Z^ ze@(3N+4N{a&Q4pK-I|VvXT8rX-1}g)p%&YuAUCi73{*R}-P+n(U0q!wAmHQ2la+Ac zoVvdL-n{WKB3q`Spx`;`kAqjKuB(g0qUpnhxFLIJdxamrDoq9{b0Iqz65-N=r-aM=Bvk zX>Vy6L^nC;+`gF43fpbfdYd3mWkg(DKln3fswd2Gbojx@;_16LQU5Z5(?mj7o`E=( z*{uGz@lqscgBj(tAIU33PSwtH^zZ`t^`;*}5A}C7d=LkgZqn5H7zk+?vGj zv}2AXV9x-V0UjP6$v3Z~qovVOm>aOz{VH~FdZt=@O`)%&n3M+0&~IJ3>*c-=&< za(zx9BX{DCREsExh%(GY%>MY0?9|Ls=IPTI%VoA!)%8IoIr#F8xsw^qMkK5vJQvYH z3Nvsa*QD0%NzxO`=+%ja8}yU!6YTIxh_z45mfxF?nVItP@^L6v>&9S0($3G1KPV;e z!z72BUk%)cT(?JjkD+HHiPjqF!#5WCdAu$qlqgdoi{e6bOw9J|j~k%x1=FaM)ZX&N zf#LaGqO_?b&!zL%7;d|b7$JScwhG;S-SICX(cLDM_bSOr(b3%-r{sS8$a}K2Ktacl zjlL9#qaxIR1bf63V4{OsUJ|e{WU0M3AQ{>4g5okaJ!*!_ar`g1%2mXB?=3VN_GYyJ z27e=9I5BUsk@83ZZe(0GS>5I=|XLE$TAU zp-SIr_EXYRiYPKFN&}{s4L7-jO!{?C6M4e5HE}=GYa} z^xWLsl;D*|kGT$*Z6jU~;+(iVUO%qF%wK&hWG(|!?ZJboG^U#3vAUJ?Tz{LB-LLRr z2;5eS#(d1mx=^9^OKOL|oZM#;KtYHP2kRY)EB^a`D%XzxiZQ0ere>K#!ie(H%2Hn> H?mzu6g=$-s diff --git a/doc/source/images/architecture.svg b/doc/source/images/architecture.svg deleted file mode 100644 index cd72b88..0000000 --- a/doc/source/images/architecture.svg +++ /dev/null @@ -1,1407 +0,0 @@ - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - image/svg+xml - - - - - - - - - watcher decision engine - - - - - - watcherdb - - - - - - message bus - - watcher applier - - - nova - - - - glance - - - ceilometer - - - monasca - - - - - - datasourcedrivers - - - - - - modeldrivers - - - - - - actiondrivers - - - - - - plannerdrivers - - - - - - strategydrivers - - - - goaldrivers - - - - - - - - - - - - - watcher api - - watcherdashboard - - watcher cli - - - - - - - - - - - scoring enginedrivers - - - - - - - - - API call - - - RPC cast - - - - notification - - - - - - - extensions - - - - - - - - - - - - - - - workflowdrivers - - - - - - - - - gnocchi - - - - - diff --git a/doc/source/images/audit_state_machine.png b/doc/source/images/audit_state_machine.png deleted file mode 100644 index afe21c93746b4d69e86f20ac8b592a3b86485c37..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 48406 zcmc$`WmuGJ*9MG-KBtt62eG#cSuVMhzu#+L-)|#-!(Jt z=iSfy)t~P;zCWpahkKqUo9>^p>aFfcH%rKQA_F)*&DV_;m(xpfu! 
zFF5bsfnRhE;;$XvS=+c;7#cZXNE%uh+UYwO8a#dL`qae1!G@QG#l}M4%E8gng87}b z<%8!u#NZq5yQ#i*_~+j-FkIp@po;cm&+q@*jGf^_JmCFqj)#j&#+x$o+_ozz=RK)R z7Ulenf$s|vktW9n5!erDnF2Vuf+aSyDy^N#?ucjfX^uU`-Z}2VFV1&AJf6GBp8ytx|Zd4rkl8DRo>>0944Z5zF47%{iLUpFEqXi8goSG4)ZK z%TN0&??-IjW7`?1)(UUFxO0YQCGV=mD6TTEG@qlMVU}PXrt@B;;!%9>L#1{d-S?K` z1xxuZ@J-D3db37RmwI<1*Vx~4;NModL8q+plKKAZTog-=Ex}8}l0>YtWRXY7I~mU$ zwn?xH@~i37g}w50)1BTw{r-nw=t{sF`f@U{pPlh69XSTt(DU)@Dyl~lzh6K#psOl> zHiJ)vAD;WSWWRZRP@I{cb!^?fyEeR}wD~)1Pbe80i2KedM<)lf|J1Cejh(~q_}K@N zcC-17_&6)Jp{qBmJ{&Mr9m2cnmUKj)^dzrWO+GfgmlYP{#6%#XtTmHoc1N2ko$n4= zxe~u94vkTZ$Yt|NliY4dhmOIshqv+5c74Y~_TdQSpuQ@fU(1b}#Kf-CW03LV)e4CD z0(Z+Nxpf_jr|`J_9{x{Ne}rIEf(*P)zn${UhsTm%!U(o)$hvitu78W;#vn>JIqx^^1`q^)Ee~GZ ze*CIR#OF@xbW-pAu$K>nkMqt2PbLLA?s*fcbTCnj{#dS7d4X{o4)u3SM~Gde9zGPEK?zUVsv zn^AgZrp!D8J$-I=_S2oeuP5ZFWF1sVPhDOdHa9heL!srhk}s~JKl>o9cO(sxDHnr_ zhnKb>KzN9Ls7@&*C8g6Qc(SnXP?28IfD=RDF;^ND{yT3LpJEC1_1#xh2u(K=aHA&~cf{JiwQ#iHkSkxW@fuNwE5D^sxPr+$9 zwl}Du_vqn6r9^a$jApiGn%A-1b#-<91kg9LyRf+ER4QGhm?X%cQAmaO8;>IJMSL4o zbD2$WESB2)wj}yEsC8}h@ZA1|Z>i{Zd7O0lL%;c^v>R_i$UUvUAsIuAmbxEqcs8eV zc^uhDf23dW4+yYdyMew|X=Gd+(f#{dTg^C?4|hRaypB8caybRaDfRXBCsaw$SI8#_ zZXFmHh>ngPEj1R}?PR-l^R5!X=0?q?lbzj3m$ldKsBuzGH0_E4wLpQUBK(lQl8bg6P5 zJ$hv7s)##TuEOrl&y>Pk7WQ?`K#b;3cw%i>IwtyUne_DZWwrQ3M0t1uiMn6~UQ_NU zy-lp8z%!8!#pC{abq#s>;K)N|6%}X12)RBBFRz;l`fXImw;BD7Io2nSjEoG4v{xnZ zes%Nn^F@SV0bsRv!sQwr-OwxbIPwr&kg7v^1gfO*=Mz68J$-nT$scrr6|3SNJiCsL zDOi0}0)WgzB_D_(Dr>>3s2c~bq7n%CwWxguUPZkDyo&lDcon@>|E@6j;QwFW+-T9H z8%jEviaU-lb84z~hR#x>YRMweK|WvPYnhRnnZ0yL)LmD<8quz>(AR zTi31~e{e9Sj7ZRlCV9<`%BAEl&F}~YjgNN;aEXb}+9*TYxb=(dn{`YYD~} z!NJ{|%_@ol)Rdq?z$+}ZWjS2d^(`&e@_Z2 zHFs&Swtixg-6iH$qB))1^rE#6qfPQW7!4-kGV71$bzB86*4oT`fVpeaAE*zVLlp(X zJY1F%oCOa?-qF#~k&}~)2HcA%w|8<9^f)S(j%jPlO7fy-D5PnF!Cu59Gx@(OsoUAz z)pJ{+)~s;f__6d0UyqTA$?^cg!NE~#+~YOPOAr>&)g6o~(-ZJwF5B6ba`WN#PkxvL z4f-S6RW>PcapNofSy)(DN#F}IrSl`4NL@{L=3$@^t`_?LOu(a{mv`Ld+ey95ub zmoH!TA6BR$3i3}`ytco*`yC=p45@ZlnRK2KFzrj9`yHAWFxMJvUj3-Hwl+v2O>PVB 
zz8Orw)@tyf;$aBvz-=~=7Sv%Dy?Vw`$Dw>pHsfxK1IT9WZb-h!jAO~2&H7WTQhQ2N z-c-IoNNZJHUYzFT+rDs$eqxo#KpR{tZgF0hDCwes;Y)0a^taU@ZmzI`>-AYQne}6?Mej_3d+g+nbcaT)(s_9v8o!h?I zoMbe=g{fdnObjUDhahHuQbgxwXJZp!UCh3)tL%vq;NJG#&WrV%2?+@^<>P4H=dKWG z(;dV?4kPbee#9%wu#v0K0G;<}d+zjbvncY1^CrBjzQf>A+1nQ}M5qc;1<_Y5ry~fK zScl=!Dwr@SC`%5PAKS;1fX+wx^LELsHsuEGU!W} z){;L@aE9ADI}bCfn#WR5Rn9Lglv5*a+_(WFHh3#7hKWP!u|KGxefcT!aZbjuTmr9} zasWS_>tad(shQKZl@o!Sx@4S$MA?`A!be34v_)Q^v&l~lx_zH2cHyH(l!H(iQN%o_ zp~=k5Tm&C~`OXC}6v%Nd)(GPxZI7wuX=joYbsY zvUYcO=T^2V3LdB1>V2uNuRlLMB;&SuQwlLGeEq#?XsCkekiN=pLB__W7z$OHs`m`Q zAuAkS`{#x&CaP>liuGB~+rlXE@bKawwQ-P}(7{)Aw(SND=j+zxRht~`+D?P<3d@~HLC=%BgzQpx3Nn~b8pl}(>wV`}})_6P=KeFBJK+%~}NZ!<|eW-jpc6_t>|FkUB?SNO&9qFHz^{-y{_kRCZuLE!RuU_}Mntm=uy(tlvhi;O^cLO5x3%D&T zI1v$<2%!o$Hu3AIOTTfY4}HA4&R7x&75Ld zX=|0K7n=50>4?#{CaYG)A=$}P?qH%n{NdNc<=G-xP*g3g zKFiLj(3diTnp6MFl-KAc%s3wQNN?5cg6be35%<7g#G^>twB76T4fKagAO+SV25joo zb@I*^>=!US6&m#O4LxnWhoBO-eUDGEGv2SLl#CyqEQ-UKl;5mX0Ced(u41xlf`}#*xvy#3-u`(P{no7tlI# z>gx@~d8ukj{+hNDwy~l6ur-eE{^fY^Hy21 z^y*>_SA6sR^JULIMH$qE)1K95mc%e=+ZovS&i6=oO)ky|qL?#cY6qeZ`j&e`IoLfcSs9>Q2W!L5Tho}dicX`& zGb|c~2Aq6`47*{%x+iD5&aELN+rDHIgIPT!LT59L8oI}>B2C5xNu6}ZPQE=!Z{NLx zxua9)4oIO-6`stVixWf5iG(?Y7Y&`A*v!mnaF5-H%#^&98n5%Uz$7o;hK%AY2{W^U z1m1E1irH(%OEWz}mq8w4{dC$fiV#RE7R#AMU%G9Lb5Wg(t)iu6b#ImB`3d!0>v`3F zYhAQHw1Jq{6qFKMTU&r=ncsS-}z;U*4YL8rGd)rd% zc{)@{i6A+$iOF|a))RvKGgR3L3ttKT8#S44RE9E(JaO6PvLkspqfrS3L_%I=MVlR# zKa3PFhK9Wb?9x@t%wB6Z>8h!jHF$Z(C6r_wx9vr-TA*D=r~4 zxGO=+`UQ>bxZ4O+t$s+dSLB2Co3NM2{>A#b+IsYK$7XF`$r1(P z{>Y5v6Upj?hUlmGm(9grU{}IztC%V+H=c=_vxh9Mo6ba{s5jfs3MtY#-!sf+{)4 zx4j%mL((h%FYj|aQgVL5RNc@~UVM0ZcA<<3w zyO;Zy7AM+I^#4|*Q2IFn7(cCux?^Q=)Iq>;HN5-5g9mG8muCx#-f5>7J#W1Z+*+67 z3WvCgHPAWpWmpM9WXfiAS$jOmgs?o{W^oLyeX74nmStObYByesQxS3$0q>skxul0R zB8dLBGHOK^wr*49N*TAosg0q1r=hx&0B`>~Z{1Q2+mR}7czg7f4LV&fW4yjm0d&+d z=#RKXhUZpx0fnVFGTyK?cEkMvgOEAlfocr9>M*Hxo$KjbzroiBu5DgdQ6XiN10B$0 ze5tQ?YL!NMb0?<3y86`kK{x!ta5i75a%h#@C=H=z(;TbhyuTYj*olx+d`zD4z6`n5VM1<}W^<{k6@f~en 
zs+wJgqfH(0`|~+gNL;*E%F~y2+**VzM~a1;;3xZQ@7}%zgu<84pU0Hnwkr9G)%$(7 zjvt~e#d?ndal6VIUN4+`V~-xdwcs6Cqp54_wR0;PdG*Kb?QeD0j|J!&+}6e*14;XV zBgL=GhoI54`ic~KfOd0oasp(@xM&HL0G$BAClB-rF(PBCNlMDr=$X_r+sgGVFFU*K z$U{Y};d@*?7pL(`(i%&aws0r3oO`eLv()%fnLCq-Gz&j}?KV&R@J5Z&E89|kI%(^r z?BUsX-4`DTtH}9Kxx7pmE^$XNWMJGi-Q}2LUAqLp>g?=ip4uXNih4Cp)$bZ!)YN+H zs^3U}S6o=6c9(oqIr%e@+?YVhMqQ(8{)gzI;}gz>S@@nv5pF}oh{v3y3;yq{?{TQG_$UOsuF90o-!APa-PMlGPfcwyVb9FUvc^91NE`1Ac0NaF zDVL0p&kAZxicNi@7Jd+HBCW{xUS&vQsG6k9jYeS&n&AhXFe+DWLSrow36Zvg-&y4u zII!5**uK8L4<9}NnrdX^HB2PsJy`(Lty|uFZ<;?CRy#*YGV16PH0yn!6x1f#-lSTD z5$3GzQ6XSe`Dyt%`9=ATHF5iLBKzM523i8e(Y!5_HIrT@sz0VF8 z;(;BaawiSf`k+n=E~}_G_5GcsgF~6weQ3}_BbdRlA(H{bAlRVJVBJcxuU{2EYJFFe z*nCrS%a+Qs61BH|wAB(5l4chw2laOf_Hc4`5_?TLySo(>6hgoa1qOx%mBGScI~`hW z)#w|VtWO_yv8Vdc^_gs@%O9Od$q4{UZ|rL3SMIjq^l_*T!)GU`Zm6lMJ_LN*!-rDr zGV<~fL3*&knq z1~hax8hP^CG_ow3Ma&f{Cd96$j%(T1tKY?zSa)V6T{aKmWtGsNb9iIa^Xv-~?yhp} z8S^|b_$p8HMP@BL^G%oRq>uBK8LX=Zz;I9L?x50*QQOg0b1430 zpeu4&BJ|Q<+OT@mz`N7f4TZ!}n8Zgs+{D@D=cN0J z`e}Q|lRi~+M>l*tzf+S;8GU(&m>tB~Ll&Vx*H)!#@-LR3?MFm8^E4=UGokLz;XSx( zQ#jVX6Y1YTUMI-|`P{woXtd@TI*`0YkL!DkZ9x=34t{d2^2`k~e!DJ!m4^E_(pE*W zeLML)?|Ul>G4b;Y85*SCPdnvtTYNhYPK2Pcq6$Jawg9q>tr60s>u5`GA%r zk}Wf3XeuhllPSR_1lgNjQ3o!D98Rw%6g{VZF$9NRvZ6BQ&kNAsi7(%NX{iMr@^iDq zo$HWiEKeC>mX8JMKOozUO&-_{T-*xM4%A-UJn%AP^JXj4^(+}&kv`ZZETjO8ZP%%`tx#g$95jb;{)1>ii#?Tn5VC`)o5$_r}A8bx>MGGn|C@_z+^A)YhIv0X{&Yb9(u~nj;r?fpUb2iRr!D z#nn}x7g0QnAR7Np5fTzoaz?XhY5I151yE?D>?TSexX!~wH%W+zscb6J@_GB|CKdih zZ@T8=>X@kPQ0rZrsio4Yj&ae&uDId3spI40^2B@~N~vmgILcwrouLC+9AQo=4R2Xepp*;Fp>9uo?dxTKCfo%p~z?+7{`&jr#c5zDs(kl z9XO7V#H#UdRaF7abs&+)cGhp`4=vqnUxw`J@$BrZk@kU|y?q?b3r7O&HB%V^PK(!b zio<1l515#gWp|Gyzw6bRJ%0j5U@NC>z&(dN3dw@IOW+sz6^zOiZLvbaQd>`}ncF zE&9bfkn(l8x%ARLP#XC@fkh-4Js1 z+BNN@s%`$^e9itmjUs?Q%WCqgTT-aap|qWg3^>RhQ|uMK0kA-a60#MSHMEcT%A^@q zX-yS(9PiKRS)`!JGW9%mLvK_!k!<{c8s$vr%`{E{6qS{?2O-lqx(loS8y~S+rD%RC9)2%tsNON*j|By| z!0T&>C>l9Xywe3W=f$-NHe=7%5FH~;HCHHCg|*<6*XcTlxVf2GnK2M<%4{8fJH;%j 
zKd!%4DigZqc7Sm05jYHb@?uue`$UDI!{{D)m)|c>VNikFE9aE;Z~sO&>0c(9}z4=wP1_zd+3&kEhK%eP0YDSMEw}@6>#??rx45aa$Wtz|FbA3GUpsqh7UsuZoDpAZ-|J9z|M+~_jV|-U1*O06>aM0&lK;= zi~cOdsAz){j2ACnfW2hqu{Kn|n#81iVH?Wf?Ck8SCh?Cx*YsDPQ#CDKVI3GAZrJVS z^Sy~(QeFqMV680R_BOaI|*4Qjea|WbDM4EsgFencPnfPwU zOkTbVXzpZH3KC^BeY?4s*5hWXU^>Va&aI3Ww|PmdYdR2flL;tyraz0P?wW zmIGhLV(jv)J2PdE&Z@G2TNUUP^$HS?nY5@BTYvw)T$U6Hm#K|4Y7HW|a^*^a@om@T zg#|-)!i>U8v>YcP(D3%x9bmQq3Q@d}x2|0;Xd&{8DjFKJ(k1EhV>xaY7ZE{&rx5eJRodwN~+NL{}!+fzbri-~57#F}W8bZQHlf(hUvje7RIU(1b z`P9_ZiWb_ypCw-ldWthLlqpL#kQ!r|fx2=xO_7Yuw_s(LNqdl0z!*?buWWhOy{q2& z=Z71lq@?1*Fm)47M7cHI6a^sh3sUU0P!KQ%KtO=3>+A>20pY!<7PZ~ydgrY9#i!r>R76oc9S*RNk@Iy4)@MM;Ftc6PjW^PdQX+z$}bKYinv zn?dY_PL|U)3oD<2aW?ni_L9)0W}-E(El~Ox6-zs&mzPJ%g4c}jOa`y1zNuX2@N3N;&TzyQnG|^fO(I4MHemJm{6pSX;rouKS#z0cem!&567}%fSQGtNw2tQclyRyH(e*&Mg zLQ~e+ppjL{-S8+aQ&ID|59NJD(xtNeHBrmU2u@Uj+2`JUx$X z@lxwrpe*n1>{vY^2UNhs#Dsy2bkJYXF)m?TlFZX#%JZ;5uR*s}R8(}cdX3rfZ59RH zvw!P};hgqR!`IagIbf`o>qiXQTIv*6_|?fqF&i2hb|nhb*jmRkbrtVt#PT}kQZcxG z@e_!NgXQfm^$Zt|0xtRt7^tlDMxZ@?g`@5l$4ld+2V+%%)?pr@6})oHJ>5%Ns5Dt7 z1l0Sf$K&63ZG=krCn&1F{c)Q^fF=nnO5?4SwO@jQh*3u#d)c{KUl-nB9AW>G3 zNtQ#!3q-j?mu~51M9k%(_hltxI^ymZe3qd?aYVMf#~N5S0AGj=P~|;3dt?V zIexXBMe8SF-BuG8xV1MOPNJCih@Y?;eEPwpChAxKCe+#DFl;f@G);xOPBksv2`#1# z>$x+u{6DH^c@0Z4Uo@Msg01Z~3`z@uKrXiegra9~NwrFxl6u}&JVegY(-r2RWntX{ zwA!OTKujA_`MKH)Tm$5kl)my41=nxf7#$zSA?Dr+=7O8Jm15fiF5*?a6<7}oMr2WH zT(>K)Pnn*Rs!R9{k2+s@K)TG1d8T35bvvCH^<0(Iiv*(BV2{)YEcJ0c{R0D?J=GFY zFy~>4%vT=*Ko`I-j>^#FqRh2rTp&y0Fu#`PL8UYA$Ml+nG~F*`GZ2FjQBw>^_Kt~u zCx-Ld^6q0~^Zyg^Z_0=cyO9`B8}az}CD0!q%i-q}`qjZQ1n+-h2B6H#_ZUXABHb^* z>>=vNB#s_Ec>t0HEEbrq$f(aM5t)k5*>~q)dW7B&Jkw z{sQ8YA(ixlT}Vn2eUGudN)Uz!B>n-Yh#~Z`@>8E1JaJ!1g_`2*(1j)rO%R}_+8u<) zo$JLd)?nIpzK|D1=D<;uN_7H=a3FI%U~`^fH`;;l^OqSgEudJRqm42_9xuPc?pH+- zREElgJ{b)1qdhV4sPIinSIatVN4TZT3&>n6Cj5So7!N~r*-6!ZRk!BBlbhhGH2+?j zmxs!(WzW$AE1)~xlDGlNm;;FvLlVA{*T)dfWR#?{v^ZoBX0$Rh7~ zs0f|z@NP;#{#NgNd3fv1{=K83@J&ASU8~EcCa4~POrb_d&!@fgu}W<#rUiri-&eC! 
zst^8zmjfh+<{w4gV@%2b6q#Mq`syC?$=+DLU=fna0%$3LH)&(|y%hssESSep&Kd^W z^7@}o%$@@tV0$Mm9Mi!bz|+Sb#L}n*@i7ESoastN|H(XemIi1~fS;v%hVi>$Bz&>% zD|$(=p4uvyS<5F0q1H+JCPB(KjAxGmf&p=-g1-F5=gKTVP^~hsaCwzb(XsXZ8lL7B zw|gpny-}>JfMIeA7n%Ks%}fqJ#k1+65`1U1fo-c=4Zg* zh`Wn1p(L%m$h9ki^VEfJ>hyRjcuQAHRMI9{%Aah4k_mQO{I-$s$ z=)UceqN6}!Bsq}rz2m~=V=$`d#>tYL$d{g=R;KZ-4m~4XPdAHnds&NcWG&PA+ybVg z*nzt`wE#iINW*={sE)!UIiZYk*yYNVvR^~wDu zcEiisg)TU;BE50kNr)O$d}_VqSa+Do32*OO((wLSAb;sGJc&%FIChuk%7oHTd?RH? zGy0K?a;a9<#G(jr?Ikgmx|@Fe9y%+3PBRafd0b4*owTIFZ{<>n&mlw0V|1~U znH(3x?wVQ(;AtsNLjpATdT9>6=X!% z>}-D0;s(53PZD}il6zryGMJ}1#Eztte}%e854~`0+BBExXsGcjx-m=abMF;#QY61V zCpu2GI=~chy2CFmOi;g@&$1YES5gQP8oofesGL2Q4MN^-P4GMhbZpY{*{4L z*d3W6;!CUW{~K?BkG27rtZhf6P|5!s|34;yHLFJl2Hd3rXJf&SJ>Uw8iY(8bJ>9%g zB@Vy_AfVCP+-T;TRlxPcrs%I5k$VGMhjjJTf;|@f;u)El;fEL#sduM*{!>bg$h%Ix z_noW?Xj6A~CcTfEi-0t`*1vS`n9sTJ&>xF#EDwytPXtcBfBfie{^T2NxvlHUZqF0} zpHmBtmrYdXOn?yaQ9FZ>qSR6T`tD{nj@f~M0<46Rb-W(7bH8vOxQ1)X}ozH`aWV40j{I3 ze*x0^`SYCvY>agqt)$v&$H99h(sUB&tlg{5?q&L-Sn^%9a7Dkz%tkNvU8ZGB^ymL8u za%O$*t3{|FQ4k?=OAq%6E^eyAB9LI1nV1X@F>FdfS(KdQD8yrhVQ0|_AWNZN-90RK zR>UaY&VY&nZY!Y>o%L{GeSfCL82-W7Ik@GLli?E+&0@L#78OSXFv@(173C5&|JrGb zjtxo=JmaZvN&$G{Q=GN-wV%;2TF_J3W-EzM=(q(>w7+kNBcXuvXIEnD=mw6Nz<(JE z+%Zf73hqJkf{e=oGzxVHSPi6XY_=#v`6=f7fcTApVf64|4F3M}U21AQY=WKUQe(U_ z+G|&h6cZ7BGB;~-?rB*q_-f&}KfuxeG}K3bf6KA56~HRdKYRA(5aZ9iZOU4p=#>ZD z^qtAcN(>L{O_Q`9uaI}Iuc$ca;+m>Ggu9cuOuskmh%}86lK^lN4;J7IcU^iHDi9eS z4xG8H4$DgD0UmHbaRyHaj`#W%0c_7l`)Hq%lj{Q(XeU>$)ToPaKU^Y?`}y-faCp6O zPvXvVA)(8RDBW=yCy}eG6|Ha<^FfIN=3$SUT8h~|$!`=KkhsO?V{BmC} zaSY1AajK&uAs^4p`C@w)3txG4m6a+*T*50`{?Ebj)GQ`&6*znVR54GP84j4tQJipC zC(Ya#cr&wuy*;1jiG#lW449jTxt-5v@f*0E5Ya6Wz`qLjJe&kPAFtP0l^eI!L{+AK zPe(_G348l>K#srx8z@KP0u0P)si{=)14;TP}uHkQu;1AI1v(_nRwxips0B^m0KzYWEmQU4!5 zNN}}fb!bm|Q3Kqjbhlw2hXQe>YO=2uKk@EVVJmg|r1#}H7?N3;y$yBxKDn)cxnICx z`72=$3}#9U?~XtIt(hFf3<(baWB*vQ*ZBztwtImh^2owp;+bZpdwvGY196Z9P_cj) z+-PbaILp~}Z)m0wb$9jlzA{fX@A%D8W7gH(0)wppvc~KT&;-7iI99Ugr50cy1JIsV 
zX)zxMOE5%rZg;V3ZF}?V>})`mlarJFdM@z@cWrrMyB4A8%5i6>91Ba)=|+w9@}Cso zs$!~JTOG_BYyO`hoyi{9;Nv`_VDxRxMoU9|+Exd1Wx(hymilJSZUfNR+U-b99A!C4 zF0jKd2%D+#=3L;|SYK%g#M2Vhn~SUijdzqLQT$A*HUZ%|N~cSUY7!nZ?eI4)YZiDy&~E zEkIf)2JuH5b0SB_x&y}1HqW0Ki_JvLxGGZhk8D1lAa%&q#3g#}`{H?5%t9+6R0WPN#Y4hBS%J|g4a zAM1c2{~i%%Cs1tAZ15@!h$SOqV-@UB%kc`BHt_lb)h2m*`pvo;)LBhKqhWtYJC{p= zpjV;5f*Jvms}9V_Ko}~S1p-llw~lPy5JIj{=Lf~6qn+LAc;zOr)b6`^{_FOpX+rYA z@bS&}5prHZI(^Wqh5q@v94LrjJo$!He|(tLfz#BkJFIa5*QI*>X`VL|;uvXPnm~G7 zLFGjQI0)lqnt8C+-QlLho+OZLRuH5mMq8t_gGs>6VO)_GT!&E_=tvRUL3aV}27H(q zcgDEsU5+*zFpM}x(Gsv4dsg)re7SX1^Ua-LK6Mkz3{&_@9F*UVDMjSpwv0O-70ezFO?dUagQT+_*y zzcuXzPsX^O+B>}&+SD-U)QLUFbH3bcc)ad9D&*RP@Oxn`1s^{kYxw5cdiRKD81bmp zPse;T?xJDs!r1nR=#pq=u3^8t()oNR6lk)8G+#e_t~QT<=mPM7lY4iBh+x&5{ji0Z zlH+cgbiFnnbv@UZb7CKw*%0IEvm~*CsiqVXpa2flI9nf&lfUYxP$mOwlCLRm`a{OV z(3FSfwmkg_jl40SC1|$gIAS5P51jea%7Iz^Im`$Qe}nm8$QbBU^HWO7)`~Q!Qhl8o znZ<6{P)pzc(1SRK$;VGLrt}~M3=yo%vJ_-gSec%mugkj5nR=FR8asB8l9ZKY_(rbI zvcepd9n3J5W^`n9DJsQ;>Fnuj0zZ_98>qAwQfSLm5Kd7h6NOV26Tc~-LqLAm8Mb{T z2PYXq`FB1A;Q_D!$tgTbl~zz8Up>OtmW%B@s7F}i_N5OMio{;0-R}gGZjPzJTc}^= zEHpCNfIBKIOHivsm?)~Eey2pn7D)K441ssdwyAisZ)H7}W(wt}ydtU_p+t`LG)&Q7 z)U~El?COmCqlb02PO8@N$~)mnhFo8nO1$ARw|8se^)%MMmkb&#N$={q*!(mby=KUu zLCo3r1RTh+GVkj*>G3#O$pYoF;`?`7C#P;0tXFyBbhBY|diqa;_az`t!4UusK2hJF zTLUVJur{!dD=o*Jnx!W*uNxN9FPmYn7<`v&WQ5OF@%I-ZS(mLGnJtw_uc? 
z;r5m7UNgL1o)Ka3G>;OC?v*f;ZkT>3<7++&V*MTutGZ-`S+z^6uU+)+$!2$h$_N-% zAP4V(EnqY$v;eLX8EJpY$?nrlB_yyDyfK^HNA=Ow)fJqv2(gtxd6F}4ae^qwfx=5MaG2V=-nn$$rDECK9q&7tiI8$#)C7OEJ#ts>NY9Wmn z0&W5#J!mFJPEK|vb5!5#*SmnT8T=8FP#Y^>s`Js>$BaoeFRgp#+auLsH&aVnqrPKI z{UWT*Qrg}P9f?E)Pl-@RJvpB=up3dm9CbBdX~*e%^gGM(IOYw;m~j0sI|q)+VhwQQ zrIiIzJy4`4#IZ3y?=au?fWzwmG5XUNlPh5maLStvmlvz6W&}|UtjsgT`^AULX`!UN z4pPBO^YfsFHOl3Lkd;5KXw}F!H5SQjtN=_seX(?HSgw6~*XwFv(j*3_~6kA zdro(|#UR}$5_s+qxoSrc;|$PvbG-A)BRN>P6-?25D`& z-+0zUkB64m`3<aBiGmPxPaktYt>mC3W zO#0T~n21E$_wvh^5bJhx{;ohg+Om&=w;chFQSQOV$LAU^i2^N`eJ+L(SWGc43U;!} zD{`0x%D#*e{^oR)j&>r5<9(qFY0ivIVfDB+P(zr6n`6kiW-jp64l1G&s9*+LZQ^kp zjQa zU0AaLt{~Y9pKMR1Ct5Qu=)gxIoGGX%Pax_Xd zlL=pq8X~*RrDNsprJkjS^bzgkYOSVU(BEKwr0ICdS`ZU5TTNHO(5|43^c&xE{~`Q- zxc)~{L3w8JaCAB!j|Yi%oo0ymYJ3kMq&wpGQ;0+6<9=0QEZcxUZzffSBPp6?8LiW} zY6hQG{gTnMOy0*%v1#}9c?O=ZYqa&+qSTk3T#%b zuFyA`3Pfeoa64*nxqhkMkEssk2o_0Dgy_*~YKfGmn-`qA5JVZzyShd&U#}6{@#C6{ zr*Rxl*EnCdf>;|*=6Nqy==Zb$`!d*{z($a(od&k(!W!&$U!y!-h@z}roH%kQIhzX^ z)cV|k)0gwH3W#5_Z=hozR|)hIq;jl?k|)_-QY4xAt4u23&U8xt(oZuhk=@mOrY)Zv z1o&g%bxRbqUje8I9AJLQ^1vu{dhXWo=Y*cIK7nTVh5x2r%^T|lW4}71OUX3CXQ1nX zytbQ;AePdTY{ZuC63b;kBHLm+;1A5j^Kiu41SIQ5Ql;{w>`O;Ux(5o5#;FT367*)PrI%&4Rnqah`~G8VI#%b*4EJZsJG^0+!84NJ9T zI-wq_u-pJ_y%6^B%4?`T@_E95FwA@bWDjI8t$sQ#S}J$L$Ld zQUHp7yPF(F7&73EtzJ!2_L?4ajo5itk=GQk`H}~~&J1fk-*Cex0W`a%<5spn zL)Lgg<1vv?#GV5$L`tfy+Hoy4Ee#5V221pH1j7>TWh&Xg5i_rQ06#2LzVu?{rU91R zCelB@00AXSt@t74Od+pZVP2BgwaH?zKhW%(iZkI?-a6iL5pB>$1smKmt$fz$ITcEIPZ4dokz4j8cexN0(YASd|t0~fc(?vR9 znQK0`{FqF%r(XI!=H3psGl6NjhIg#z2evo{y6!|+SA+)Feq{nDBlr@%%2(rx^0^lW z6Q`g%d6ns;neIchZ<{5+-y?XN=qC{90S5KI=Y5{9+XI0Qkc?jH^RzUpg|>kG1;S)<_R{>P;c4y!2>A~-7x0h&LP z0ygM;t4Zbtv9rrtlYVCvJMJq&_rO82NsS!%iFy|p6g?DA?K}# zot-Z2M-Fb|C#8YB9V7}XqV~R5o!vUW5xW-%TOYAd3LfP-9b|6wFgQ+Kj<#O0B9f3F z?9wU|Fn9-A=r`YQH{BI5!#y{F8t1rr281dY&M0haUFROg8jTu zYpGzi_!aIXH~|2p+5)RrZ*1amP1!h&d};=ooZY+CxbAR$Bm^^PN6XnT-uigI!DVk@ z5?GR1kdRjJs>FKH%t_fDL%{hD^kB=XGC>FufVhizsYAf!E=yvn3^E@t~W4I+_ulFuOo2tT6dW&J04| 
zyKuuV;qDZ300wiCXu#Lp+Esp)$paa$H&h5PDr{eV*g$-<%7S+{10{Kd-X*OTKvUWj zlgVmF^2>*2Ol2J5JmsB)f2B=Ju8g!pL2X*6mEjS|rnz%vI{1he-xStA(he+9NnNAUx)kY;I)`LG# zu)J(Cc#G!8v(En$668XRu@L?X7c?On_{acAtxZ&X?6o8gC{dbR*vV!y~mco zBM?FbLWTh|Rn}K#a#a2vNnc)_vj?1gfb7XNe&|TM4USzY2>)DHcP%Rh9)M9CH5`T# z?TQ)^oIF*CP9!)20P2zTwznMpCitr;!0ZYBT1Zt@Rdnj`9f} zLWlK;29TDZjT?S_FsMw+k{~Y?1`j{GYnKx(^l=ML39AX&*UpS1s1l-f@gnJ?mi=!!gRMaQHFq;j&WvF$aH zE&RvZ5p}#^1rQ^9QP;45S1V@7dondRbO%}qkCP+9LT#Rbi77(NfSnNQpBR01H8U(4 z;<9jE#@9PXT;|x`b_f20>3~M!ZW@_&w}>_c!{Yb;xC5vc{m?reU>XrP=zDM=xZO%^L*OsBc#k-mSn)0b01%V z8XQk4MGm60GvI_1tV-1r+(>)ql_>4Tvphzya;h5X^O!5lk0O<5s&bl>Rb7!6>{r3e z1+L;g3HmAdx&%zuo{2UqZ!ofgQw)Iiuo}UzPup-n#n4`lD9~xuq7%dh_z^O^z+XG@ z(tSAsNUd5KC<&uD(Bc8)%dKAo-1Ll4zlmNS^7I8U4o~l))3*W`+i!L2kn%&S&6pHm z-3Dst!s}{~v%tidbGuPam7oXYryMe7slWyS&gU)MN}M5jDrw`WVi)KHMhkoZxivmf zeE0?GLpWJF^jvdN{PRVN?H6el;D=BfS5N`n3(jV|Y9}}$ohxrF;0Ml|~xm21Ggq zq#F?hB?P3Tl~O?IPU&tzx=WB2kdl%Pk(N-pl@tVigXeqB^E~H${nHQkb?s~K*|TS6 ztu;G;@QDt{N|NTsVv#g*j!nQP0b^83zHRo%Xkw*O3%};cKp+>n!?@7-j?~1)*y7Qn zkxOEH?zIxC?VOVsa>B>3yW$n!<6jcBR^G)~%0q-wQTyczT1;9lgyqAM`Eu$!Iga$* zVjaDa7iF2n@X>uq=C2#K<%*bW`@}26p5QkD`{eSt6MGw_^bX^39t0hhi`VwRa=)jO zuS!9ID`vkwAtwmT$%Ob#GfXA~tBOLAvJh_6o?5RE#+5nmFR@xzg6 zm(PMhU)F~#pt@)PQw&Q~q5`BCDEd0S5`N$L_XHNAY)VyIG5VH+O8+$oC_LbebB(2a zGTI99WrTc&uHWm6wWyVxtAH@ji_GDYQ0pqAv-?y(#T#cT!P z+jB8o$}%s%RU24q;*?F|5DPhE^H8q5z_Q;%bApP(;)EWE1V*HoS;7?fg_JV;TT||f zMZNEe7fxs*f(hr)p!r>&df9Dah#njQl+rFAm&A6z2aaTQe0SI>`3K!81p(Hj^&cKI z5rIhpjXl>#shH)n^zoCsNI9ePE-Yc8xZEWb60&!tf4+m#4+>mNp05RK3~w0M!?ajf>yC`Yux{#GL)%LEw{RDsTK zS<;YZUYbxu+M+<{w#ci{oV$9@#ALxr`ego!xOU+51%l2?d$c%+Wb_8gK)KhOh?=?y zWT;2uIoCNp0}JmjeNHRz`!WChooE4*XK!N`hOHm@XY{APA3p$PD7Zf&$xqW1mh&S| zrR;!24g8W*^UOUBBqzvQhtQ&)K~x_~Ij^+y?6Iun9SA+{PS^Jk_&jaC7@VzZk<(bd znEUAeu1@8t`r^0rKb{uak{l0oRDy2a-;G)0YDiw|BzsrlH8at!5_?1L&fX`k^A8kj zbhSixrEXqP)iSDM3G$x5=Y7vRh|X#2qN7q&RJ1Qem=BlF%v54$+~uc8-%_7l+REvf z#N43ppbJv{ii#f&yT>jGOM4nuwwBckOpA~F8`U{SLs^J>3=xFUPINYCBJ|IH=7kud ztV`4W=+u6DP=FQ 
zNx7BK4ae_S#WRT4Uu&~gjvJ=v@9VUs$h z(v^K(x~yC0W8p7D-1SyEqtZ8BNg|7G==(LZa{dTMolv$%EA`n(y2GkFYL2g9FllHg zQe2$BpUL2?xDLChV+@h%;q}Me`Cdndc5jE}jUl!?T3$opZ5uzyYF;L56orTWSt6HT zqo@#>b?r>97lnsHp^TVQFJs@^x^B^~V2J(7fg$2)gH>y;hF_p9+q%$avs;$r@1=|h zdLPSfS}-7^3m51K5sM?^vq`*ec+0)0BDHPkcFbYrg19iedzeya6k?6XhMLK^bHnwrLXka7L5i+rTzfj>v#HJ+!0;;|wK4DL}Tn4o2%om#}z02;A-V4?q zv&oF}S4mra!3u`PFlFy*^dzJvlr%UOy`r zCoYXFC0&u^eRXwsWaRifO6w5o?o4d8bF3@*JpCcR7Mp&&_NhNT4oSb(Y>a`xjGV@a z%N^vy;Wye*FRB%u-wjIm%K1)?6bxdq{&dZ~AE|{ucj6B=*100PDZRG#=39ePB-QPZ zbIbPM+MilqUnQ<@XgJWjM{zA4%Y=nmZAvgQDsajQ_!*MGcu<|X1}OE-Qy}6hiyx49 zn6d<9O8+oYIt1Q4{RYs#w?UrK^GHC#zSHmReeMjYRO<%S@S&U~JN2EFeVuNC++B;Y z=Svb-!xFw8CB)-6F#blOtm{<|8`h)tF1($1-p4_4eHNuX&D2+Egk+pbnkqRY6?$YT zr-~eawB<|Q!|XF7Qa3g^?4ZVl0GAs5T=|@*RB~GawgB3={JL2=H!{KtJm$PSrsX+k z5A~%%mLIo2MBFq#J3AXxc#&RzepC)hu#<)2mGF%Tn^}#GU#%Ry^6=i~F~`+*qO4ZM z7n#rSG&XOIcAB;)iQvi~V)PDwIrqQzi-K5JtQDs`-{qIUaq$sFOga6vV11V*(y1gt zn_0la;`0u~#l>ko_J@fxp*Np~gj7=5aE)_b$=eBLMp zI@WME|EUNL?gnWMu+xX1Qu%zcuK3EPvExQi69T=^R2oz^CMG80;^OYrFLE~>8~4Pq znwpvdfFsd;GwBD&8X*tiIlX`XzI}>X{(0Sp^w?P9#^TR1nABPZ26G^uWO|tV)Y6iQ zf}%jbb#)Ia{|N~RmjL9*cDU!jw{NLu-XRJd#v?IK4Gj&T?%Cg$HJ7)=Q6IklYAuY^ zBN2alVWIWI-nfC6_{8a`wQ;;TXVJan?>)Ep%U@>zbw`a0s4ElDxQZU!pO~0Ch)EuF zm+3e5{2bLvws`-$TMJ)4`_!2hU=RfH09!}c?`uh*#V@OYFvK<16<2nt^24GyzpObSvVG$Hvyg>>~p$?#H zKtC`rQouT_p|!*Ct2CG8BFk{>WUnIsiB4r!n=9_;Ub$U&xN_8!tt z3>-3^vYl@Y*3W{yd(%B6CX;NbA^QKf(YevY?A}IllSSf5deWHx%zxnqBYCmMmAZ+Qbgu!= zN7p;jW_C_aPDDgS$0c9uNSBHhkGHd_){F1Q4AMtr1~Z~_YFE5oPYGGy*||9W5ea}_ zRegQ(@9}|wh#!ToUNF@qv<;hUCu^0`Ji4JMydgFISXXhy;b^->VJr}z<^+B+=@oZ| zE{v%a;jekeX9a0#P*t)}%^`Z}XXjSj(9L%}@j2uz=JJdBKZ{>clrMCAd^f?nT8r+G zzq@hjZl*sU7yV10tXXK=2yBakz2>y|8%L((ZXmyEwq3%mG#+I#&G^#O6Mi&6mg?2eaz8Z$#rrdN_Q0t|e>94E4ViWR%lT zx6RG(EKiEl!4Dm-;4h!ahPuT%Ft4&xy}c1d*vpSjiZqI$^Wp!RyY|!=wzcURb8SBh z?cDupCsWQ@`+;YbcS}FE**=Y$?k%w7vnBX6qFR(1|Mkj9#kU%|Q{Ra%6iE?jnvVXN z{vYPIwm4&{tS2k{^){2#9QUfS8Yfxr^< zSxG4=VUNEAMHbW+Ay4_-V!R3&q6Lbg8RYRL)Kyh4WngZuuBL-&522N=*9FMlj!LvE 
zR~;hAcqiz-a#3taI`4XCLpP;DFn_ZNg3tPFbxiL*cdQz4#Sda<_M+YFHX? zC$3!XORkH`*G#$eL1zZJ=_?ZtY&qrevg0qe^~CI~gvH3ssU$wLh<{V0u2?w6Uf*)8%-Cl%eXnTw_>~0K*X- z2TksjHYKH0N+DVWcaV6$6zhxnt-E5FJ>5r0ed^dW%kQx7V%W_{P`AG|bbXv18lpQI z(6_Kyn>QD7ep)x9qSzH27WM-EV`nG7S)q-qLv$U7v2DeBY9>hGe(FYPbTt{tnPPHy zFA3Al!}F8zW?nz3F(8goP#ljaaF8*RB!=FHtsvQh7B?(8 zTzd8DCq6fzhh}}CkcbFAJ|l@&Z%`T~W3 z4^4EE^G;{WpR+So-K@dS9wHGYtsjCKq3tj!8JYD|)qPh1B{{68-0@Fh6FxRLY&`C6 zqYfqG{B>|ZBT+8j%;;je^R9OGd)nE{7sgyqMO=FxkhCL{;{xa&GCJD6z2EugDgLKn zb76sUevm$bzHr6|4a*$h+B0w^23}{kyR>?cS@D<7%WUDXE0XW0h|XAxPJ-xd-zp(B ztfb+oWChKzFSd*S%qhzj%h%qX-3cTF7M~rbQ5C;OR97F&pYDLr7>Iu+rJR!!6Fili zmoh)s0Tu!R0zSSa%yxtkcYDi&b6MHgsKj0Qf6zVkse)ESeYX-S0?l(#5=<7f zS9jJkA8%VVh9=y8pP+H|k*f@j*}*=NLw+Dv*X5rLt01ayP{Au!gH@iNByjSu$weq`tw&Ey;7w_S@Nz zG}@{-!)0=!g8YWw$#qD@hx1aG073Ry^ctt$t}}lWJ&4RlMr^;g2U81L$MtpR+#aZV zQA%u@Ns61s-tg((L^cvJ9F;5@w{qltZfle~7+yd!iq@W%oKmfAz;I8t7?E-1)oYe7 z!^8&&XE_MNJ4vb0;ueZ=aIx+x$ zZB6lu*Z6Fl%*6-ZhXz_}XoE5aOz(~YJ|NMOwRQ@~Q@doR2;D4Yv`ZAw zd|>PM_4l*0v%73(QN6N&CKkuX&S!YyQ-%CzA!)GDJ=P2JA%@Thd0(VYIGi^TTk|OO z!`1dQrFD5;2Fdoc0Y#yi4np~$r;pWMo2@zcQB}^$5*I%_t=1V1TT)RWbYuGhi3G1( zxPpN&M0y~`b)ldOXmenOmhj)cy=z|a7j?4*f|$gs@90N^W}LG*OOr(E*WBQ%;-{2y zJH8xz&f1=-`|;z)W!w5!KRV$=tE;bvCQy4k*SigOWnDj%_zUg3&VuUz+Qw?xT?L^B zTh=YcgF@8mey+;q3o~Db!k7d`nG*8K8Xw#3Ju)by%7|p#UKVIM(E90ml zjFnGc2L_gDw;fQ)JE=WSDU|inB(Uj78{=qK>6q2EX{?6V_w@9DC_LTsgjGaj3Sy-T zTcMTSn zAn|zaYbs=lbtMXOHw2;MNX%g@#O)8##3tL@p$lzZPR?RFtF6-!bOC;Io6ACeJ2h~B ze?QT{GfCS;jGsU5EOfJ2x1z+a&sM6AF;Svo|5iNNi?2oq`neW^fW-t3FBMNR8c<@yt`|7G! 
zNvj+3K76n?(+El?Ao+~0EO$FQhH_|Np)HV=l0wvR6T06=rKZv#krH;x5AsM$A;Bmo z4Xcs9t=J?lHTM1CpA;3Ehq&e7iA)OiRDLAcD1_yhQ7{v{va=%&?O05LM`%Hx;60J0 z2Ix~#O`!eZyx_Ic3 z(&U$xMhHg~)m}N={=d`UkaC37X^1lB#U|U6Q z;C_pNA-l3Nme)jCc@U3U;C+y|+)@rfLw)_&_;~MGVs1>Xq`;lklI}2JPoXcvYPo?r zOOI2jI@`YKAyr<#xSRHARlA5adPR9Y7Ah#;p{$jh{ z9mU_F!Jzia)&jM=IYgq5f>c@*#->SyJ}NNKilNrHj-UW-_?tIzvPULo2 zU_&*_H8-kTlDT-CdkI;Nn)VOsq`j8{)g8r#gtHB!vivN}C3;KB?22FHgMW>OZ!Kcd2vN1FWzG*ndPmzNJgR6hXqL}B5ivrtQD zl}qsX`|b#Lk?Zw{B~^`0)u(YtXC5eYv<5DXXjC?H9hpP#9Eh-Z`S?scU7URhy0ZN1zDCzewjz=0@W9Z^sj_^VuJB(|_3pmhQnc5%{Z_OQ zw&&xe@f@6O#V>C@^wLz`U;Rz>3XAI@-EYm|z@;=2MM9Nnp-FayTt{XN$?uYr70*Il z8LyAEt6rz?(Qb5LDXg*IRafU@OY*_gZM{Nm|7O+F9O`A`(2L1nGc))rnj$wGpbyHA z&J`sBM~}aQPp2j#${e++o0*KgsdXdz^QTX~XQ3md>~+->5cIp~En9E_dsgF`TS!E9 zf!LE$K2_5~{-Mt)(2Jw8-T3Exq$&aD(LkE3vGVqC!D4iDG$$wL+1c5CqWEJrHfFbK zyB8(&*O162XrsWDuI_-FpH-wb?fgM2RKYm17lrgTe=k~W%pl8uZ8|a?U0qtQKd+Dh z*+z}+>|fAB1{Ajgb9Xj}BNlVi@lcOsCp{xx)Y4+5!{USBI#?D4hPOJJB z{A1?RU0#iLeZK4c-XD6H%!hA4L8mn^>-F1|%h265(Jb8~;Vd+Ufg3y<{5AYrT!EGa@gJ2W z3?4u3Y`j?%A}-gZs*X$ubWBM}xj9(-xqmb_TDS}}6DMSiG6x`WTLPh0q|3LtxjE2Q z)z|ARCy5`lr_E%mVn#(p!TJ8MtW2xiS8tQ`pJ_|4>(?wa_kt!=&{P4Wa+)?sB{(i> ze!kZVb^X`mRfVzkh&TP8O$V0L38Fa>krPO_fKok5Nb7l=Sy{>H+2!S10(i=$>kS!BfoZ z9Z2#Wz8Xv#YpO?ftPKsC)1T9CWCq`M{uVezj*V@s?0?QMX&LW2*XXp9R#)c&Es-1M z{vAx`*j4d48&U)5J6OBY6teJ-e}45+Qc`Xj#9gn;$$^7BAc=ny6eA;ivTu@;@eiNo zVlA>9lF-Pjx{%PM|I+(U!k{pS2X5J=u96q*5qix-(*eV0&klfLJ&?pF&qBgmrj~XB z!M?VR&PcSMuWta$;UaPix|NOUn{bf*Xw?iUW1+TpS6IHZbQXn?sesOjL$9}d*rZM!~ zKY;ck))p2EMnW-p`u683y{6&zShjDahF!YP-ai#kDr$qy)dVrDcmLO_DgFIxQb8|rn8SEU<3|ms4Z90qM zCLu0QFOke!m2?&%?5>s>$iAnXkY>X#iFWxm#Z?I4Y#WM7#>qzUPSss=S*xK|B|sPx zn5UnYrUABL$?+P;+dhtt{nG&|S`g-awI{a*t{5xd-o1GZp}_hL8G@n?f4%w|QjO6! 
zG}s8rU*f0%755nC(Gopmhl1eYu`jBN7@Kje5C|OYP{D-?k@-rB3|z#4VU!A1s^NBf z@#LOQ+Q{73Sg4iU(DE)CQUkz;p6b$7I;pWw;!zcWnM%TnzbQNj?M*>`V#OkS% z0rCJ$rT{yGNTgkmb)${Z(<2As=4Xdg53110^LY=l_TY7yC>$f_wJkDTTuUvI^ zm>$0B@LN6k1eLRr6qNFOi7d)qBXPugS%tDod!s0GfBHI)C@QaMGuk6l}xj7ko5MYs=4zkgEH zI+UX8TYM~qL*lm6C3oZ84}*{HFaS`dTVuhPQmk>#MtLSJb}lc+OvTq*5g976=4-w` z9i(u?RyVph8Pe1k?h*HEMaUn}9gbot-$2PA!LIm#g72&pcx*aCqB-h^a)(->sTVC< zg;YwEXQM_HA+50+mA7e}>2LR36{85%SlI&r15N+->_Zg8OvdK05$;j6dA`0ax>3KT z*sU7>lV_u7Lrf4QP@oU!=E{4il0q(m`uA18h+tcw%?tYym1SMqPX_HroSn(Z!;EL2 z7YC+F$|PK|HCk!GEIN`z421;S(=Z7E19H>|o5J$OcwkAek1D2JG!EabT%+S(NaO>z zh!ox9ar(4bCg#JtcSn)DBownepE65+aA#*-dJC;ZMq6r;FBai{?PwMv>1&vyyDd(^6|1=vvH8;a8lE38LR>FIC~y~THXUeaG* z`b~#wvv~ts-u_92&6Oh5T@=$KhZ&=>OZbn35DEmBKMa4xz=l!GkD5W!XM(G4!J7^qwc#;Hz>UaFUw#vxGhB+ZPEsAX0XG+>y)p8 z|LQCt%DG=w#JqR&5u~)8Ptl|RqhQ)%9WgFrZTP@;6RCmjoOS)FK9P=);tP%I^q+E# zn;0D@t#u1la%n~}=lMp_nBIuqFC{yGck?+&RhuWcRfoeTaFDI+*k1gAgDF?O%p{Muh4?c`*(P?Zz~_>Vw{H}k`-Ky) zF*W(KeJ6lPfn|=GK=H_&aPykQ^a?(BBpGyyl8kpHIDw1eI!?JDM@o$jW4AHm5qs(O z?*#q!f@_GIe|E=cf22Fd{v=snq{t=b+YWEOuoh**+70xTlSbe_p zzx}S*IQ{^}I2Cjl{M2`x0T@`Y2I(w<^8=+*s&FX_Oa@d(;(COD^_H2Z|r+ z`17QR=e9u)_BWsJ)Gj8fDkba?N%2O$AT6%_c{3)eui=9=^Iderd1w^4DmWlw-VVaS z^XBUtJFg9S>ASpSxCMC6NYryF50M|5QOgCxqFm%6lhY4s$k)VezqPN-1tWh?B_)2l z^&TBTd52Fk#wlu+K{?p#dp`jEUzXp=RU?owA}iy}U0gw4zxwSI8;O}Y#f6s-+VAho zSCm^rX^&%fMploL3U!qT&e%`+)ojURJP)ioqyA>+r!Gd-<$Oth;#~JFXy)Oj*1!OI54RT7SB4$UTV)k}F zwM+IN?0YqaiVR=P*#@T>JJJxJ7C41)Re*9%@_`OE>o9{ssK-oNFq@qIsS2N^iVF`@ z>r9~-54Opw_hI{N<5VX_tRTgBIJHfra0ZIX9c`(nACrExJd2Jl)9`Q4ltH9-pPnT? 
zFjRIwPX)3>mQ0m$=9a}M6oYn)OzfBC0<#aCndC7)7T=7U(AIX@u1}Th$&_UXsRaTNz*{wZV%0pV*zCx>CC&GyY3V$3Q`+KUr zCG=#M%_t7{Je!I>Q;#X>*ikD);;-qeG55*X`DV%z@wIs}l;NhCHPzHB9iwf&?!xSA zfN{vE@p8A$tU=Hd<>D>Lt9(8B^Apl=nu|{Vc;sEumv64vO**;d{g_%EMvJ{>)?hU@ zrQ&#!`J$E+%HmMFj$HAf4{q#j3dCC0s&wCQJ{WtgEG=#I2M=qlYm^b5IM!AA@ubKjD^>5;h zM{Gvk(x!7C`+Zl*NLN?a$w^R$yujZ7*QXxIjp){msd0UBi3W9A!qaEsE#cTDAfWO-)D(miYk)CS1*S>kn>cVz`HEV!9(RU+{=` zK-gz`zMt|{mwpd1XFk9gOmk8Ev^aNbeDFpZ?kskrJLdlE0O3RbL~!I_XoYs)EQ*P8 zjE0zo!X}EzpT*|JG_I#ceq&XPNMEH8b+B0Ia4*-I86ACno#bVmPF_A{nXW;0n6NdK zdpH4R3Lct(cUo3+vHW#`~X!y~!&ZmR%bPYBFY1KUX^w4?iRX{w-?)J@eYyqP)b zXM0Z4Sy{Uy^iACzt!|KxAj63IwbFoSmBi{D(4oIR^EHc7ao-;Ous@Ea$4%+oDF?KM zsHnIm%%TEUOc-pGwYH6W>&w5;+8HER(J{1MO0}DO!YvZXe&k)dXtf)OwEE&IeD&6A z4>2=XQ{OF-x`kZE=8_@ypvJT{KJ~A!^6&u#5qL)4wtM-$@OfMZA6Iew)4(O9@2x#$ z9Ai6M+bBO&M@w{z6YE`(8Lp2p^Cq14xXYOao_UX?n^6dE4|m6=zjCOCz7WNyM>ui#!}PSGP1|3fbKqiz zcj}X)!AZQP;*Fj2^R$aS$uCH^zV^NssFVwnmGqm39Fsene@-K4yfSv6xIJ-|*mGg^ zSjYIgC)bYcE9~2UFq%w*F#jYP@fsd-^G2;f0nB1!j9~j)?~<{}-g4v6+y#MLqu)<0 z8%0lTDE+4VO*ilg>;8Tzu)BYUyIQB49dR5+M8f@a!345vw( zP0WkwIWCtS@qBj8@1NJHsGG^JG}Tm|8bs9ng6jDG`Kh9!qDi;6&D?;9#|gFB3dWP! zrSBw`$GtC)lg$r#kCT<~oELZ~c@nutbTKfxwzl3$o|u!8Q=j(R%u3ikWp~@jc)0P^ zR(V)Q9e5%94yB@YlSuYul(QSdBaG8qvX1G-J5v+HatX&7lSnnuI&*MfZ&(#?E-cN^ zHj#um$1kSrccelf2*DBw@k=*abLCBkAe$IY@TA#k6YLyfM$bhPe?FZW&zLwp>WG>B zBRE$D_?9|vecv88m?FcC8|bL5fc%5@V0~lbRU1ly(e95UnYg>R>2pdJmXUg$2)p;w;k`K}_xf{dX)Ql}(bz=LCJpKnbwC=M9}zWgb*&y!Q!yTo z(|fW?dNa8^MPRCxSV)R?&C)#2+-~c$=YN0H>a)2mbrZCOrG_6r9XCwU?McLb6$veG z!q%%RlyGHhoAs+Qn8m#4>_erX7)qOXKEO%;c)<7*=}s{d^oC6>fIu>iP?4AH&}~q7QX- zJe|*f=dZ&OU@6pf*!`k#9t*zSwhJ){HVPg{zm~Cg0lad1Tk=gB2gh@-=4A(aA z!-bYPN zVU(xN{_*iog@%&S&BR%w5AQU8h|Ae*e1lw%{PBqjZ=%tTD%1`>a2SdW-?jMXBLhZE zEEki+Z{NWEB8;78FJxpY`F`(n@o*&9y~@)lnL7q<%pBOeP!CgJl{no*>u!EN#*wEP z@M8ceJYf=<^gfVU#NUT`$HPgD{2bT`OgC?WGzU64XLtKhHEODNQl7GU!MvE^h22C31Zx)Q-eJOzd0?J&2^xVyvH2tA(? 
zxj*uxPSQ%AmyeCvjqJ=erOK^wmj%31IK^?t!|eO!t=%MKe+ostTr7O|qAI8PcL#?~ z2P#XDs69j1Uw?80F(L3|aeGkjBCRGG1h|5;W{z9hj8Zm<^0-)eO=o-|UJNHD-bDW~ zdz`;W{mjm`Z^P^2+ac6Vc-S(`jLXV@o*JCn{z(;`yh!_cq9o8LQ}+Dv=E1?)4{&w> zTGQ6qFARIS!Jfatz@VHa_8ab6(0u$jtGkuZZYd#wwy%$g=X7lB0#II~H7B)4`1dh? z?o1ur>P1`H+_6;H>^}bAUy@U5v_86G{Y}Aajpo?2@tO~qhcxO=N&g9e08qe zmmYQ3|C(mXPRg?eNw0=hX3Gxd;ZVsiudUkrlE+LB+!c97n9i?uLEdAiDV5%?uM%|x z7gX2uh3;clM)VAI=qoGhTUl8?=u5-ES+-45p}xv8q`vda?5G*FbsP`RtosT(+iNf_ z{f0xDWlmR*<-Yrcg@M*{16kpYdCIl8IMZC(B&*fqre3=~x)v539*GiBDefw~8vLgX zvVcea{?DfsM28Xu5_lap%+=v;(>OnCgb;i~ZUqZROw1OI(29h!BER7Ik2fqm;RMNB zx2j`9D*_X@^Tyze*7Cwwjh%2?%e_!g6uq%`qw&+s57Q)F-ycs%U<)!otk9|-g(llg z>h4>@BbEzfkL zTb-vTt&a9XXhGQbBl6;Vzo1o2%%Po&Q&4CT`!DiiD%>{>D3$Kf;_Tmrx~07?B$xZ$ zd~C+vI&MClT_A>vC2%K{c${E1=ynnF5)o3WmnA>}zeVbzst> zC2uvpZyh&m!F=9We7>K$w-ed?US>DcphFCnQC=cifC4 z881u&Ka`E7&b|!7Gs)g&kE6&r>sgmy7dMO=5p+`-cIbp|ySKb&aj0P{lpN{m8Kakw zBkoY!M?i^H0&~dC$PcoXzkf?-PxlcKBz{aGwc~$|+w#wUe*DjWVi=(D7;16D+plm# z>A1BE;mUnX<&DzjiXv4d?TjKNSFV{J_b^WzNper1Xq-CrlXv8A^p-9J4yMWO?rjPl zfq(y)g~*Nc6th&8qK~X`TkV*_u(f5+3L%O>hBdv~$Y{EqA#N5C@3}Y@}ApDKX z^zG9gR2t8vbw2RXx3S0mkbxR!P{dE-7YJi7Z~gcCgD2Yf+*%4+WUpMlt%Q6G2a$#M z3CDZFFA`;6f8x?_{T;ysp+4e2vuD`yULQedMX3Lu|MV?Gx%@Ru(ra=A-Qm0mqLJJk z??3Z_fSNBY+G^wF<6ivpTXmw)JO z?zfAIXiuBc4d*e2BU1!%`Q!kHV#Scx@M*(p2>1)URE8n6ayZo=Iy}cGg#gdm*}R$BWR_SQS(^@okeQ(nmD6>jxe) zB`qK9%ARbS)Ok&@Q9L82p~0g?xDgQ{`oQ|a=f#~w%JITs$9JX0aMwxPObEm34T2d zDc~(KCHS?IapY&6KdKd9A2m9@FHIk~dUx{5>?yH3E(_!HpR@P&Rk+)}?7WG0Y9MCwJuf`{+9oo-R{W+;G&KDyf2W7+ zUZwt9{-v#^0kL+%L(XrG{oQ7BArhXb3mChqn?$+O7i8j7dBWV%v@{%clYwXiGm;;* zaA?t%$G6iZfRU2pGFKGxW8Re_arDwmJ*PrnvcA-uQlMkDDt zXt%Ro=OJ-Ao{lSqbJ{7g{0Q6bw*SXa_b@R|Vd1)h0=Ng|>O=eqJc$lh(u>PwNH}eH zxO;n+>b?G*7yBfLRSZZ#A^L{8sd;uPfE>#Qx`LL(2D*x+>ia8J&YsE@)%r>zx4+35 zR?xZ16Mn)OA}Ajp8%wyTjUE2+LRrF8_xlB-D`H-f;N%2OC#dfS^ErH;o}Ww6 zZclt-J>>b)dhrQ8$so`t#3_;^EV7@dpN$l9W z&TOEK+hlcd3T-T_Ih zQt~m;AvJWY%zT75!`8SZ_wVZpRrY{PutX3ZR6AANq5hTl#qFH!%^|+9W>}#|XNaUq zOI^~nXJTjBLK1&$hh|B-ji7<~rQ~EZr@owdY5b=Q 
zvm{ASx2mvl-in}Gj)W2`c1AG&*B9-1OKcVFK|KA$ro$XGIErvLFk0x=cGl@FAb6p# zddXBi?x@ixJ{7fd{epvTUGq6ctN57>{W2++)~^f`oP47CIMjf(6Pwa_(sx; zJ43Da=DZ6AQ6o;b$=s0?TeX^k)0$XWk~0FkS2mk5@AA#cF+L5DkBuCbh)+^bTFS3% z`AU*O(nR@3iaU#)Jod16at(Jaw3tXH-m}Ho)%u47h6xy z`r&WvqxB+xc6?&9$6Db!4p$?=t~+BgO8qP9e}r?sbd z7{7w6NFG0a4EtL~1~uu|eI#My&gT|yi%2VVBpYdb_O5S7#L`4-zQ6njg`&&BQ7aK z?0I9kP%u{VXa4`s*iX|ERWw@0!}`6|P$Sy4KE|inF1v|ijp2l`;PY2Q5ed-ox8`fM zlY%*CI`1<7zR6r_^L~T9ni?@>xfVz1BAzDKVrp{J=BGSybpdDcp%=7y?0(GUn%#;8 zYy~p&&pS<8mKZg^r@NifUXbe+UbMLSwC-h;HPhb~G&>jr+VhFxDdybY1-Mn-Wd>;Q zmy&TZSBy+OAEPLEby*G30m5Dhm^M$GBuCK@V)$HwRQmwnqz6}{wo(tQpA0s{JbmrR9?*EdKcHdRQ?7lAvrIsvyoo8wI?8I)$X4dc69M4s&Xdx7f4le=(4@F^b!1p;xD&$r zRj_zKh4*J*R#EgJ_Q@40z@MoSNNHA)Y217xy3nc$k1}w}7m|sC1S3LwmGllWL<9vv zQ8vL~eJA${XUf}((BTm&+B(2<4mJv~GKi7;2kF(?1UTs41TzYQ*+}|-?Vwa1oVMF2 zKpVJ-h|D?KZMixmkKO2d2}r_kdeE|YFG&N4)hLj)K*0QFA$gR>@B`E3oZv%}Pr|}Z z(3#Qb;|Z0TI02Xs2`kMG(EPo@B}1f2WtFns)aPwrGF?Z7sFW3ud8>-AMUV*%ElNce zTp=TI-j)_eaJJ#jw`1@ks&-r*K9vAGels-F+9 z$XWo>20#-kpZfLUB%W9Zz%mi=VJt<)_c1fbU8H~#y^dmzhdYlhg#%>`K#)M*$qI7? 
zG>v6kEcgx!idV0LZft^lR15JJ!#1A3HADq`0;8)AhydV=qCRqp^NAXP+HUl^Y$^h5 zFTg)<^ZOme#G)u?3Y2-iOIpQyEORW5O#Bv}){+dVpJEQBZlG4xl;vATzx;~u`M1EM z043{o3K*)<4yYczT@h_~{?ZDv<*!8$R5=ZW)xQiMfRY3N`Akg}z~uk+OO2B$(MmF# zEId8~6wer-G5n!6_W@I*Xu0Zr7wGSWFe+I9)>Ux3uwlZ2#Pww8KeX5>gFFv60ZnYN z!sav7Zwjz@N}zail{4j56|OgZ9x)g;SLt-OSd0@u)Z@{Y2IHQ|b`#3Z7<|SkNFAnJ zeE5yKiUeD7`9}c(Gz}DQSFlw8>GL^m^t)PtJa_yd;__EO5Z4C|Gptwvb_*?36j6m*xUoua`Y#@%)(6lBl&RyGha~d!P++as*ddI8jj828vy0NhxhqCWfe%Sz z(T_e?Li%?g|6uP3L(ZT^}X99rU`XM4C z=(29@#!3v3k3Jb;@K1@$AFB2|xlbyx(LIvC_@kbutUVR(pr&DjYq%Okuga2*PM(JvmIPiFYGZw)$@{fR zZ*mJ66X)qJKhYz%I8*q`aIAozi-PV~iTi6S!`*nqKq!E4$M=>uy-Ya}7R1_rTJVr& zH1qT_0~0>FvsdS1@P1k+*25N9NkTW*KEI~RX<&iyFlyl6`rcppII24<&R+*DK9aB8 z0}u)H%_dD&*#Qa2x9eib?bGzBa4%IlNaH}21I+fjr>n91jmqmg>%lZO`Lm?v6lm8-0$>Ji9%^NgT|PGI z?6$)gXt@EDfLYGO?ep=2p?Xj7-2huOZLk7;*1({)h}{3>hA$ofSN|Utyfd_;g=?@l zgWpuy9q$^gcXme5oPNULh6|_z0|OzT1ONhfddtw3o0llT5$>(zg{!`n(HI#Sd1>z{ zD|`K3N?g-BfvX5u_7c#$1A%+T!O3}fC+SITR+cf`P^eyWm6(Y`x2B}9@cuT``~R(n z4Lpx`gprL9Pxaxe_*{MSU*guHgvioxEG28OOnAghRJfON z-D`_8Df3X(ZefCWoa;2;=EGMHlNy(MN_oQJ1`9%=E3iy=}!TA z75q+b3JMCAYdxTyfO;m}M)g!;w~ucSrsGlHyTZcX^L_-2`r+ALAlly8*hun%o3s8u zQrPWUPz&G9S2-mV@gJ1Dc(OfwQ-$`yH_ol0m9>xCvovoKzr!tUK!Ck<8d;G}*K1Mn z#9Lk6nXBMz0*>O{Flh~73jq5pPM^cOutj~J=9l&$yH8Yh%Ec-Cq^bOhpf>>P;VglgPa#E3r%8e| z_dYgN#Q%&t5qt>Hihy7UIj1{IB;3~;bn&z|!K<|JnX&PweT@fBjNiK6J0U|HNgDP4 zi^8ovT#FQQru!`@Dd{zwB?l7j2TAhbRE$Tuy!lcWJ>pZ%vEGM=_hXrc0r9c}g#-XZ zN~ik@^72?mv>Nu`ZIxqlnMg^~vXcsbUQRb%xHKcg96-`y^Jt;CG&4lFpgX}xn>08P zphSv-YXmf2u` z7&B*Utlq}Ps(q6My*3O=r_uE`##~w&5_@b=;*?|WhC?{yN+F*9#;!6lGLSsMowaaH z!Ofi>_vfGxh#9_Qyo07tV~QCA9bN2b#tu5jXPg1=R#!*I6U3FN?752M~(l3zcj(<EXkNq%@1b=iQJ0w@i@WayHl3*Wuz8 z;dLU1|C2MBXXo348cUAN=3Ed(A1h&bEs6%|m% zeAL4sL7YE_2pe2DcINd^5jIe66OfxCPHTAlF@*Skqxy`W2soJuaLFM& zh2981$Y_U1JhoEV!WYq@%EbIe^ygDsL&U%SYB0(LW(x=>b(%}96r-3FA{5#CWY#Ce(M6BT#DIqgGF>CEX^TrXdHC4Wk5(Siv)iOr zz{mwMNsq&kBGkj1c6v(RL%@GG`aiGWggl4c7h`l!@0W0UV3GholI&<}dIfAJ>i?DY 
z-QigN@860-luF!WC8O-solV&x5wb#dsAPrPERj`YkB~h|x-6jZxdggM%&)2=GfvjVI-a*7oY{X1Kz}rZ zAx~4l8B>)>sQG=56Jx_UkW!Wt6(=SFj4g0EYb%vwf80J!{L~kWUaL7L^c5)fLg|ax zntyr;$yXpmmRPQ(q-0msh5j*`nh~tyz%B_T=29!)SnziL!31%6s~Lde?6;C0n3|fJ znQ7j>ebEh2iVS)^3~<`MuYh>RpU%8E!X#U7G0d2l%!T6q}EVZ z*A9J@TA{b1Tm}ZN<&2nkZM@++MM&7QU6GD6#uHpYYImknX#NxFm2^wvB)_4^?Rord$U?6UKr z@Mmgm@;@R76iz`w0h+)9?Fe+Z5Pr~iA<*-XU7@+M?8*G6;$mp+lB!nM)TFXPdw4@W zbwvUl)ZAgO5}-#ZV2i_<0H~cG%LkzHf4Ci-E(2U1E+3Ut{kdc&gP#fRWrxl8Ecyhi0cn?(=9%Z|I-_t$|F|r(M{Vb=qCNc>C;{&3T3o zMf`Ed25t2}0Z&ZzBbPXGkG#a+0M8!%>+;4qB$w|kp|&;7_CR)N;-;clLw?<*=Qe7#>iVI|hW8!j;2+oy>s7~2f)aCLVN z-MsbrL4UQEt~~nfYpER2Xkxt#_4k_VAG2`!-JV~_ytAfC0@kg)Spa{8&22mRqkR1~ zRDrnYzk;&!F~1LO6CZzw21Pw86ZnD2%XPespuGhJKV@WOR8}VMah`C4XjY{Ak#O8C zF-USkFBIzr0C;}<80KaV_kbzZ_3;fO&$Vu-75EzS6BG;^GRzF7dyHXWZ>P+-jWwJ; zDO1$lwU69uUYqS@>t)GV{Nl36xQdaL^&KJ$>p)-Vf6bAUtNy7wR*%4pU(M|8W$@4B zuFF2$0h`2XNPW>^{AG6JU6)D-^~|J)nBTrBLPAdjJ_A3udU8AjcvG+M@I+XJ2D!{w@1dtWwEedWTmBbPlwn z7vuw&v7($@TMO8qfD~?TZ@J;z~G9gp5Z4vbU2$Pft%0+62Sq zojKu$H-+vq(Dj%#IYp|YrpDLH7KBbP!vEynLhHV$b?gwu9B72DF0JW?^w(QKsS(zwa)@=PSA+I6I7)F9UTl9jlLeFxRoykyKm-0W3}Cnu3jU{4;NdGU#FI%m$7pDPRto%ruggy{;I_vQ zD_Hco*S;zS7dD5JwE_Z--A550)~7c-KOaX)b<`_bSd)yO74QQPU-;JYR2hjj!4;1` zVHz#fxle87bS zpgzOsz#iHj$bs_vx>v<%UY-HLkB)d|K^Yu{6D?z8rsnq%2qonba08=D4>TciCsK-5 zg))4THok+2$@*aMD^-^>L$eY=xpW2EhZnhOVmG|He7_!|J7sP_$lzOsXmR=>`i00b z3xX!V^aYs<4W0wbh&&7m93#)&&Sx?+O_*I!yg@KiOJX5Sx+i8rom)+Ch?D#opnP4M zz#7mz??RIRP#HyJxb*Z{%aaq9ie1P$T`FbV;NFTO^g?Q&MV2 zpbT108$wFjN4&gT8D7w4+_-by0HRuSt8o+%9p87xAQmeGzzc;$8d4Y~ z6kRf}q~LvwWO;|e;@!cRgqK4I?L%2IKyGft!n4=rp6)B5azm4p<8TU)&jdgKY!c19 z8M(SqEW-G9M44g=buKx#FeNgD(xhTae#Jg=1o?0A2^ z#zywr0*vL-JfxUI#02i)6i>o;0(09?10RA?@FD8IkVsycP%zs)UJH2ewEHzeurt*k zoe>{-g}(+NAV$2fQtY5yS4H*-oI1jGaaq#W&SIqj!mLWaGFqI$8@%}Rg*`Pqd||!n z(o z2B~E?``P2JcWSH73%7I~k$vJuhCca)k?0FEGi@sAQ)E`A6idfCT;W8iDz^45KLNa5 zWa70o+K@MI(J^o&~*iB9$}>ZaqF4O#(0EdW4P z_w-NS;7^LMQ+NE8`EE}}11nJQ7KAc0?NSj7%aT&@*qQr>Ya)HW@{W7Iu-p|hQLW@7 
zRT4D*9M@crqKVMN=o7gz2T%)>FgZgArUnI_KD6$Am;qmP8*23mA4g|PakqR3g%V~k z=&CW~VlU1eJew=SSFQkH;0+R`IGn%U)L*cCAoxoHJ_&3@)!PeCUAQFqAZ&c20{Hd@ zQoK{P@xhP`{jP@{DLs`q1y$Lg?gV~IB@YI~nfS%Y2w4Z&eke1BUBd-@Ka?YK1eYgf zatF|Y5TwhN!lmWoB5I+0TdD+|vK!y3Up@_s)FnZYkjacLc5=UrKD|o7DCEa}MHEI6 zAbGEg_V&vQJiWsd8{2p?8y1`QAW;MP-Kkfvyc$}LW2FPl2%K0|*Ikd&N()Iv4EPZ- z1};J30VX05M}DwtKKni$Y5rYzJ1AJU-frXlz=F%h@-J3eGHVFC_Gnr@p_kyvS>^AB zDu}!=0Df^x_uF&>@~M%G|K_QKQ4o~KOl!gM1YlXV)E*FTT)UVK;0Em!j&GZbkyXZ^Ja%x?Xdv^suuh;svfH4TQ@aN_; zqfujNuHT^Q`Ma;<@b1!u{x+zneC0iQb6N)u@L8ZQ6p+ljZaEaIrw%k1FgTl}tFRMr zbWeN33qg~=b%W)IJ|-u~rD-9C>5!Tz8l#FNia3utpdIudpgA!)vgMuQ=*_Yfqi1>~ zjpzkku8IBgY>3UT;u-wn0j>Z+2}msbJHocJz&d10WR2>bH~&yFS-PVUoTIF`|>bhY>J^C^P}5*puN1OtCg81&a;ktLZtxw`7a&QIl3o4&6#>-R zAT#$_0H^?q;r0HLE@b9|oFtb;5n(ILC~6zz|2yMHYbPu*4Z;fWcyVtcMH{Qy84lMb9#dF$rgq zz#1tmd?`%`3C5PR6r1ZmYI+uvnAd!C-qWa+cy~**tU+)k^fY32= z|6=5TH-Y9Pe*th;`GkWQWZf(_uTn8i8^I{ua$9xSg%~+yQ7S z?^p2Ht1fH5%N5`^v>Wdt*zM8>Js<-FVwEZ%4}olm@2^y4gRX?717goGapU!qIh2;a z83hjq8Tw3`US zPNjV(Y!~Fpr4xmuL;6B&pf+r_Nf$!_?@>k^=+px_R~po|hcGIj<?<f?vlVDh1+-1a(BYZoGo*EZm2;se}E>yL^cp zD-;QCRDh_SZB&$rpNYm-{o=%pJNk@p{zTo+7QFRom{%fdsnG2G7qiCxx$SMtZn#HR zBqY`#;z@I`cy0@7P=||yYCGh%B#?Xy%3g2)<^dKK$d>^7=g4+QO6P+FW+p2a8oy)P z<$mqKdfe381SM&+2e(2869OdZ;KHv6xMnmwaQPlSc1f&W%~0P}z?hp_7>!=Ov-tX} zN>5qD`+%zExWLH;c_Jd=r3M%HuFW|Q@2U6J^4c4N1$ z^_3o$aEkO6L!Nl-xYwd|h}>NJ6Ct*sncrQdtt%@lK*F(=wvmlh`ewL(QgrfDTzrV$ zk2iXYr?@U6UpehgKqCTJbYLTes$Q_N({~{f5P1^J+Wa%=?FKZBnIA`#^R0NdMb=SI zp1r! 
zEuEW_e1A&!ldZ?OFsW1_?st&YylSVLv0cjdh$;`G<$K=2aR8mb=BVQj*e zlKSG#EFKI5httNI8QH1KG_byM^wn%F<(U{pI!} z+h$i7a{@qGxyfEd{0&aLmt1TjRMuzqqU&aK_zOYG}JT?OJxkefo_E7bK>cuoOJB$_7dy{Hh0`tZUQq#O6vTxNn;|j32=T% zh@L@^0D721^I?cqN62tNy`2A9Gk=Sb0A&vRiSJH$R(CVT(waz4WVZ0tYji8X$WS4{ z&uO)G$CBO<(&`ywF&?J`rb{%2A6Nrg0qY{frpz{sz7WBC08S`2y_NYq=(CFZlg?>H z565vx`9Pl=@NJ`Xnu4F~8vl*3b9i%7-U0m^v~9RaoVmXFJ+KyOARQlK@!XhjqVFopVWK;jfkMe-)d z)E`I~rV4bi^3?2eJ=k;P+CT0cew39#$>w99yL86d&Hg%n?C4MR+7T%2U)pL(aw4UA z=rl^`Sh%_HiiA{hp-|)Ar)&J9MTLHh;oH-I`+aB#`C_PXg1kA9IzVd1{5VVtOO1Fo z$mPIKRJ!l&?m|d!5l8d*4W)D{`kLRD)b7qNtK7bYg6o)6BDp2P_JNszAw3#y-!$A; zU$cSH;!DS$a-LDUwtShD8RbGLOCZZig5kO=$gXoe@a=WGj4#~rKA2Cq zX&^pK>tigse)Z8^Og(Luh2z}(J{_aTx~W-aJ?72M5>~cmEaFx7Z?$ zrf}+>AEHTN|Gs&)AHA1rITq|_wl6T9RQs(lF4+46SIKAl(Gb)E&(3_1+|@Tkn=7MH z$vPayJnQQVyKC~OL`la9a9Gx**66%OKC@JakV|_thfR(6GGKen`phQxI~|Lq`nJDO z{$A|ephxU-#bYJ5?z6VG=k+Ia6l{Hpc0c4cbCDd|+#rssdGZHU z6&IJ|o$0wYo5<`)1Sq{0veblj!m+5vCh--6pEK_L7L(AL-o<+>A$LFjQRrS2Uz@|P zpG#F-?$`*vKnAI)-;9&>ujcMox*C10Z`$w9fT#hM*u%Nf&QlU+vI@hOKNCwMx`jO= zJy+7cg`MnKFXNOr$#-=hKiN1M|1;0_!C>@I*(@%9-FH3RuyU?aq9s>!rH8GsWSJlKwk4BZX4n=!3Oh>RI(useY6b|I+Tt z15$L!M94d?(CLy1bM4U8=#BI3A}Rc9Q^&CzIeoU))`6MZ8+rLJ`W?NGAx7l>{y?XA z+BuJmIV@_jKB;W&YgT?-5{;VHUOMtk81&CCrnAF!^V5m# z1}*)R(95`wP%MK1TNH{Hv0HP=A984rjjM0^_sL7EzegzancPPI)N$>XEDNTR6_%?z z*?mjmE%AHmQakdp0B%1R#G92-PwcG^y&c-co9NDe{x3YYpT=ggZfC)5w^0{0R9%$^ zJ)2D_zFlwWcWgl(Bfwr-N3`38b$9{>Qkw)mP9f^oB?zOj$_H4o&KK}@LJ~YXpKP~HS`DETp z!zqFm`~9;~M%>f}rB2rHPg`62;%g=Lh>Ekw>N(Vcb{vD(c`Q-<^v+@~R$1&Xvvi6a z`AE7P3PeaR`V1h$0 znH9e$hMV95aZzz`R_8QO;sR5+_PaZNCpdZIjU8s+e@d{5`nhd~9bA0*Q(*GfBL;yd zANLcI9|(C<8yB+fy^xvkvzMQo_Uj`Htx>9Rt;w$G>)jEkI5}yprLSL#H>)Ey z2*8cXatTHAf8a0WSI9uV`qp;M$jr=^AapiR4(12jmYo*l%TsGqmqMj*rv9wLxq2!ERDCucQNvDoBeu4- zDjBzdt&JJMlgcvWN%zUja-T|rcPPz{O@1<|NZ*#wg1PL|3Cfua8Uo7 zq5aR-{$G9#=LX{R956ZL2*Ipj+AwRx);mzI1X8_ajXI!vk})V`Ih;)7a+970N_~;gjgo!5qWZ9G@Q;v2j3_y)BSu(evv{QjP4Q zU*EgG5h+xPNvmBMnm5S~zFC}C5F|U1x4o)TlWg*C1}%XzDX=~QbGbIT4tksO)p`0T 
zqw<}%Xzw+O2_j@HUw%!Ky2xXTM-x!<0eNUe-#Uye>7g^OW@91+QEk zHbX~;iE)qZ;&PC^zqa67wj~^;NMX<7j4Z_Xv-!bd6g4#zi+wV#^g`|RTN559FTr=H zAYH!)W~2@@y!q`*b#*nA-g!@)f_?+!XIw`EFI`2W<9H8t;3yg}x1+#(Up)q-p-y6ns_Y2~ z33POHl!3L|yUp?FcTnrL>?$#L`U#nM!oguOPlLg3K%A zP-;L^zcStkdF|{?U2W}dz``=F=fgoR^GmEzWMx8D`jeW|Zq(8RO^ooclP2&h7w?kO zi#&#UBdE5594FM-q$#{C9S>K5!iqW#euNhPA> zDEvdkV<{0y=pO|X@Nki>j%(Y{5W;xpvU3r5X@eJq+}G!z3kN{AMGB2UQx%q2NgV-M z3}e`TDrxdO-;cI zsTE$k+r*5nzsSrMpONtfnC53^Pd%qpBf;uGyGL#9C?(eL#`J%lnCi{{8O>u*?OR$} zs>>anob3IpFG0f1;hZ}td^(ZG_rO_$ejW1i^7reaqp4RBliue&KTr=&vAb)`L6O@6 zK}Ihyu0x>U@;ao+(a_}pjxqezgThC{rBHEcY06hS2PZ z*$qOF(l7Hx-bFJOI+{Qq7==(q&td1@A4!tpj(AITUENrq!5!e6n>fmIaOR9))204I zYEtv)Yu&j+J$#e-`Q5(-&;+Rf#6|e0hhraeL4g6}%S|s=$AsDBXmQTq!XBqg z1qBDkENGS|H*=Z1P@VRpIK0@*b3T9mth$(|U&N_j3vCX0`fs_OH>_Zr3*(R^etZ;} zusgiZ3X=~d3sB&A4!JAqysA5wOkKHqx z3Y1eld7N}$TZ-u|78MowMZ>MgJfpu(2_8X>0rkOkc^I=m#(UuE9)yhu2n-wd2~4HW V_6y0-UPGMV8}e#$A7t)5`!A2hDQo}$ diff --git a/doc/source/images/functional_data_model.svg b/doc/source/images/functional_data_model.svg deleted file mode 100644 index f6e71dd..0000000 --- a/doc/source/images/functional_data_model.svg +++ /dev/null @@ -1,600 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/doc/source/images/sequence_architecture_cdmc_sync.png b/doc/source/images/sequence_architecture_cdmc_sync.png deleted file mode 100644 index 16e00007bc96bb3db7d772ca1a505eff188086f3..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 46795 zcmb4r2RPO5|F;Guq!gmEM-s9lBP5$jDC?ArV;r(KiBj2QuZ}%JIQEWY9p{)QTW0n= z#<8CJpzp8m@Be?E>v~+S&ei$M`@Y}f^?JSEw+|0h<;c#_o+BV2AXAXP|CoT_v>yS% zsk*Z#!8a=(s;a<;pq=bfyO&n4oGpxu?Fi(IERAel*cllz7&tRP?d)EO^6}uVnxe)p-QL!;P%*Q z-+-S&@XO&heC%a=L}Ke%HbcZ(flS=L3WBOenL^=6}y5= z#r5i%*V4NeZ;H32kOfyKC>$-u$6gBb^DhCv1#$W zM>hn51auN@kk{$HxxPJRD3zUkZVp}h{`Ae?p^u;TO0sZ^r4h@t%9`i^CkP@+bND-Te4jNipF2BAq7>q{69RI6u^oPE7=RPNEz1q zYu{HMH4O{;DIInu#Hu4inat-K>|51|Qr5yaj`E?&9f+8ke;lf6G&|n;9GS2c6!{`D zUjIs;9|2*b_$7&;y_JE1ghYn5-&1B{KhNVXm0O&kef0U~MDs;ulFZYk-^~?Vvot#! z*fCCpTHE9(-xo%_@twk$ec_L`ZOsP)o}8m++~1F1a^I96U%Q{c_vyVJjwP}+Du5ER zH~>8>kvhp_(K}L|o%Z(KMd625_iQ>8V{+Cu(FjSCSbf5 zm1eY4IH(O{e)!)7&H8!QJOZ1a{?vc7Z znB8j?7x0fI=6b?ST8%AT$fl@O-5qdbJ6M0CBi2RatT|=6`+H=gYEVQ>%@h7MBpSa2REmK zDl(-_NYdj~H!x9gC9mk_NtRToBmV||&ib#ma(JkNdrLcN#4_y(mda*_+ ziyB(4(aFvKZUblkYeGihRXCcQQl#LO)$KJ^(IKf6!$ z&usK9FTc{#ik!&xZmr=YhN(5zC|(&Tx1U3$H#NC$4wblWE~cfiQKAfuRX-owd&652 zYG6{Vjzw;^RV;|K11yaACArvgZ7~O5f$emAy)U^WSpS;|&24e_jt$_2{8c(ShoNEx zikJz3$krmygZ(OX!-}uSCz-cPJ0f zhxOm_#m^BzNGSSbigb*q$VLmqfcn@XMwXYCYin!!3QbUPBI8u`fnO?>C}MUw$JKUI zwcLWko3aew4$7TAyi>rTg@ABFL5u%C1?``Ra}ekBH})~D#_vD`g>^eAE&cNPDN$p;g& zs4Lsdf4*xkwZtVvpcD3Hx$@$6x&C~8K7$iYE4ezEAg2@icweN;koTxBqU0LgtZ_T74M$6U@0)iSUBpWb^&$S(Tct?>sE<=7zJRrdgx<18$q! 
z%&s)wL@GDJD$;v6z6M>np-S`)P04?!oaQ&v*xLIjBS{)K@j}jq`VLQp=J3x}$)CC3 z3%$I&z#2;I#jEhSC)#&}EV#1%xu@Y~vi>|!K(UrcB?v_yKScqRUWWcDSf zwwUW=xRwX+7Nm zJ?qKqogC}M%_JeYWZb9rKvOzQzIUT&U|1zii+iU_$I86Ms(UQj$-bqPXK(65_C6$9 zeP4X9#hg#({$6hgx07X%uWyYjW^t$@HYR2w7;0s;G&lE>p7&uKDd{gB<5zb4XxWD8 z=^-#1O^VR#BbkxHw&?w$eBpQNk%^`&$`4rlv8pp33+pe7^Qx~-pF3Dko}PJM&Jb{P z>*~T^b!X^5$&8KGd2>f*XJ`8zPjtUj?$F<}d`M=`%|N74I3ef&4tWg73LgpnX z&dK4$$CzIei|pNdn~4XT*F!FK{(dua`3l~eKGIwpBDK57f`F<(53qAu-BnmM*zhHJ zF?A)=!m6z>-{HONS80`j?!L?{DR0Y0r;xG2tu#}w&Mun!)v7XvUX`@q;k!Ra-fMR1 z8EU?S^4*yKM+3iFy>WY3J|@ccaohXhrDR^WvuMW<;Q9LjaS{WcRvlOA^mjF3qbqG( zDdL+*`fAsjPYvH|?CAo0f{h=(S{pXWce^c!O=e|L($1||%wkU{HQC_dyAi;_`3&(}5jC^DeZXuNK5kCu{q7vLB<{H&8!=ZdMa+EU zTN25uQ)Sx{2s>~K*gYt=b`5fOS8IhwMMhpPnMNBQP@Q~Mzd8xw zEKpe9W#hwR4e$<|UK2y;z^QuMDvu4u{c2UO8Tj`Q}xS8}NuAAD|L`E{EPzT*> zD9JC0TXPV}(D*&!7yA(}2Oom)t!?NbeuI;<^k84`#m?yuyZeU5Bay}t=-(Agx8ZNM z-#_$j_$KZu#QijQ(!3(Cs`ts$D4vn01|sn5c|RHQ_78N{Q8yFH_Gb9)Pa>Cyc_};F zH*2i7Vmw`zdIOgd9b&V)pkAIkS~Auvk&Bguk6pL6#$#=5By#q0(umm{g@2Xpy!dQ8a9%n%>p&iJ4w}TCk_*{@UGjp7{{D#n6vW4ouXw zag=!`0i=Os+P>?ALtiy5WAHgU^V6_k z+5O#VYPTvP2<&cQ%nEiRV4!O$M%+;s?Ppv6&PwCSWz^`U{?f>0ml!)DEA@m>=)#7P zG+m44$(gtQoL!?>H3{os>E*3z+~=oat%4sX zSW2%I5zUMv78J3qr5K+nb8BxQVj1bMWS5?-Jf3H(&e(?=pXN_CQ*^cq3QqQWCT$Jf z^(8-0E?+W99HNttZSoDicHlKH=#6>;S>Q;N=|49KdG^chQcT{1WgRcefY0;4x(e{vBp>wPm>nan(d6HL9_QAx9 z*HyuFxx60YU9gOMftmM&7B(};4Q(3(Xtp>v?Vq%lYIr|xi&~Tma+}NfatnLw6r@6b zqn|lwFcryj$>;}7w@f#dooFP|txB$h@RaE}TrR_gUfmuaRXelvyV+T*j00$J6Z`l| zEPB0if2Ob&<&B)*)$w5w$h^|L-XD}ubY7a$3%S9&keQxXjd>nV)q3X4nV>6e3_oT= zm^aP4baYD0YF5GlzBoUFDl|6hmfg%bz{mr_07E zE?qj=cXyR0B5cicPtkwi{$3?xaFy|LwAt6Y>zv(IE<}>9XyG^3gf2pFJc89qKYa?i zrL!SfVg+GedgLjA{GZd;E&7qhGkJADscj0D6)v{%lWpPPHLFl-0@+t;zMq1Pc_!s# zluf*pO=)`@&0ZGmrM#y+C#wTnXiCf;ex%{wa$(p@!D(J^@>&D_o>#GW6ISludfD6M zeffUF=uT+fCUiM^GW*z&_i9?}9qu>T^+n)tD;onp<~M43t3UqMNh@4?JyCn*AUql4 zlh!t>&9^6Lp?H~9U?DcuAL@NAV>avlc%ql-oNg5^u^Yw(?9pjsJ}iO1G)JnDY@fV# 
zPDxP0cgp?Nm+I6xlI2$azxil_3jAcp%tkhcWWy8n0=Pcl=D;!c|YqiI;O-kyl4M66P;7msy%VyH4^Hz%gf#JK;ekfzh`xc4mAdO} z{tuz%Gf|{`6rZd_y4=MZ$pmw(L<;A_>Lbu@*TbbOz=(fV(>-}|`QV4=m-EZDWs?yF zWLK+MnI&-@HJTbj6D5&>z&m$ZKdzVGbJE8X)^+>^E;dQ0m~ ztioStnEDp+ESG)P@mxrGhpPC};P$D!yE+N0I6};v=!jFY?2acJjELjmp27f~-YDFG<2E*rzN1FoyNa3qrl~1CCee&MMP+wU&{+dl zpC0Y`>*Li}{MyXA|Zt2TqRkMvnz);y=R5ChR0iDTlA*) z&ny1cz z;UFzN-4vt#P5@q}cc0U@4?z(l73;$YZ%%*o>{%TEBwgJ8nkA&Ov!aOSQV~LhBUNkt z<|;VXD@2`a9-G>xGg>K}o;wRKvwn2c(A8AP==>sGygZhDaaZ%R4)bqcRwW4@UvJzg z=2Znx0PP0ty6jIBFCfesM;Fd?CJ>pQuV;@K2#A^_bgH^rTz7S)D9Es8cQulH#^KKg@v9w6qPA*9kWB>kx{#0%db z=f<*_xZtLq=R{a1nE^iSjr>GAaU;|!dp5nEFX_PZ{0!X2Mgqp?UOTrpXrww*Gx03D zhsnH`du+W@BvU!W6YFWjwCrR2D*b}DU{mpgV5@o9!aneaUJujjqhr7&JMX^DkW_J+ z%Q;Zmsmr(1w`t4PTP{p=mpfnz*bKxeAXs=892qA(?5&5vS=VylC>M_MN_xk z4669CKa`gMI+L(Sc|JjS`LeF<)-q~P|MlTiZ!GNOy9vz&{idlz5+tr_pc=zPhA=PD z553lXPV|NXRAbRrdspAPz1K;DNKgHOPPh+Md*aWDNux7PqZU7xb5c@T_xDB)xPo2Y zC+#lc^5=WoCJ zb{Ro@D)H%G*QgWc0L@H9T9{QN@(+s_{~vP)CTCS|Li zRg1m7{NZ~oUZ~*3KTe$(_UqeQAOz`seJ*u6VTlROi{`kECyaO%fL)JmS-iiCJ<_vYvn{bXjwVzK9a>n(&(iIN9D^IujtEQC5w zys)%fjOI5PGzj&dAFk4&_}uoqNLxus=?oFk;FO5{Y(&V!u3g@udXX1jXxw-Ae zh0)4v&OZkRbRIuWinw{>#=~r^uA;?2F&8tljB%LfWYzD&IFGk>b#~`SNiVaXIz=4d z(^y+OJ*3Nqn4LAF^kpe&Dix~A&wnHx*Pu!O2M^C?dG~OYYtbeXXXGYRNax(SnLp%{2x)7p9^jzyrOqa)v_ zHKL%Ppu4+UC02;AAO{FW`^&zP*XZemhna{2j$ZGd8~CmJY=FnIv9X!0JWy6vZn_-J zJ?=-v#kUpy@&-lB)L@xyKZ;?sNE);0O?m>PM>P;7b<4dkq%?e|h=t(vi*N{OHl)O>F!8mlaf@BCMnz4a3DsisHhaUIxy*` z6s00lCsX%!aJZjxpYM2;aRJT9o15u;(EDg?7z-kJZhl_OW4CnviUNx4EVU`j)YjD5 z^Z+mSU%a9E+|m!oE(7-y z0e?Cq_vy@c;*Ubwc+qB^!0uwmtNT7>6vn-34^*TF?pnS0wiM5*|K5|1rOaJQ0rw*~Jh{P*^;L=cUyr*dJzkph&td$%{`=sY zKi57a<_$rYR)-0lnLP0jLGb&;g|+b7p$+`gULR@F42QZh0g!$#2nJpgDWIx*wf_52 z3o*e3Z{mQtyQa1e>CUo$pjx{?I@g<%97iBge2=x^x%4<)CPL%}xsN2AOB<0v#BYxj z!~~-cj|QXffx+KHQ^Yj*3-hXXy08em4&wUfHKqMN`)mYQxz$hFfRclwgTn?EOGZwv zu17$x^O@p!B7KB}iE|K$bsz}lLFatC%nl>1;lZNa*CMpuwQKnSW%tVey8Usi8$eUo_knw ziLK8*2=6oG<+`~b#buFzX+K49nhQ?Is{2+Fj@%M#+t&&VMLR4G_My?}{{H^HJ_Y*| 
z_$7M%iXHLU5YF%}+Q`^!#r$k=v=q+NflGstuI zr8AT-T4MJXdEV3fF~^#n_!SWWlq4yvGV7jx{btR9d&Llls9l|%hJy=xF783VEbg7X z)GtOru>bfqvC11Xh!5)OVg5?~(RF#(FnJKqu?AwA5SHY{hxDU)*AiW3ITQ>c^<8EV znv){3Ows-$4g)YgkDY~*)>5iY(&3j3#0!mAt-j>u>X+Nim`JQe2YIX~hZ1CR61+F* zl7|l3kr0eVaGz9$=GE+%Z=enNkgK8s(H?z9k%R>l>0KPQic_W*7ID2YIE+-ta}N!fHYWORB{JQP$t7?q)MuTB%R&1(e~Dnm0f zGx@lGDK`4Pdw25L2P#ytaam@09m8VXlRn49QP0d2_z?8GDuTZQlbaT)aMeey(Uu2GHsj;`We?ndRgK7-K z6C0i%P0CQook&{6kHF8oZm2*hQoA{L@=!v;!Vv54lx$(mmL*pG2o6Acw;`b(T1LCH z!*S(O??uC=u=;_=?qM5&?jIP?+2)$tTmCY zn2IqsyZzt3eF5R~t`8Kd@J`AA_rgy`pQy|F@A6Ie@v9@jQIB+W-S)7U$Om~>t>VIv zioL8y=f1D|G5 zwPm0H^MPt%VFBb2$}aW=wj6fY4K7epSC&0;_2{sNym#*0m`MUfiA(>S;0y}xH_s7V zAo&v^31mrL(-H7OhzDWx__%Z!YVWZIqwnNGK>$5E{NyAt*?%3(c>d2pX=N6H{~7i1 z`QSK0%R?DWe>krVKEQwXr~U`Zx4hojxOcw3R#jL{-})Cqv4G(!DJlkDzbh15Dd3%I zzz4Th;ri>9D)~$KjisdOvNW=BJ3B^hZf=$N`PM6A-$5EK7PD{CJ=Eier4Q^)B}9!ec~>sf)p_j8f0 zlhIv+tDU>sYdsd=y##@9IXT&H(HhA^PeZd3TR_?iqVDx;$7%GQbj21PKU)8|4vM>) z**%sedD=yaXVB%@ldg&675h z)6+`9>XHTm(Exe!%RlqL)W%C+VE<|yT3naH1gN0 zZVYDUqlTr@%(mXC!xAib@L{(EXe*(_ICcV{LGY1^K%Bs!n!@0h| z_8$ui7{|w;qg;`0-<4d_~(v3EkaKqLU87Ujobr zzBDjMx@A;p4BOtLGBYQqrLU5csGa6j*x4DvUi zI+7ihC7bK(87qK4q@=KL!4*m$kEg38Mi>>z^6~MxxVmDA!CK{1R1Cmut9$3MM)w7C zGYKb89L{XUX^I#}BFD!0-@k8gafPRRijJPK6D?v%GvSLy%;1b7x%K+HyIGi5mp)Q* z4ohOf(^$fYKV3IJ?Em ztd$$*@pzvj`p51sK!|fHD|IJte0Ts1%51DL;3^3TNlzY?TA~Eknv)8YiJjeKpcsbH zbBs6nsE&av&~-t8Qj64w5K)qf-Z_SyjMWs{U_t@Y?6Uy1<=%tJhe=OdT*{jbLrK}z zwiKZhs9RKi4L4CfY*`?hQBVvnPX4w>R_i}Vp%Hi8Z`%tBs^Yy5A(EK=#~))EbC*by zJkpi2Y(1s-L5EZ4)M)VO!(fMxX14Q=1LX4ZVZUCx&tw->fKDaZSp3?s2((2Hr!ns+ zYbnUP-rBahdJA)nGgn9=^{7wg-76mf8d~l$Tz(2`++)0ffKq4X<+=;A@L%3q*&(3q zKm;Hk(A*H?s5?S)?qGYZvC0!mNZEXTKk@AuiG0L%7^VhPf_p>+`mbFJ{R};UKJ^-8 z9Oob6k7X{SXeFGG@V3sq4-nkh9f3o@vNBM&dtfP5VRRX;ry(x~P4cr0IiL zZyAbzV0mEnD|Juug$^a^vUq{y|Gp?7LHxjMAy?84+x7QMz~22g#vYORt3BfwY9C?! 
zd;e>o|8E5UkBk3PgV!h{`moKVnCy*`Tjw21$A+`kM~ z2n6DjMTX6xcwVf_jf$I!D40Se<{8IEMP(eS85m=oYPapCruUfC!xg2ykRd$cwibAI zv8lbiSUbm|!o4R;gYUalZ)jWuxCkjEATYs+!4nh~g!~U|d`REa(9jSki0uW2A244g zZOT^H)>xY|>3L(6+42fQ&;~`v(J{j(N(DN|EEqa4Xd&c^)OM(-Dl2m~FpzGXge*Lx z(AEt<^N(T6oE>ljQVobzFJHdolMoPiqNf+1n3$KKC@+7yo$H;U;Y5dGDkDJ*|4C`0^rDg6=EdyJ8;iFRpaw5qT)$djMGkd?IjZ?H5q>;3}zrb+P_gN8|G};0Oin>(6+f zNhZb!xh+?9L4g|!dKfl;@Ek~Khtrtc#mJ8}TE^=Y4TP0*Z1uCpW4G{{%y-;rG{wi?+7sJbKJ_*5?+ zqW)|ADf@KV-rHs+Va8o5P;*S~P=OJU_DEFD;RFupG#-TVUIHyQ__-k0+bjHXhJyB= zDf1GNOX&t_blnuNT|iZ}q{nfp{nr4`1|=lpeR$h)8DHA?Y@0X_5(R@ zUReo88(%Q~7U3!X&-VzLv7?te9st7>rxy)dzStht@Vp%;XU+W+lo64UK>-2I z)tNonTI}rXt)xaqMg^vdg!rB@g8SyuVXsoDLLNTR&@lS?p3igNEwWWmL<9toN9yX# znF?&fHmzJmMMdE5)n+|6A^ff?P$F9@YHD(|kc-YOusZv>UXZ=X$jF-C;Ip{Sk-@#M z_L+Kh2Z$;FVD52`DV)>*aKJxK!d<&JSq0V9^m}0eE4U`>>3GgMwlFzalk^T zgmA&3La6@tZ|LL46se%i4lu|hLAbr0-KU5M#jnk6ZFZvZ9+zTTCtFac678H>lVG+m z1rwBm8=gmj8+LHCBPe5XD$#woBU1&Sfn0 zl=-1yVLc5#K0csv(RnDiEm^Om7y!V%7F!#eV%UJgkUeHQ9)F|uv9Ylb4*fitfl}+2 zFJ8cLk0xij(u+ub9^d+q5-@=}Q3rb*0JmSh+$!vgi;vIQYq4INLQ?sCH%Ejy1NfrM z7FPc&BQFnwVqQ^>2u+!GG)7(Y7BM$B@5!O%ym_K(x?71&@;qjM-E4!-Sp~z8v3DAfRx1$cuD)UXPD@O_^24}-eIgiZbQVI zj=eOA%jeY!X{KxV7~>2gH9`d6y@*af@%hZXHm#GO9TgTB%#MhOiAhOGX=rEwkg;T9 zoXTt6Q(E-y-2sFAl=SqapZVisko`$>*gi1OY)N)KO-&&_zNDP{LWYHXVAKPgH1za5 zJUo%x>gwuXxJ4S+jg5#;pPuGu7WU~re0UpxEPFqFlT+@r$F7E+dLQFcY>cEO<_Bmf zG>|2WFbWM2X?GS2A>hf*@ygF)O0YYx(C=#zhKRJU4jX>0|76UPZcnVj_hW&#FkZ zU@(|aEYe&bSh;@QX?}TVbvwK{D<&4->}9CCkELXAM-m+e(=l%N32=+FT$!*NlUPv% zAqiE}V~S2g(6hn^a{<=gVjqhB%i2xNhYv0J1yK0?6D8dHG2dUhfI4Of2Y~baaRtN+ z1tmbeH|-iK{%tOFlumxjLzBwOwUYudt|AV0b`w)mAa(oJTQ5~@nsr^Jr3DIko3Skj zb{m{Oj@XeqcW7`#LIStn^u$CH6L9&I2C!1katyils;(JbqM@OgH3F>3{O_k^DB#so zknRqN3}^`U#^+98s)r+ezpXfW$eg5436g@C4&dt{RU(EFxkuE*+P>O6e`vV`!~wl2 zZ=^sK%j+)(!uQwyaha{-+;E&?@Peuq4FMd8KGCytbJx%N0`w&5xew3*pnoADA&lg< zwzhciFF5e?=Ti`Vzg?yY@ioJ3Z8?GI1@T=)rRN~PG~Wd~J27GCob~04g`r^s*D(uo z76N60;wCx}#~y+7$S^^?kQQqx>bN)v*1L^CO6g#EHakzVPy;WIP?D)}aC1lanz^W` 
z1c-H~nV+x#w$66|1k8qp`(S}IG{SYHl@t^n@fbgUe$SQaPz7cfz7*r|M(J_%fJ|ZE zmqwV74yYH=MqVVgF%tuX3B>2vT*BKGL%AwPW^JPc{lmHv-9gJah|+-a4>5(U$dt@z z+wez0P=-Q&0PHaT{CtCX8@!F`ws9pSsYTT!g7ES>!4n_z zBVHPWX2Z#ODXwOgLQc2eqACWR3NBJVO1zL){Z;Z;Kg9i|9)qLHY5aNM_q0fYxLLfY>4ulsw8?(HYvdZhY+Zm>Q)tT{ zCxp>ffYv;gOaiLaV*b35{8sh#H$otLSS#&`BZ$^C8rhLxt|h(`EEc1zWeLGP2%p~H zeQRLAI2kR;zbEWJfb39`@F6;OHrV<91!?_BK;TUe1IWM8+Mj~PaT0qSm3Mlvn7sd* zZ8UlcNut5i-P>ziG9XjparB6AKv7_UfEh>yJw7Uv8vQtOZF%T5ttYqTp2P|$vkzB= z>fv9R#AFSmKOkxKl=`QnrQua^{GdW;bKZb@W z85!^A&_|Q;NbgA0Wa5Mi3J4e)8AT(ffI76G6&NdQRs8f;_2T4HfF7d@Y$jWPLh}d6 zx^NzXd9Wx)@%Om0^RMW8j_3~%xycA>+xTn-hHyCF_^2AbPA~>4XX82dcdMuRDmlDs z+`u#(Mw_FJ6u=eEzLbtle_5pM=jYc$FGG@~p03(#2BZy;PxSTm)z!N!Y?}NdTbi4# zW20hY?+z8IaKg7;#u*i%6QYOrGGK(8j)TPOIB4x_5*i)AZ_<9}S|DtoII||K7ep8U zN7#u~_(H3#<5bg?iS?xUoyPHGK^`hqi-$Z05^xSGKiPow|3)RBjbgzxMROh5NdKE` ztvpp#)rc7i*#{3EbT_Q7uHtYwQ$mUb`1HGVlU@H3PX6v=Ni~HI6b^XwE1&9^btK82 zIe!h^%P%CP`RrNrZP1PhkKx`{9DegfOi-{0?AHliz`2iC7f%*RRa(k|ia84_D=QmY z%fNa+ACXQ+3F$E|>WD#?%H0Hxpa!KNSS%e6+}(AnrDYYCbPfALH!x^K?l9Hrjxj77gbr=&P=iUt(Zxrc8!?sr^E$e_CuDzw! 
z55DLO;md{S*xMIz9~w$2ypeiMvOLTgtoFiTfAi?~okD@Eag=DqU9U>X=EDP3SR z-Z{T@-u;zm?`ukTg*{wfZRK%oXA|K9JS5=mwLnmK-{1GxBL$2e&lZ9L0~IA6jT;yC zS>0I=M^{=l!HgjT;e9N%rCjy^R76BXw8#Un?eJI?`5Y99NR~mjprA%TdTOfaT;xpu zF3|vXr_mkOS}5fPlhqHOo_u4;_3|sFd+XmJ18ijI-vU2CjVrQ+az`pP0;+$`sy}E< zR8*8@T3);9kdNqxB?zrqw8OXigQdQ87th(phw`(oUAyz@-hQ~XH|FysCnOKKHJIZPra$;7po6$i56(ma^6xbKub=ZORv z{^4_fOd$TS5-iuoF7V-n2ja_>!8TQZyFU}^r>uU|__ zv>Jf|nWKkDvotNWpVJIqS4|M($hH|+e?v$z5w0X3Mg~+58Y}tL9uPthn0fPxM)pr) z>E@VNh5NT|-n0SYt>H&y%+zLup11I#xlRHx5~JB|5AD>&&7lXi8<#YX1LgZgkiRr9 z#WWh^%hLrz;R!aP@c`rjb{F~p-@|~c23E+)*=ivKwlSJZN<6V<0!&@^MPz1|cc_(Bwd@E0#p1;!4v78uYVeus^tA)>KvL$?$ zYMVOUWBb{oM~rz=Oi2S(*Y+$i7`&C{NtUhmWQCTpgqm*JtXt63=m6vbTpn!50pSN^ z6Y%>VoXPON1g@bCsaTK;&8`5w0Z+hwQ2s|tI4S}$vBRyG?Ekhlumy#GzO2dq7-jYk z{!d}RN`znvP}pZm4*FC~`-sU$L?p zT=d#W4XQh$5X&raDJJbum3MaUwJZ@97FGjILZx5>XFMQArqo}ZQ$0@{6Eeo(OSGHv zsmP0ohmw3M2eJHLSrQR#RPK=Zv%sizH&!`AL-$?rE9hW4INlreQnl4%XH-AOaHYw?f+grU>)s9wOKk~xT4Y!6) zArXv!94`Jy6G>u)lU);EX&Y+++m5T209piU9$gXlthq-Z(dpx1@P-4v?brQJ+QvogE8QLJIs2Opunp#iNI>4hNZ--PuPu#_PH!`ez&z7{slV(VjWP zr_~hwxY?y)i0<}p_I(remJz{J4-Hm?`&2echz^cDzDW!kF^9(Q)7)Og25Hy)+-;uQ z)U%;v1fnGa(Ir|*bvW`O7qN;;85mq9()GNaev)e1-n6^Qzs4s1*wpsEmULH~u?8|3fPI|3IoiQLwdePOW{2H8VT= zbpM44>tXg@92?4Gia)Vz>?3Dx#Hi@t)lOIH9L04P>Qthj6gsk?R#SsqGZ1dyX%HQm=tF`1KZ!eo(*R~row}%y_31(F zAg)PHLRxxUhYTN+#>C+Y@0|J5GJ}g97W&I_&Nvst;c$>p3Jb$to>@12t%cEdD{z|-ycj_?Sm}URCra$;;f#S>r857H40 z#F=1Qp7U_3%t8NS8xPs)cOH*?(CuVs$im3T$j`4HD+l1j-173*HeG#vZdA)IzPRVG zs9Lix>qpgGvx=2XqX!tGacynQz3O5g6g0ecB+HQr&59I`@Qe|yajs3HByFesrwgiw zKPOan1Cs(#6z6iHx)Fp~P~Epwy3XUBf1 z`PavxSsa_UU?oFr<0T^=75?T5ZKE7T?*LW=`)ENo*U+naven!785c@w_Hycf1!I*o z<9a>oXcJ|^ctTdcmX&Rz;WB(ST`IyeChCy1p)XJKr4w)+pd$$_L*5M9krR}4i_F6P zOAv^rEq;_3^EjVu?Dp0QzMR`@?4VPlXtc_u*$8A$L*3rdxrO!3&FmWC=U*S?@k2;Q zpM1b<7d1XBx63@78xy}ANX;;Vh!W{qT*9YMEGwUrlk1=R5_NvF1kI_~CC*xh+r!lQ z*{a3dnGXV%`iqnT($mstFDK+-jCX}jS=f%7k-JUfhTvVI1$etz&LOha$5SHSfhk~9 zv$J1D_7)xa_HlE0t)uVZ!*?BC&$5Fh&wam 
zHYuOqy|P4-r;$Ef;GtU~8<6*H4cni#BUDxF%+N#E^k8ohZF>C~o=t6Odf&+daX}sm z)Sv+#x*nd=w4F}_g;KlL$y6vr-i&Pm8sDr{l1H1GnmW54=^(8f>uh1swxt2;X6Yn~s_YT|b&i7HDHz?V55UBRyI2flQ|yuP z@{=p=a0Ri}rlu%sv5G6pdDr4q)AL5$%N|Lt+~nqV*va~3Q)XChmOK643iO3%&5a@BjceusFPAyxQ22QzlKiNoq7uHc zxCr`n9bJnnalCMx_2-QqNJTD3Emd!qheF11HbOt;JqNFZ$!zJw-vALrf79>9g_g?y`02SQ@d^Uf?mpapJBU0m8$jD~(rs2(_hx zOuDKzZY}$;>2xfbFfaox+oMJ>(BI$9ei*{e!s0Y+Vo>*na1}5A^`dkEVT+5K-&{O9 zdpgE#X0!a!wsEq-Q--Tp!NNT2@omZvf%;3A$L7e&i=x_+-vnT&8J4tbh)SH5+0>1V zr{FTa-3>r`&Ay~!R$Qy|l=;!a;lSxBw<7NWDzXjPQDUUluWc7tmwRlM;!wX>@ZtCz zGjuo!Jq+<1l}B~RTN~yiN$Z zgU+tX%0WVJAEHv#+28@ZzICbZ!MpyUqjTAvPd70Y=`d?2MP1-d7w!NMbIj!wKPg0-rV^r7A93-&WP|)F+=h5M0ECnTT6Mo&)r_T54c+*58DGGE z)*D~DXF&ke^LML3&2)>g^Ldqub|gc`3<^GjsTUFW&+6hk!+{lw!CBea@%in&VLB+6 zl;Jh*)QpU#CA*UMsmSD-^RV&8fVXeof=J2dfm=6Sh!KVmt5ggt!&-q@fWnMAT#u<_ z&Xq}4J0%ar_m~5v46mfBO8a&C95o;OQ!*SW%uh=CW+9~HV_OlKjG3{#sFd({)8bRK z(ZvOhL-o6$40wRELgT*}a=#1M#l7dk($31l@`^|9&kz_@F9H7mPoWA;@N2C-q;@x| zCjkNm5+v|T@C!^6mUfGZ#P#%{mmL}}6VtKJ!2*xolNr7zJN=#fdfoI{P+K{3@0KR0 z7|OL3ivWia(#mEP@BHI3`1W>S@BM?=l>2;tUIUNg$50d*SGMX-qE;HfzZgkk6m zUsD!=!;}fjf7;pAZzm+Z~~Bk0NHKY?=ObM*8jR4i!3WEyMFz8KM>Od z6%-V3lH?2K#?jnsKk==NcGk02kF_*4*_1)C`?sSt?7o`H{K5j*ZjrB@Q}6`zP}bJI z9*DE#j9%;o1_h)~U{GMET7v~xVkQ=NYDx+y6E?k{hz2chG^Xa}VQp#1uRtdSy~g1> z04V~z2#Sotuzv9OUtD!`3ylLB7^5X5B)A``kcjA;M9hH7WMpMJo#x-g9S>?p*S*l2 zMr3BX)mmk`+|LYwc&D!bsvwwDgm2f7=@RM=cBij4Q0RszYQ}~avjQapYaIh;Y zJOn76O{5f6Pb@py`4sFxz5KA+1KQ)N)>e57l_MvTK_6Qm!pI0{XFW?}xE%T7HY5^E z7S^q`;0Y+`TT90-_ToeF;o3{bqJb_3kS`$ZeEj?%8^>&QtE&N2-YgH^ZQs{V-!>4F zU3oIDZvmdiGG)Jl*J{ld7Z!TbO8=qpT^SzoG+TZv74~?^Wz_tHJoGF~x(iWM1e#43 z@Y}v{SbUQ`Xk`en^~2D8<^^t~E6H__x-0 zy!>TXavsmp+_->Ef?^%$L%j)6kXBMv?M)LF5^6agn#bWG6Ip&xy~89F0^?0O%%+QLL2N}k4^{=Hxh~k z!b}lFwf=EeADwhzeW*J z;?@E<6HrzrN=&q93~7iJi3Q?D6tVYx2)jT=w#4-l*hD+)0ZEu)ktl=R4p%M&cnaq0X-OfdJ~b zj9r9kFi%%dXq*lD^-ISgsg?piXAJgdHs{Va`ISom93GwAS_vcnkOm;uV&;|pKtMHr zfoR`8*NB=vL%vzV0SXIb4naT+7xy!ri!HLYtaoIRVb>CVae95WTcY;++{JOo^V+`%6U^x!*p?x-fDYUEov 
zJ8MM70xl1t$ppxDM!TQ)_Mn^*5(sYnb2UH=IKmZ<5FvvZ zWSPVi^jjaq+*P0{!4fPJJc<-RnCrPjjjT=B0T>b$!V|$P4a~F#3X}~H1NW=<;FE{A z!I&QDqi{a%RzkOog(lp7b?8=Vhg?{TO6PVk$BChWYa+u@xV_V8A%rTUbBFsJAL^Uy zHW4^TzUbp`kWTy0@&y|$+UzY_AO6k!`J#kEJ6f%^A7X-wKZ-|2lkWVfz>o_SH@Yuv z)NM!hX)8pG@Z8L1%JcEU&aLY7o|sS9TjspB_O;+5NeN2^02JL~q%Ly0a4DWAz50^0u6lBZ0OI_;LDkkfWb z8SrQYw2QYI@rQW@osHrdFP3i5q|eR!SFWKJYUX(UycxBsy$2^ zq-L+hcn!KS#=GI4++8o;C>S6rAwZ*Q`!&a9K1N)OMqpQ?K%jP;7NNjlOVyXjs4@g1 zLyE|LYwFKlkI*q)gesPl40@Enl2HPJ`hslE*DkM~PoFskSB0a`%NBpbzZ3j@i+n#wl%{b;mt z?kS4s8Sd1y+r|hKl5H8<%#<=~rG&Jye~J^vWBbn|z)+a9(e_-e<#zuOK|jA-cvtv* zL6G?%(Fn?MBCrZ;Nmlyt;|B~`fK|)PoB@WgaS=R|5 z_ok+$-JuT+qXl)pt)_?jB&#O7ogfu$BDRaI4(QTZirK4`Qa)=DHPq|4*Z z@ftOS3hx(ZY>(S^1Xy^5)rMA-2yU-lTyq#ou4cBo%RmlLaCiQ~k+ttRHe=_~lsy@O zBb|N}y9h3mB)M*{kLtOWkZ`!}?&R!kjFuA9#R)8*r)3ACPdbY=RdeKDzSt9I#_2oi z>XBPRcmAos2)nZDV!>hbA)Ys~OgIFlIv_pLx|att0!p6}dB4WyN00`9fxv95n8 zXKDq#43J$5?4?6mLw5MueLs4By{%FSXwb{)T7+^)HO<*@KA0@WeVm8g;j(LnaG+u9 zk^vPJ6_B7T+GD+?2Y4V2%+F`~lICki_I7=?1xE#wh2Jrb%bio6M4HKEj3M}Vcx1R6 zVjfXZM_#=mGA*%U??{@1Er992vy>sBbn0H|X(L#gB{%GS}7D%Sso{ zJ~?8sFd<5{^I}n<;{%Dxnp3W&n;zF4=Be9*DJ5;Vc!s@8=nZ23~2+>3DF54*BWG9H&&GUR*-^#_U%O zyfwWa{pihLbQsTakaHdB0`uE=TRcW4CQi16g^Z%*^SIs?fyl(j9y=+(z+(@o*#}jh zO?8*#Pad(I)83x4wOe8@T50!32ltI@xD!bbVy8?&GzbJHbl{ZETj2i6I_`9V3VN{=!Ip4Es)bi9ih}zuYF_vt3)7H=oJ1oyb5Sfn9{`u{=WL-1lv$!-4Z+->715Rf_D~ z>I0Cf;a$C`D9t~_jjlvQRV{ixm)cs7AR%AT=GvYg;k{fV(!W>%@^frsWrV4-?vdi+ z!_eq%j!7#Fkf-xUqhpTrMFgWaovSL90%@ zObtdMBB8_mXfjMB^uQ53_CtNd3ng4K+zux52ZxQT0xzCF53LFO!8|l)3U+1=h?Xyt z_TGJp{fB9=bsh8$KdFMTh-n!H!g>4>JAr`OpZGBLK8PC?W?*k04Cr@O7p6OdX5%DP z=EKBOAe2E7z0+hYC4prGW#St9ETtFb`B4%R5})e0VgL2eIgJ;U4|=@hnA&MdHmg*M zS)nNNWcvIvcmJtln#xDXf=@_bVvL9rG%ytLm_qT?yS7x-tp1te#dpk=FRwc-Pa-o_ z)FOh*aF877jQe&B_N&@|^!OIhX=}1C6im?Jnm_KIDR~ zv;8+O9EWQ#IeiLZy1U@U03!8wn$CC66{={v=Kc5-b&C!4W$fEc9O(A4DC!;H4)X@I>N8;*%%)1gvJU zaYCVegmnvSdMa;pw6#4V-8UEBASo#c@D`{H;j+z6HXl8Dn)Q68AN7I26LN<2(Zf_n zVob2mE0v#PLk!$#C3SVDmD&4U#}kZECKq~7F~R~~`r4PIWgoXT`d;7T 
zdlr=bpvb+*`^bxCzYFooDQHsValxG!2letK31{l6VP<@8uJW(Wfl_x^yT+HWc*MT2 zOE8@>S~JWP$?ODfHg)vFN?xQ{g@?UED#W=|9izoFwBSRrCF^p;r{M!m!ufL-VLdmt zvKCTKo-EvQ+^kuprIU)V-_@gQbTF&%52R(xN03P{y1!L4YKGGJ3o#%4(Iw zlt@O0Pbq<`Z6k?tRXv?B(MS*aP{c9>HlI0OUoY=MU3B(j2=6&rNBF&So|euG!`)(d z8|mPBk&)pmSie{FG|;myTq(aIPgL}f~n|N zYD$NFLj;TNi*m2863g=L3$hXIeYxmb~WKVQ0m~GZ%EEmrT zKkG9fB`wsQ`(@+#BPeVo1KqdHk%2pvUce+PeqF9@wa%{i!AOmq^NZ{}SC!ir*IU=N z-&4fzJ9?OjXaCNZ?nsLEw~b%i=5j4keK_~iG=24F%$7}xG%K4xETcN-On2XwnOwc^ zix;mf3t8emJn)GX3{YO<=MJDHbMf-ql~8D0ks^ECc3-2faPUx zyGvnKBg}SaaS)g(+g#pmSQTz{jS`-iW@2?s^eeL*x#M=Z#Bm*=xk`w47dlu2VHmJd zm|J&2M36dwQB1(Pk9boXR|jiOZhO6z(T<7T{vv=>g8pN0UnW;jk7kNOT_)6HjG=>W zqp~b~2-bOHJadz)>Xu1c+gou4gCo#nf(k56J@0*DBMfU05k+Zy|63{~Xp5>;&%T&# zb{#@5#e{9UpY(L;t%lJjTg`|y0YP2!=g>G*w%dB1ec+h(*66nM&-~`}v-Ljd1^FYo zq3s(wnziQv^=kf7{t%H$9&JY*zSjg1vFNr^7X9+C=uy{G9on1=raU<_Vyz#w(^R!$ z%h-yCk0KZ_#Cr08>oyxJbDR3Z@ZqBiZ9asr5BCN7F9SveI!`rsgLkjm%I7oA(x~{^|syGH`y$$Eph9MH^J{{E;q2Tf4v9 z`LTI$9tm5}(*wfbYk&Bj6>L2SK1kO3#v8x6H5`)2n%j;Yy>o6>b;}jj6vTVLkToV_Ak<_wU_% zla@AR*PT~AA&}p^63Jy{cWQ3{;vYX`ESs1W8+&v0(%X2taB#z8i*A43^>Z-1rh@4c z?_VPO6HfO=zl2^31rHOlwlmD#L-_i+gp!%CY?7pyHeviQL11uO|8UjS$F5E!x ziUhr9nX11o?8N7lb4~DeS}sd>aBr_KPnkwv>wIY@$$2I6ax_wHKnQv4MrjU$mbbUz zPy)U8gM>6B{xX$^^|QXS?L>c^$3|aoiS1nCuAKr9mcZc*0``$zbEG+JT;Ai-)T@G6 zcKk7R0)1-Gkt=ELhC!hULz#%cN{|GJt+bL2| zD8Bgu@eV;sSE4|$5UKLC(i%ua2q_8kuLr{#sBaf~S5i$$2u12ZD#Y%T!&L{U*?(Wo zCb9VRb<8F=JDXI{74o&|y?c)x51a>2Ce%Zr!HHwY#ridT3KJ-&Y&yD*U7NuZ4#9vPYL_!tKThY(_(2ScW%!`J>;x9lV6TBOc4P0u#~u%4g^ zaGlq(hU)4hdw=IiKMnhE-F$QK^F?$qXhA`K6IdCa%BJuB-nCkRpbNeqX!Za}07H7& zqC@!SEHs2j5<|{iF8_)m={RNmsoeqP%Or4E8 z-7}bnDJPLUeLA3FYocU}9^f!DPTI?tD+|j_0Co%|uy84TOO${Sz8JfWIo6|q@mGEwazO|7y2Ify~Ivr$M}M6)Nw{h#m_;+i_TB?K$mQ^n#8jjRjY};t#0+ zmo~&fpJP>J1R*79xtc!E27N9JZ^@3(8B1n%LIpbeolp)+s)U?#&yE;V$7%ur8<0g6 zZI`8U#k0?08YBOfBKR1qKya8rAdRt&06<1TTIugmsEcH{x2@G_C z$)SQzuuV5^PuG;R@CQbS!zc_J?=ldDhuY9q59b|5U*>KYq#$?vO}VPVX%B{d&-$!d zx;(!=W4FhXu^NPx&=J*Ajzk~N0(V$2O!H&ZD(o;7nwXhs|NQzy8DYf1cA#&r&y=`< 
z1B!Z{#k=08qyCX@l2ovdV^wjVP-lblNp5cLAmvChd7l$0viA=Hp}lbhMKfmxdm(d! zq;KW$L(@yRQ*i7?4k{Y~sk?L2l9N3gk4JNixFw$c^5JcK4?4xD>$-9D~uEYxrR z786|SnQ>JqJNv6mgn-!XV?@c_lSml-B0>lQMEDC5bCJKmmPB@?LQDe_7>g4~lNrkq zbe(b1U(GN+2rt;#<$kCa_uv&Lz})T9RcxgcbR^-6HTdcNnz8saa2WS2bUp1F*0tY` zKIpM+E^w150r>rItU6dRW&^Ge&{F9tiQH8xWH^H-+@+*;rz;owCt^(SgKiM+;9~_a zcmWv&^dbaTIo(TQG&hTk1rHlb7|R*oG42MNHPQgewMVEHfyt}h9dey&VXW6L1<|c>3)0J~wtu ze&lf#0vlF7??>DNF;*uOzp&yeBopm8A|PGzA1f)0s2p)RdTF+&mBPH;PP+tei0yhV zkpX^UW5dt(+j*znp;&1&<%>A9QlW_b`AM|9tLCR=e7pPUOCLH0?(@!q1PQ1T^|~`5Ibz_?Uv@Mi|2H07dwlpz;+|E;v`BZ@jAs z3JL->aMk>iCr=6s`C_V7Qt#f+>t0`97ZVjdcI?=zSFZ{!$T8IX$Jf$;ooHk&tsoS5 zmt!kWIR%)?O5vdwVyc051iFBr)vsT_f~Gd3d&fQlC2Fep^$p)5K}0+GfxFYPo3d1) zMZ3yBM5kG$`xdu$Q$rOL6!pWvhJ|n~V0WY;@fP=8(db5ML6Iybz-Q+q6-tf4V!mEY^SZeGyCv>*_A z0C^tp=jl73k^po^LsK^J;Ka$3s~UJX0)LCiKa~ol2?4RT%A4BL!^2;TZeWYTMy!?0 z%W)JZUF2ncMZ^wr$07>F>7q&N@3m`RDJszjN<4pLB2M+`_fgEK7<%9*^DFXTmjaeT zpjwuY==}6BjzJZW@3q8tx4^*xD3#U20k5-MHyyzmiZhapjSY^XX|)9~;1_5^e*FT0 zWUKs;t-dPL1jzRTD;M)V3fdNddrSulYh3R^#nFLrFJAroAn}1In6$JP-&J1Tg1o$T zT^V2Rl>)n+`wYZ{bkKu*3gfmm0NaNe@o2iMR|CQ~*U?f2N-9?v7#NtDRjj03U0vPW zw!upRux#Zv_n0Iw&!c%L9s-WLQn!_aecFTdSAYHRygWlE2!0mbQ_D|SAPkL@D>gfP zDxZ^jAH;ZN0cvX5L?tqKVwc2zJ-A*dAi3GtS9AbdbKZE+X_gBp2Vw*89DE?aDLEt@ zMiy1a2#SJ!zKzxo_^rbk8N3SS2jGQT-z%vLU%XOGv(_Z%y>DZ2Liz!Z7YbB9xN zB7Jf$PMKg~ZC1D3r;-IcHQ!6UmK_D#HC-K@pc(e7+VSkJ*96cQ1`I?VCB5yfP34q3 zo8aPX4Pq$Z^??L(E&tX8X@tK#wG+5t$&c{Y>5K^$f(VRt2&$9*Vl^wPd7#3=&eYS| zTDow2vxoOF#lTytKarl?>N=@paR4gB&+iRWMoLPcUM6r2V`8Q95mHULt&u1}L~0DE zFu}6HDj$4{*Msk^;m2@UM$hw0+gWXlbJ!vT8xU``l%LRMOMhoa|TlVy}P_e%;`s#iSX-~nC;xL8IgP0RL?a0w!94r6}@&EA$j|V&Z z9{Incd;1i=10A5<54bF)Q(obwQiCpV%-fjl{{SB?G3=YKuH44fhe^Qd>lJLtcIp zu>oQrB!2v0z?HfD?b}P$VW8h17`Siy9PTv=bx>7HUj7Z!5geDl(;!%;{u>IfXmGCO zOVbc~<4}lyCW#6tCgb1EY59p(bM>)A;+lJ#!XIM^e@8_XJM7n ztrDL?xoTL4U@sMT>@d=jhACQ3J1cWrCRjIldHNt6N418=)*U~Zr1dK@0f%8RhaL?Y|@edSg0lWBg zjAtOGw0OC) zqqnxUz+eyxlhjoF`f31m7IUm|yeSS%8+h1}P1c{eHiT 
zr&=NnJxB`<9W`(#z&Y8;^=;E#)exztiYm}RkL&8wu&^|amiC}8bprp=`oRPFRTggU z{J1#b@X+UA@>W>9i1QLb`Z0J1d{}%=4otnD6Bro$|8oo3g_;Sg66NI;1$qw$mlXib zhr$3+?@_T%boM9M<&PmH$E}sl61S38lRIvZ%YXx?5Agr*-$||}O+4-fa6lhfYnB$- zXu~3TZ#Ve)%!69|fi?i5sv!%CO$ZVQg&Gd>{V8Qe6$#x-V~6M^jDT}9J`a{b9>Sn+ zp1<%pXrh6d7_4}a_gjNZ4dz#vKvSUYp0GXZurNZ`S84;zd`Mtm-0M5M4}YQfIJ6v* zIfBsJltb9s`Tnf%UF2BL&a7GkYbayZ7{$PzEyKC7(NJi;yW^ywj;=2M^so_iEC^u1 z&q}rI3=z@QbA=ZiKDA+#nDOyp(g{G%MDr>>CBqp|N9buF$Wst%4hRheFJ7_c=byeR zCCa5cIsZ;`jo5$M3t8g85CnUcg1!gjv8e@(bQv@0VGfDtVrf&%1~_GmTl+VO3}EKZ zQVh+y=UfNTQf{`nGbhY}-v=Az_pi5cFWZ|Jb*99ag8h`O>ux~=`RUwQ3m%HN-svF& zCo%gnY|Ae}I%h=62CPI5PzlVZ>UD^kT~zt%dF)3G*|*(W zU$$ggzp1&=?&j2^3Bhm6cF5(Kr325wGPrCidZ@D@53<;P`hBHoP<7Z{+th~Fc%)rj z%eBCy^*u~%Kq~)uSr4uQ8M8b=wjrQ{`_^V>Vxzd~2jjy_a1oyYn0^|Yn7oy^a2#+l zM2~$?=TDsD|4Iz28XHW6lUkrohU9@5*}<*=5_u&2I&m&)SC)UB+|NS9=f4m|usk1MzB}|p(mJ~#n3F|We04=H_g$*O8Dv+`B zt&O|D$3#LMP|fg_UKMzV5o0b1bM?^;G+a$s#w7I9_u!Vrb z<0g<%Ve3A8_z)C~5|xp-GP1 zfyb5K_1(k1qjJHD7ev*4eS(;{BM(+Ow!@%0&6^+ly(NCXAs61LI>`tMDw5U82eEPIofH%Rkr%+ zERPC$MjvJW`!<1I)p#uVXsaMUAhiE!+k&41)CaC7)A)$dQpd4kR4&ELL zy{Mx|p!V`7A60*8gX6uRpJz~*VF{+71ipQOU~K+&nhAm0b)Q3j{iel%vmTD~wyuQ_ zg(7Ox1%@wz2Z0eag47&)hAOL&#I8=9*q{cm>;A=jcIh$I&&D;+dp`x zVmYSq=;apzHE0ikDfZ=jxI&;b-~r8Jq_9vfT=2K0p0ckMS4(lIRd@yDZs3|tE4VJM zokGV2Fs?^7WJjNSeiJj0Jb=_c{le#kxME=sfCb={1>jC<1$rI)@afa_j0+N`98oBP z95sEhaWX)3iv+%JCJj+;(Xdn1dm>vExx2zfm)X;525or2&yAdrFwXgAqaLe+rnV^u zRe^k1tw9XFLOV-z!=l}>oktKG){tGt0m>XWPryk}Ph@0RJ8dW~CqkAdQ!Q=UX%X5Z z>MvkxrQbVj1)R5?PZ7DyKJ3WYejdIIO*j;@`nA2iy`bVvu7+mW2@H7LP18o|1}VqS z>Qs^0rVA_iRBiHPb6by8DEax(6jayOqk$-}GI|KPiZ8eOGldU2GnYSox<9LvLA81_ z+8yZSK$Zs=d8iKV+)1uh$V^sFe; zcSj;FGQ!(!b3|?nMJ_s#SrOhUd-_rm8>9UiPR%6|U>AC=)YD`BDA9Ce9s)@jA@pEE z_O%P2CQt2yXll`N`M;%Ies(rHCFNJ0k5H@?XfS0sL8I#7>+9?3nUb2iSfQq-@ZsY} z7U0`)Mg|0AJ{%Oy5PO$nr+V{|UXGd!4dJ1^-}oDEtiwrJf+xU-1*#>$^gu=YIRf<< zG@=aPn@4x$$`xR*mU!+2MjlYL6SSV7$p%{fRri+e?i7nQUH!D8Y7@_M%x$kckH&oj zi?N-|ii2D0^Z-yq#gWVLV8)~`uv2)ElahM*`jVG;?tlPty~P9(wFYqYlb~7SzKXmZ 
zLAU)F)jfz{J9QZjt_<^a?|PN#Lf4dri66QGglbq6;Ia+iz$pNlo9bscjaqSF1W(vg zMjFEqk3L`qFX7+VCXM+1erJ(-us^il7Cq)T^qfNJ>j%AXS26xY>FoL~h<*`Wy$=7b{1RbHvs8Q}B#nBdk0s{PV|4|f@WH4NNHlR~&_HPzLg9v&c~0%I`15hYg? z#!@*2SYf-uq)L^Rk#TBuG^P(|tiTrD2n!E)fHPL~b6`LR{_@{_{4cC3Y#+UatpGNz zg4g4)6~YhSdt>csvtb$p?CmE2mX(osD9R>U!vRDPrP}1E1!KHL$A8oB)w1G~xtmD?Wz`4KNmD1YJkq z6F|fjvL|=we>iZwwYkhzTjlJ*ea%6NJU9-)oXnwJ%BR(oNazqkz4(4uekv z6e-l%vp-BB?jq^Up^|Of7cLOIVj!zj*%8CUt$is+nJx3UFFAimN~MgUi=CR-(Ve(^ zKd`~3P9EGFc7rbIyBsyxQ-C#)d-z)$sf89Pz_DR)mBtWx!YXWykesdQ%>~J}Dk9)1 z5u;i>6WH7WY@|S*j=WT+s8^bsz;~jdjQl=SG~m2tXqZ?NbN!*vgd{hXT7<#FAAgWA zf{hNtI)g0L%#igc0>q?7NeQkOw(0x#ilG@=MO``{S2P$QT7pgV?b~`6l(MGH4c&-to5t+eK` zboGXtPaqB04|d2Ja2O7Myvq>5u(O)?m=0RWWKY4npBv$P-m1S1T?3~0pFRWI2~y-0 z8uq%_Tkoe6UDb8BW)G@u)>sN`c}(`id>(-&b#dENXh9bles<4wcl> zj~P48Wl+tyw`XB?AF?-(I+?34UrDU{HXVNfQ+Z!Ub|hn0k{doFsa(W!Q^@V_cC5__ zh|B)+HpR-2Vj0thgS-6yk$d2WpiaAnY|7i$Fkc->m`FZBXiU3RgSgQ>1_X1%rk<0h zeUZQZsk{D$5lkuHaT!W^1L?K3Eek^Ami?*u;e4{=-`I9XR^S~TR)(T^G75rwJjG)X zAt<8fr+o^h*mm2e8$;CW?xk};Bo2fZ{tX+4u5IGh4qE}cFY5ZkZ>AM=jEpkk;t)R| zoMt9$%>xcr>9kwCk3O<#Q!!3!?(7T?4LwIlsEbh$LSzr7QW;AvyL$s3`pzAsc~y;f zIvz!{%@b9ZnwmO?YyRWaSx_REl(>O~-LWIB_4I~}@c%8v7F3 zOfU-vs)WzDGNinZ7^tXpA3SK-{2AJQJTE(B;Z%(*gmP=2NP4ML_1zoqhnnT@BML zKE@r$6qddg3&>$}v90e$N?-#@gbw|jARW{y;E^CIDr#E^yE9q1;`dl7q2(XR;K7bpBAO9t$^ove8U{mZNj=b zdL8*-YqO;~S|#gY71qr;|DJ40xT8>4@4f>M@(#ZWUB}YHGTK}gO`-QMq&LuNpFY4& z@$YXk#ilWAjeY)kxnQgOj<2iDO~4EMFVD}UcT~SL+}l&>1U&4dbfy7cEh)h(fxrAO zyhDZg7CK;gNtjX66H+LNr}H<{Wwi5)%UI>S4dE}bOk411kVS*32CJc z-jH1?aX!@)y|I{sLdL_$rVGnOB`{oYOxb?^gPv@&ef4Mc>7DjGfjc(2E014ud(Ltm zjS&yxgt0@4g^ubM(C!C=a{vx*4nUBBoyGZu9ot3Gto`X8VfF0$r`vgh2@cqd4He*l zzdwX==p#ru|KwGOYhH(T$DzQ2{ROhcJ>3{YNN|;f@UW6YihN(75kUgVL?s*s8~nFS zf3NI@JYm-fE=YC&O+llwB?{%4?#=AIaOziCqj6}J`{8U#@aKW}fPqqWqD|NcDgi=* zmK(=bwfA@pro`|&0+v2t;0~ti|J$MURDcp1>LiBxni?~6^Wk^Y7``6%5IUjt$m(G= zIOz*UK$|iME{`B)x_V`%LO}Gf=81w{pNfhqvzVI4mI}fUR0Q)W2&l=x!-VK!k&C*b zh7x4r{r){em=L7me2$ArCAc7cPeuSmBm}H7is9-4GBGX+En>?s9IzP*1R9m&*w+Gv 
z(-_&%vI-jpVgU~Ltfh_BjoDO%AQP!8h*_H;(P=@V+4oSwA&;b&(BJW51S)B#aFv-Q z{de9(-$VIpZugg?J;a|j7omG2p~2a>ZdgaEsx71j*92$t)K4^M=t4=~9uy4QeNrtq z7fxTnl#LjfdPJ|S5zMN=d26ULuqs1MUHx)exHL^Sw=_4u-K%?Xm#p0_+` zB`t?twb-icZ<)(pJbZ=i-yW%Top$U38^oQU)~2SWXt%9ZfX%_GUx`8OI6hv>6Dtm&k<(Qtv$9Fm_3;n#u=DcJ}V1HneFMAGP4P~ zz<{T(xtS`ZvJE^qdfWdIe;*xL$Rqv7IGfsZPNiT~vn zQ+*|Z(5~@+fbkEw&{km@^-~1U0TRj?jVE8Nl!7vhJ~nwYS&ze)yj2VylpWIgV=GKo z{+}i}xp{f?3#?Q(WnDhS3oP9msf$wA)MR9XK~Q$58I*EC5HbXfFxfR_jklEl_TjD@NUM3J?RfEuQ?b`-)S zwv&RZ!Gu4`n1*Z%Le|!pF2mXUD5QEi6%9`bX|@nrK9-t7p!Q*ZaQ#VwflCGqSaWsE z;bG9(E9?bNkA6fc!+{Ru-s`8#KCy|wudkzk%a__Hf^wWfx|Aolw$-W5Ke za;?;xza9W3l6xuT1o+oOI82Eg|DT3S3;o-c!>{4IDt5efoWhKt3sg4LHQ@On#S@TZ za4oj>HMdfyPy8F`dNSzY7{+fFdDS6Up2<%Y^+NlP#2tK@Iy~g=rY=4Ys1;Z$SzUJC|A_itAz5B;$3bBqYQCz zc_J{mMnF_`rhmW!BTc>$*?hcOjxvk96w@lEmg-(iLx%2%`&;3W;mVvjcTTEh&XKts z%!okc8dwPy*KO&VDNGq@X;J)6xt(URa&jsq!=D_NOyH#AXisWnD^!$1vK*oW56}4T zPM^37xT}~Hm6U=@!9z$f)M-+|6rMe-+XQ03P6@WjAY|_CGv3<#vpf#~#Q3L( zh6Rt*>bs}63LSqWwBTTkH{JUcO0=Ef=G* z;-#1>Tmh14$VC%?V1S{(fb#lpOs?_`4-bc9n(rusI!7&2Ph#?XxIcODFa#|ibLf1e zrlI)^2Lb%cpTe;0aByPkZk}9`HEP#(bCHv)hiAN@{GSG&{Cr?!B)#C(YFLs(Vz&9j zUmzw^PN30fU+}Mh|IVHTLJxAe3P`+ofOPZ{R~J?q%{b{7b{PQ|aILDGq2dKf)Mt&8 z_W<7?pU2Xw2#7C%_;xI)bi9(AtiHDK`% zQhsLg?-H1TXXl)&en!Ess01y`5eyLoxFP8*nHriU7;04_5W7a?USA)0a2;DDVP6A$ zCCk-H38OuTC)~Lzt8hT>c5!#Bp#PApkQ!_E<(Th%3@j784xNtvJImM)$Cewqfqsy_!R!m zC9nsGw-G}iC&h$69k7uz!f8VQsnebR4X;4eBOqR?ynBiRlZ@rQ?5$3--K=tFq}SW{ z=b4vqF)=&lF^f5F+RPXK@-&soV?Exo@_g^cM?N^R~Wjzn+=VG?SBe z)^1ZE%<`R)6^>F=!x|o`!}P1S2t^_x-*zr^X03fyvjAUu@FPIQf1u7AEtj){e+X*2 z-MMN0Z8)GdXu$y9^gWP9bMBS1eWpUj9y?sb*%0)`)S_(RjlE9+9-`aEa;de^oOTK* zU+Dzg5C44$i6-2IvJ7F~BJ^;PHuDBW;%4kmh-e_eON4ghDzNLwDqq7trm+l|=kPxU7^XH)Nt5v8pW6(l_7 zz!%kcz=PgXJirq8NU_7C!ydQ@o4|E1P((E2GNFgPf?^_)aPmngcjZ zrQz|O+VM;FW%fJZ6Au%w5jZe+cW_gVFLJ(WzruondHsfkx*ZC0Fed>q z4Sy^h1CbP*)FZ-*4!rkoOj^U0!8L|a1sp0uMHQEu8*=W_Pl-03{OP&aytR|*+oLL5 zvyq1clep13$t>Z~oAW2#9LJzv#KGxr2KzJOYe5YloHC;SJz-%Z`IV2DcYCon)TJy- 
ztiD7#Ar5W!JJoF(hnEwRaMcV1Vbsoqh+>?e3(k<`H0yZK|7Bm-tB+s$fOy~=UGil!DB>We zYKQ9f>+-lDW{I_p7kZ#^{mUyV>ziL|UP~#~241wyf@HXgKu&c@jo3yZE78HisJ4Rl zg&|7i=_5vb?th1tTWX-2o*%9bMU>2Lv{HyT7bZ2(>Hy`jQN-)eVQ{=(9_J@%scr(Q z$YCu@C31p+b^ua$po7OHaHSLulR&p`+lXRV?VnUtY6gvHaET@apuxGxXJ9+MK3<&S z${@AHGhe*UGmW;I>~iVv7>_#b$}G0a1^7n6#ETmL^5g(PSzFk4(b$Zhhh7$3g9IPR z`SWi)&L4NSDhZ8Sm&lr6kiCrbB-}-ywusX`8M8nGD1q`7+g2oB@KxF_@ z+ww$L)(3F1L`rcSC5gUj|3}SwS0bULs*~7{HbJ9G`9<&lD$slt7 zlYX#dUwiGwKgY>p4+q=HK6`1tSPsS_D73&B5ffE$AXur%7utmK|7T9}- zU6G|pFk&*0Fk(MSNT@0Dn`=L=`S=yx1I;!vMfF8Brr$*#va%@DAU~aXdCKx2LDy~C zY%XQN2ac8;&9`MO`BollNA1rzP57hoy9i4&g|u`bRYJ;XaU)1aL&K{R+W2fT!z~Mu zCM7(Fw5Vua1$ubsB-{)Ovlo+Ap#qX(Z5WGgQ#;x&lA4`ncyo5~*#r>trKGCo2FnXR ze@66>m}{hY6@K`>&aaE7O}tOt6N2fRw79sRoA9;RLqr1!KzKhSH?to5=oWGuEyy52 zOgYTh2tvqzFJImig+r*OtgLKnyY!mdT4>h!9Ikie#QOaSxtX4ix?(^?ExZiU0MM}g z@283j@j}#wHaqC`aut2Xr)?APcZ3i@NVaBIuXnIe_wjBkn3C!y*<4IWfJJ>qFQJE;F%UP8r1ZF=8+rcz~Zp;WqQ9vYy ziCBP73psPi#{3+v%&$(kmB_5T$EI0^Ace0j9IAX&lE44y@KBrxX!4fK!ZzSomK4_? 
zO7pSV&nH#L?kXx`h}Q4H{@#Tt^PvM10bP-WABu=+#yw&fzheD99E+z|1R@WRJCAbc zyuBVCcEx}g<>6r`lEm?}4_78r2K9wt6UB=Lrv;_6OG^4i(PxLnmPmV6MhT!vTT&paALc4ql>ug1 z{mFoa-)F**RsSk{cO4oFnVOktoQTGK4iT(&tEnVSz^-1_*uUuw|S)W1D!;7(2K>Q$qQaw4u|-u-zvF6VF= zy+Gt-S#lAW=w&1XiMcDd zR@3#jMtVwyuaDu%coz^=ixChq*Ho{swWSFjk3(^mMjn+inwG0I6KFTt?2zHobXv2q zz72DiuBR^I=hVS@Rio{Bstd_l@Wy~>x+kMYexYZ_pv)}$8I4rHKEBZ(%@+ms1%QReiR9wz= zeX1|;{n1Vo4M}~~+OIRd6qLO&2bUe_9v3y+$ygLUpWzisa3{MeA-S+YPp`cvb`dEH-Xmi`!QiGaYE^bIr zTX?&~oG)5er9Eq2`86*Wze#OK?(9=E z6yWvt?Zo!1d(ZqJR6mQ8S*BCVhhi5zu7dFlbMdEEmjNkeb2+)~nlo-yjh0sk)E24P zZMVF+#;vVJq-Y7Na2W*!oq9g&D{($Wxw;W_jK2FqKmt!k*V;K~-uSHDCJ@p3Fn;R$(Nm@|LHEi_^cR$=%K1)a#7#f<`TO=Hv z_MH)3FjZhFrgIl4mVDrng^*U@ZUi~zxvVN|1+RK+53{?K$N&79(c^+9SSl|k*N*a^ z;~p0DmXs=)DNfk93U)FE3;CRpwxuu5l642Y&0ibODEod_?~rO*!pPPfZ?Kw*tWrfu z$y%!-`apN_rdG1}@U|HdQ{lG>RjMfTi!CC(5;{7G`FMeK)yPRZXO*|x52853BkE6P zn(B|VBquJ+7pJpZJz9Q8b|iUWyn?#OO|hw*++oNf#(cg(__FduxpacHbFm4JOSY_2 z)qq1!&Q~V&l+#|f--uBq1@aATqVbI{K2z>Ktjwk+r(MGDV&SlHoNMeyzDXj}`}ky5gGEez@d@YSCVz-C$7_f4ln+`8UvQAS(3#U-wb&KLQ_yTIUE-sW zXnHL5&NI!L!$)XVgGqBqWBH!fM$SCk_8ZE{Ycwtei?7G2mhpJrcKLzb6!Y&Z12>yv zqS-nm=e-CB1g?_*YPHo95ZH3=d@9S2QgC~_?NB*kO7|=>*e*ncH%MV1fEz*akr0M6UQA^X6v%E<>(jW1T{JAEIu>%ujcid1mNj(W<~~S z^REsG&x@{2S#r_QKC&omoc>tbnI(96>3vXw8Kt0Lx9)RTFY#@`5rJpp6Iy3Zcx`q* z>#TC4C|amn;OenS3(73Pl`%cuRGz#ZJRa(h0Z7yUk2&ihT$$e1yuPiXYUb@%^KpfW z^914{evio3sqp$ZSH>qh7mkk=qo=y>#mnfO3P6|mtH>Bix0=XZaBn$TRg!trjW9x) zY9;lCujqpq>fDRmii1lyHeH;LBN`pnKxREX2d?y*0PeEic-uB>HG-LDN#tZq3_xqavp_cwe5$2hYy_FGH@g ztlJ&BXV-$wu7R0UO9qo&6N!kx5|e6XgFSs^O0LU#(fC@Xyw>$7$D;*XBBKidZPttS zZ~PP9%tU-Irg!c;&3g7+-oTq6!^+tI>U8Lb&4YB(g!ucgst&T!}f8r zW}Q`W?LMujvqMFn?Phpv9jq5h$L*Z;qjGZ-f7WNE7|3sfHEvK~3)Nwiu1;w1eL_vaY7X5(%g6VcHz5(y(7^=12krwXr4#7@uB zUX3aGE)t!Pd3yCk+7|hT?* zWSdFpV0nz6uf=#d>!58?-|*qMoAEKX1@4d-zk>+iG8}v^rCMJrSK)=FO+D*3N$>?U=o8*%zUuP0e0Er0 zb{Za-r<8US1^X`n`o4?^mfm@-ZY9I&wc7KSaBB^jS^N{n1l~xV36(JLLL=*}q&%(< zS!erN0+e;vmWbMB%xpRJKbS^{ryNdBY_%j$(>A`7+4f^oLV~y2rMv 
zMf}a7QX_+r(UZ9WHPkd(9j!~LR;(f;!L8Zjl%(r~Y4uWS^^Jp1L?fOSn-Z+2&$@^q zy9b?NByrffeRna*sTRPz<68|59Z z@Q;YuF1$piQ#6hs&y4Y9OjLqGYe^4gs5f=b%w zTL#kwGh(-HwXaXUD@_+{Tq}%UBIPGDil_TsAOsAqPF|gXZ}erG;U6;&DJS{AZh!{Z z|Mv3l)zHdj>lnRCf8MqVODeipi9_N&Nr}-t%rBN#q93>8f6~fZZBv&S-)2ju7k~0O z=1hnKRbObNz0kt-3ycehmm6dc)bezJYz|A$cHi*3(A)y@__`FTXOfUOdgO9Qj0s~h zB1nVRHPd>Xvl_*n>LStaaKQv2<@6%`181t=ZVpYG^qz<5`^0O1^SMw>C1Y;Q39ZhI z1(9qmD&KAS6O+cDw#u2gCqYbit#xi(kU2`mxQL$i zkC7r)n{pb{P zpVcURwF+7OpkT~alPz!#7>-XEx@HwLUYN#}5kLzr-~I9QkaqEWm1pbrsF8(h2b6bl zz5*06^7=dv?!`s)_hd}+fhFKllZ0&~c;vm)mM6PnDYdj6-f}kAvV@CcDX?nu{7@ep z%(CQI^@fJs*VAa&d<1OwjRWb#jaZ3?M-7>^HW-(_84ZWrG&h%n;e@(o=V;188OhQ# z`Wkx^jevH}^WzzQ=^IgY(JGFl=)t*Ft0a;W{j!v5-u+qo`H8=}eBPMPsOY}AdVq?x zsn4c42N0llUvrmI3~Zu{{g+!h=g)ZECF9ZwIWX&JXBI{Kc8R!hV!oiy{Nc8&8;)Fa zMTX6|>gc;UwEZGUmAIUdUb|ZGSYw#0ZQ+`Cau!{h-7s~oiJzm|=CT65));Lo8;M%5 z*#`>SN5o$y&U(cfLZPLoAgU&@O1-uabB^9Fw;{v*+LT)b(dfIrq?~gnO(b zhG->zUHkv_b>(46W@|ie%~RrtspL{BS(#dHnah-0nK>omlDRgCr6!b1xe*zdnw92~ zHWpffq@s<3jk%TUSS}eVD5*)PZD^#Zq~Z$PgUx;Joj>OO!}EO4bH4X{=e%d({NCUD zJGmw#X7m*7QT+bk1LqD{3{4xy`~F5?vERRpVecx%Lfpk>A6yCFPS4AA^x-<={Bg1b z!R>mE!U;*gdy|qO+2qZ1!eS^G`)nSoTPYCi)H`!?$)nFJy8_~z9J7&+>xGsSjCo=f zWDdPy;l7XU##igPA38h9&%pYo58w1-4Gm>pI6o&}Q!;R`B%pP9;w^lCDyy1 zuVjKkP<0^;V{7Q4@f38Uqq+G4{q3YU1Nt7bbv4J!X2P2a)>M|8;}!d)>d)1t9hOu< zhIdJ2|LCtSgJvnTt#9i2abfT!w_>e^62t8#w&1iSlouBOHTt?OxwlCPoD-taZWcQ zM7%F?dGQ*&85*|pV2moEaN>&~At3=|sNO)>2c8HwGA8#XRd73(H)|XZdjZ?wv8bY} z%qmiy1%KN=6OO9Ozi4$(Rjf|mtR>3_**?V&ZMH4Z4~s&?llN6$=P``LuDBYYIiQ`x z`jXh;YB_@T4o@?351Qgcb9eOUO~a^EBvGw`NVKOB;a6v%3sz^PAWF32t|~yo+F9PL zHyWWK;kFA{WYjy4s3}e!5y!BZj|LAC_t9vhC_2dUvo+}2@vB#03pqWR< zF2_JTSi7Cmd_b36B7`7^9UdY{m;pKgKU^mgMSl})J3o5U}ieuU^xjE@kgJ7lxdeWO(V+1t?5QY zmxL66J3JyDM}vB#)tn(SrCSxw^oaoqyhqP_D)w4Ys0|=xjj*2W0r5{O>;&@Nd`AEtjBKxDf|v~PAsPm6~7S2*<7a{%o+?|m{<#A`e?<)`yR znRU`RGgRQK)Zq9xiz8Bt)Xx}heTHkie#Gjareu|^#hvu6#)Km~@k$IJ(|3dq^ilzI zcI3#*49Va~eygKrhm3|D`jG=NP4K?ufuJ2AIf{=Ql?zkaBF-=r#uV4DlbW|qb* 
zNnF(8m}IPx&0b2Qsm-b_sY%W#$gXVKsQVL9e$%2u+Vu95F_hwCWmeorW_HicfAmDT zG=|JCS%#E1*9s;t+}DbqN*JoNCnYb|mhqjmi!H)AYP}lMh9l$uYIjPDO~VO{a6(0> z%SDrIn`cr=#}1bY?d9ykCc^WCoR6{|H%QC_XTJT3Wk7T-sj^k*%JcPX!sDKv9-v{= zmXUJB|AkLVZ^pjNtV{u1c;}DIix(4;3;#m~9`>t>I|22rm;|1g*b%xHMjYhIi zyk%hkRupyc0yNdStgLKrk5;=}nW1?h@l$%(Xj9|e3Z+8LK&_qdPj-{qXW$>+Z`fEV zBSXF=(7aw)!0anemF%gTczKGHYJci-_j<)Zkkg8*->xEM%3O&I`TdtLWhunG)4M-e zg`(c3KHML@(4|+k#nMJ`pO=Tntjl2oaHKB)-1OpN9j@rc z$OLADDVXG(fwv*}9g(wm;~ZOYQ)DQM0B#9|u}gCXyo9NZLG+8doq+6;{aNi zOqGUjVR=&2{QIvP8Uif3p#$5hH2|#r&jqB#4pH<${B&J1;Jy?vVIVkjO=`#uol_yK z$#qB5JWBRTbGRt!C3Xw^Olw3YzFAx@tufgm+|;s5PsNX}qj1j3;;&LkbMjSnca4_` zysukI{59xgR%Z#!^c>)hY_{1#~wx-6|=0n3}szBhIn+PD=h57lR+tTW- zp2jwe{;~M>cyHjSVQ3nvu=U&0>KJaXie{9H_4BX1As`wFdZOr}_3V45?NRVn~8v)(oE@(C6QfV=+NKD}fP3A*r~5;oKYMbPu9Y&k{Q36>3sdaj_?gxAv!vt*NKI9Q-vOS`XFnk} zxV9bA69cY7hxzH?ZA9I-$`xFjLm55f6+{yOSogW5wn7Q$7%1nKoHo4^b>HsI@DNvS z?c>Xcll0k*Mlz9;MWiu#Q4%0$=Y>3O?ISlWUGC2Qv|E9*i%(KbjaT~KRD-q)6CE$^ zSR<6t#kc|QJ6>N(Pv@35Hw$yHN7ud=I%qsm$zw0S^FQ@dwl(6(p7NRx{RGxWLwJ2=%|8G#%e7;PTU z6t~pcB2acK3m^A5vO@-;oLw|=S6MB-e`@!d(QVZ+*hV$qdET!WeZ~#WvH`L{-b(x4 z4%&E0zWm*3>x-Cycg@ZmY! 
zyHbBSY46`S_?gCVv{mpLS=}ae^-8Th;`O4DFAw`xNjkLks=mn?V%^j)d0x?c_0$sh zw}1(S`MKkQJ=3aQa-bk1P14A8%E|ZC@7wcKADEC}DD(1OElBC?I3$e@6|7A&(ByT& aD{`sz`32eP77gHO-SML?4(0X%iT?)Rgi(qB diff --git a/doc/source/images/sequence_create_and_launch_audit.png b/doc/source/images/sequence_create_and_launch_audit.png deleted file mode 100644 index f697192c664c1b97d9b05882975d186cd65e99e8..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 33521 zcmb@ubzGEN_co3J7Kk3C1w@n%X&p*LT0oGJ6p$`~0i-bqNm05bm6UE!DW$tZnxVVn zT?2aJdCvFyd4Ippd;U0Q4s*}k``&x)wXStt*Pb^IrA2YhlbpxEz`zj~yDx`&{j9h6sCkW#sR_V_-awbbB`}^X=B9+ToBT0X4^$9syag*JV$g|HJxA zm8O4!2@RJPS&{lzfnl2mPdr(j3^O}7GK88+$h9Zkcq{a9i>Gw5?{af^=yV~J?|~u2f-Kcit|5b+fa1jSSjCjcJ6(f zA}M%XP^8Yt;Tx~HW)B{t-gTi=W-kUc!a*@i3kzHYcIMeR`Z<65&3M7|4jI2P0GSXe(!8Im}DC2QFKO&;f6(im>CD!h1C{otjnchcI zP|sB49?)+s+?r$;dXr3Kf z8|{d)i^da6tSCPba{f#nGWBaL28Jt!_zAp$&)QxK9tGjaw{bua@oMB|m#D#Uc zRsQcCe8F+&I3n{0_{t5$B~~XGB6*L#071her5{;OSJ&aY&FaFcH~hX;bH74qga2~S zC(@gZd*eei>xlx-4vsFKem%z;WkJm&Fux)Ch~vfK`EW-zhQy-n*b({Q=-ftud!-R0 zcfx(JjxLURJDAAM9F^L*bTumO+SaL{k5%<4t~Hyij~LDY_6!eU1bNj%;TNB{elQSy@53qi1uK{#M>bti;Tf zG|0~=jQ4!wC-sFQ>}^#V?H|^*enVvrw$jqlhK7dL)||vR$6w65E1@rMvVC0`uW)RH zPSJ|E?FJt)a;y#4W?_I%m&3@&==P_hE0HD}K~1!dQ}I`%etu9oG|@rmEaJ&(Z~MEj zn^(Hy$ceeMdf>d}Zg(HGwpZG6Wx=)8@ip zb#?VX?9mF|t<}>+`Rw%s(S?68*K}yFLDrPh#8#x1L^bx?X3=9FJ<_j^V$(t%A@QW%%^z(~loN7?d*93d{zZqIkT$z3*fm-4Xf3 zCZy>jL#jlHQ|JVZ0f+jI-f+iZkXfUc`u1fMIyGWoAn<+3x;|O%9>~{v$Z@=IAopk6 z@A}dED&k=MyZo}57uRj;RtLC+WAAD7<*w4gIx(Fo1LZD*GUP?UInyL-sL#op{C5}_ zp7s&qY3_`FrO9l(@?O$l=G*Iz*{-z5mGO6vCySTEMl~nf`~v@*=Qs2ETU@4C#>Gh^ zSCr*vJQxzyL?k5YW;#>1KPaMagMq-^mD2C%=(s{i=#sy$NqTUpWMk+%EP9_SP1l~Y zVBL)W#QmCZPU8eMccgL6!eB`?y|!=@!_kMyLidx{Nk~ZO>FF&kEg^)8oLA=Hv!Y}A z)aoIQ>TUjtnHptUl~`G!%F1ci6~{}_)dHjCh2dP0B}9B;V&a`UcOqh(_WK5j11cVu z*{=^DY!qx};<@4SZ#`)Y=R~1Ua?C=E4e7^Q9{JoRs>#5>;A2JxGZWL`WljOuA^hvO za2YJCONI0m#SFC~2%o_cTgf8qtXlky$yMwO# 
z%{>tj5!fmg78b(7ZpuDtzxM765&S&fwVD-pqHY&u6QKU#`#woj1MdhgMdvUP3qnffqoU5jlKag5kQokpwhe=w0yiqkE?wrY>xXJrf zbS=fWd!}aacRK4>p2J2_Uqt15?;Y0A9fd(`PF6fpL`tgh)1&0Nx;j5H9=*Vv9?sSI zD^jsrN`VD?1&)~7RN9ZUU-8mjL4Tj?MR(-!@E;QYcb=q-I?kc;?wVZ3ujqvkN4W&! zBlfRAjb%$M z%FfPitT^xut;p#195jr`$;qjHz(z7n6kqB&G&%X|fk^w0w7{mwrdXvG*1K=n@|u;u zp3_ysjKBGQrslfC1N~`9m+wp1gtbfSKA3G+;XCJgvYZ^s#c&-}N=v@(c5CX+6R;=x zU{PTbP4Fyv=JPpMmpkpk&h_&4W}v4>2z_s7IGzEkzs~?vu<;n(L42;%?UkU^Zu7>s zR>a%e+prh*wQDnQPSw28<=WSt$r@cu@WJ9XaFs1T-MS*@$52N_{?HnW1@l?v1# zn26RpJXmMtlDJ41Zh!DeeUkfBZczCt6obinCD=Ik=LL#v>!4OEx$K3%iLAeLKBInc zRaYfOQ(dxuZwG%lSlKl*^7#8x57#yzE48(^SH}jwfBy)IVrHxkvgv59eg84nN{F%4 z!%afjb?oM);_Q@EEdK_00=db!z2a~4F`2tRsJa)%Lbvlnn}s;O&5a3OexEPc3UR{ zGX%BM%gkM_RS1im@>wA>zb_fJ>qpdz&QfNB#Zf%A1cZd>Nsr3WalNK%6rc4HudeXC zh$MBVukgmZrrQUtG~M*`TMw&)S$r&s+tP{Mi+tQ?6EL$0561df+HG(Uxq26fac4KT(Z`+mmWFprK*^6FK4!v--xTk zzB;`=B_N=wR4{yxbE^l(eL>Ox;T`O#s1gTNf&Ggp z2|iA!&AXp70stl-c_92A7ydhe7q5*r8T#*x!Id%>^9PnmWI1flw9FhYQN8) z6}%OpEFVbOr?|H}U^V8Bz057u7k<%I0!LZ%P}ljOhEU@}9`$Rp-gno|ySbqP0;ufQi{tg1`>PrrC1*cq&uF73CSE)kCEP?V z4oU6hDQAsJ+1=lrj>jC04+>pe7&7k5txZWiT&>{K#$P6uF6Df3x1wV2=1yCpiCubO z;V=qGe(~Z(5l+p|PrA02KX7xnzH9otZ(d=~;~sjETy1x$a#NfTD=w|$MET(X9M!Rbg4M(%ga*4{p{ODOi+{IFW{`&)GgUQ-73_b_L#O{s?<(ar-oNMSCCayzJCV8 zVEc3wf7vXJKr4M2LvvP(k5B4PDk)N&&|nX`LTr;oz-10I>(HIe*6uY*XmBXSK9h~!rND=mOTX3eY z*et<(Uxm~2j*;fg;g(|8LBAFI{c@g#LiIg{I*2F&s^>am-OS8<377Zkg3K36+okMn zTVh}E*dC;3m>m{b#WaU=+ST}+)AOKT=8iWX<_`;#G&ZidbZVkq#y@~?x3?K zwW4${MurccU$@QxmConDe}$Cr(2MMif{35vbb5haow1wy!OGUMX;S6GyI%$!HmTn? z8N^rY&&(5P!@f_h5_Lfxs^0IthY;F3cLN=~8$J(nvZ%4FSQ8|5Nf}JESj`v9OKj3F z;WyRo+;#K>{1rB#P&Kt>I~3+L*`SYuqv^Cgky1-RVYZIZPm7N6Kc?g(J%v2v&^VD! 
z;IJq&l#biGkZX5u71x)m&|y=cic*;jgJ~d&GGiewF8b|a)91HM4~;}>6qW*#=CK}J zh{$^=UeC(F@>xqLQhJK=Vset6KL96gQx)xXF;hqzrh*;4wNuiDMDVPXgC>Z0OsVu8 zO|GJyvF-X+ce?#04r8dC5|<*(yND)pCQy6Z zeA*A$dF1)lT4Jqd2_neyFJ4t%jS(ClhfZwqZQZC7Jtp}!%Z9oO*klN0 z><=q@IHaR5UoGa?CvfreRDL5Mw|nZj$Hf>`N=O#)A8bA2=2kMiHPe^p-TOh&=d)f6 zG4=k`yWWv(84AKIPS$g<*R{)Ozt@Y!%!}#g9u7MP*7Bgb1lTQRVxa527I<;Kh;#U%CPpC-g4><42q(Q!<5sR!UEzQl+ zp|G0_%TjD7mT3H5c%Hv4chJ>PD~EvI@Q&UDrkc6~iE|r)E^F;fK%cX%dmBNOhL5{N z4$6ms!@5>nKXCseX4u(EO<}`j5sVU&iE59an1c@0E6&^XF_K|TnH2|OI}%#YpDzVa zktS5%yGMTMl;-2!4atz~+1UyXvd0PHj?Z#=?ZcZcQ+x~L-%%zyvz;xjdN{9SSWCps zTs3`oqUx&ZL0TItB)p54jW*I*%EjJMI2)qpAStlvp^ESWkBf=SDr+lvGOyqGx30MP z9)|~$j*`(#-1M|KOBsjkfs9hBCRYH(rIt~B;)A8i3NdJ(9l|CUW0TDk9 zD!)Gxa?abj7^e+eyN+kbIr?DkJx{}}ex-bD)vsSSZ&H^##|!PLnm3Ni#+vpqbo8*M zq}U8u)XuwCNYU15`-RFNPGJvj+(Jf_-%MdO>JYj>crfWspOzbn3XvjrT63;XC6cc7 zGbNyM?>uF)j&%3cQF_$=;>#C@>Kli0@q3C78YJ~7FcJDq7r4WkF7Zi}d#hfNjvk<_ zSa0MuPUE5)qu&y1w>g#M*4U>Z9v{lNmo08@Upn&4XiN8fzN9{pQ}6vibI3|#E$O33 z_YVY9*Aje0Qccc(_K-brIha~Lhj(w{@a#MOQ;pGl^VNh@6L*B%w`!~Rr89>^qV}^! z{3~&YS{C2S1plyg)YWxr$&A?zW;!P!L(WEKd$69l>9Af*MMsxZY~Ss6!J)domfWFd z<~CIAiC4HoknqM^mpC6|N$$LGn6n)`EDQ_Z5E{#Kh*=K|DX=q5)p6e^y zrHC_w=D7R=ERRruN;@&mVLCpyFyd4S`2@F_4_>_MT9$Y?0gH2WZ&Aqzo-5=~tHb>R zD9&$2Oi*nC-{VD8`gpWsQII}HB6k;7@g#S)2DUdVn)7sj;!*Gk3Axzyn>J}D$6a$L zSLJu^?3|C`*%Wqhp~%W4=QG;{qRM;J|_|rD}5CW=OVeqg!?F%;nS5I_w@jeN9`0wd8|=dBeJms z`JWxPW6H{QB?LNl7D_U`gg)~f1!ETy>kF-UeR5b6S3pxN?s0EesoZKtv$ne^u)H?6 zUri`}+-mZ|F3UEp#U2)IY1LYGu!Snr{t88?t zqCF9L zIw4}muigsnkmDSdVBLMhtD}ZK>KS9UrB=-Q_p3{lxRT;HvRsa{mAC>|Z#{8+|Gs_D zc6k3_cd`D?%h_f-iMinl7hva3o;3n>TNY+`*LH+8Xos zz$R`3cGT8()37}$ir=ZvAZ)RG*KBWV+04vLJ7RO##YIBGYb}G)P}%Ek;2f7Vw|Q! 
zVb>_Q0*`=+@(Iy&wgW;dZV4vhO*XH~?#4VnKR;x{UbhQ`HuHTG6BD|j(a*;NM9-f; zU#04@H%6hZrUrRUc6*{HOGm1mSO9?9ef;W^z;w#+t#<^{J@N@IIy~4N8y#(m=Iih2 z+3Hl%^EIqh$+PU*t{bZW43j7`i1%qGZTI+2&A9jfgD2!^WJt?cmKxF>}71>G&1S zinr0xW}(rvw6t&Qw3*^Vn3gNcKaGfYbEyIZ%0Lptn5mh!E*+tC!xTVDZ; zN|cNFQ3$}i_?HwFi4QYC?nv#;Q&U$L-pBD5ZeT4Dgs7EKT04)xy8HHB(-SpJAZdVs zVl^AM9Zw_WdB^hnmwSjx6SHsd??UOxp`DF>b3zu4y(&BzPxac`+Wzd!w{PE~Z(Ult zw=z)=@5%SYPav^QUHV5X^qogD9#V+0j!%zl2P{OTqz{JG(L*($u+j5(n(ooA4}Rps8iiXbY@f^+f4(UJIcDDs z%BMA6)MW>iY+t4^$IQyQ{NfDx+euuaTTdW3S8Nr0xHzAFf4jA{Ra{(bUcNmU?XVCq z%&B5LR^uCYFiJbF!|Hg2_SB_5(?I>~Th@I@EWCfcI|c9arHX@fmiRCz;w(Xj2b&eU zTU(*~AG#D}^i2kft$RLesifz_viS=f6z&;kl5&|$!#?iLP@kYM;v8uD`s&PL#o+-E zCXkeF-nj97ktF>lqh|R)cXxMdYwMba3bAX;MdqlAXEQmc+H!%`Dz>vg6GSQ91fI9_ zb~XcZQ*xY&<9^X*Q-3FLn7dExW}6R}H#9VG8h72Od~HC`dOPmT5#IypNA%Wfm742q z#I8puTHdHi+g;3%#%7LBO^)46&R~4rW}gMm1E}Tqb?(a2$lKJ z`B91}LExi3dmu6W_&a3>(Vaf90un*_SH(`;uq~@HRfu{lH7vyt`$7m^IoJ z-J&{laFp!7E7TXJha*M2sNZgh>R;2&D=tOkA!I>~(wZ-c9?_kkBj z2R|&HAP&m=~tpqOxl41L9trjmHn+Glb7TpO#r;*^vW*d`FC!+m=4N(dp2?vllFhH$?R zXxZ)VnTiah-4k0oJChR=;vj52{d8^8b35*#y!;hn;@(uH%xFFb>CjsrV1bI+0>$e! z%N^~SBJXHuXh1?;EM4_;JW=GhZE0?9o~}rG`IyyQ?WUlg1Wu?yYXtl`jyIbiMl!-d5X%TIdJw0j? 
zsj9IZCzC+J>B`g;u$fbqJxq>qwy*UgD@Y)oxM|93lXX{HTl?@}zr=cWV>RZ`u{oL# zl$KLxE(RiG1O;D|_QVN$4-5_pr|UIE(is`ZPet%z#ueRHARsU5GcZgUZm&X=ve!JOOWv;$glddnS{p^9ljvN zg1d(G?&W(c^&uMepKKS0Dm`#zq@~NBjeKJ=5ou3<%(tsk<8!%@*}ncI=7sx8KAxVp zZRSGPEFpERXv+nimJQ`Lk8aganYiMaBq<^B$KYL#q_dY=Kc|x-r^g{+wJxFqRt7PK zMZ53Xm?RZJEfdXnJ}WB=00)Dj+APj{YN@)01`d_%q$H8#_OPynVcZ-yADL)!x zUN#FHs+oMK9u65%ioXkdvQ^6g##y^IVpHpI7PZ1_883t>uU&gmAIyZV-nsyp_xX8v z9JZE5GBPsSe0S7KY&ab(K6V^Of^eMwwwVdvc zxMx)UeFeDt;zvTL0`pA_AlhVQWnr-7tlCv|K@3tAm2J_DA3JauBb`(c{y6D9{biAbo!u)YIRS(bK}Mhz#F%g`c94x`}km6 zP|}o5%+4|}`!q-1v6`sEU(R%tGFW@=4LC8pEdJ}9q<}z0Tbo>JcbR)&3T1}g?95Dp zrLJybZ~ys~m6Zo96;c$<7DBag$g^BrA#F1Rw5GlCLPC|r*0VJ}#DK(B8rYkO_<7>_ zB4qW(vihT&-lpC-pJb~)Fiv-=@g~W({S6 zb$z0_wKlwvKi=Kc($dmiuc@hl0G8CW&Wdb$m?HA**|P@^d^9ovYQORHd^^<~#;!m9 zJzxco;6Y}k?~M8PU>#Y+Y0?NuL+cB$0AGK+r-3Jd{fLwd|Jzdl1W0N<$!&KY3zz&V z1;wxmV2~a(XbOFW&ie103Lg`Y2f&JqO;0}uIV-i>W$;lTE*auRS)TQ*qJV$^4Gj&1 z;p^9@w?`kU(UcSxerI7(FJ6PvIEzFmDJfZS8f<&ze)=SCQa3zY0Rq#B6DNp?iP7Xs z=l8}&ob%_~jPizthTgn+v*KvMNs5nulYVSr!EERK)ueUqr}bI1WG{Ui6LX6(B~;5P z=X-tFNc{&lknU^s)NzU|Woz#oH(!_D?T>odg@Zuq{Vh^XAhVyh5A1lRGNjT=))!j_ z3_n9#0%9lY8Nx9PDxSCZ2v-Fq>L!->_6b-^O9|4^px<_-XUQ};`7`CKOcs%mcUjMm z6%E;}w6Yfi$$rdRLdwg5yNX9rQP>V@tK3PO>?tlIav0!3l3-d=wLf z!si-HCt2S&&BiN?M=SNo50`i-O|a!fE77$E!QE5W#^-~>oj(S>dGm>n+T%?7ps2z; z;!CE8pdm-n?SLyOKk>%-KNRP;z#an^Pg0}#s8(_aj*FHOdx z3-*Q&601f0N0CoMiF4UOzLo>kp>mOvfZ!Pso!_+=vyJ$!K5{1K)oi4DhLz*@B^&IQ z7kz4eS6%|%^H-*kAg%U2t9{nq&w3Vdi{#WVc%ZT*lzw+gPDxP#G*~_JTJ%F=1nQy_wf)XU%Xs zePCcdm7BTn_TOHArtii-jZ&1-Z~-%x11ftB_MDT|=whN-@kuM$72z45XGfcr^U$9go`Tp%ZOtY-D0)&R8kl+Iqe$ z*AO^F#%a`Xfu-~z46~;Py|=)zp;hUgH#ku|!9?14Sv1Rj-6)U4AyrI5BAiY4d=%PJ zU{pr`;T*<5gSEASf&ysNi7U`|KqWLT9FH(Bzhm0R@dJ>KTOi_`)J08{iDl zZ;r;JegF;&t`e`k?5r$S1VV*zabzUx`LcFAOGM}pR$GP3VZ5k6iJ3m6r-g+D?F;Ps zO;=QNRD5B)QPNXCe?GCxs;au5l?jl2e{V`)I7eSbPOk0Ci&J?jrVk%Jgn7%(Y*#M_YNlF?U7~HlPty*ms zV98{*q4x`KLVJLqh#jJ(R9O@fB_(UUX%}M4Q|iTPz%Tb-a+$dKhr^HSpM8_WC8oJm6er-#!yZ( 
zzwuh-M}jtDa6iC4u(h@h+UUe-prx-4=U4&O={rOVSnrdpiPrhLMnUZLtz8Yd7 zWD%8fdvuSILiu@gU?1oa^4l(`0g?pM831gM)mIwA*ukQ8`H&=uX}pA!Ezpv?kj{%c+r!2499et{Iu zTHz0$Sy|OLH-{8d0DlG7lXTtJP`@(dxGGiOc`K#J*PE_PE&J)4k2xPTbaci>Dm?%U zn&dZ0coUm${Gf^9wH)_U)&qoQJ7ky4>jJ5e!ex*3&y4oI=_kjjD8494$KCk?*u~}Z z#j^1tWC)~~voRzG*!s_`tvikK_IEd9?pRF%R4^Hs>q=MM-`j&ew6wGo(8>5sh=5lw zUnYP=^WZ_?6gLOQePQ7@PDLdp5eL~nI%k_(kroyKnq8fp#CjUzFh8o#QHdc{U5rYV zA1Aq(lh(4K$fr##E)E0Cb9HqEWR_#CsM27NW89sAA9PuY1BeIM!iIWUS^#z;+Snfx z`<|$AA$f6(Gz>U-uVY@D?&|$kXg;(c5+5q3M12OUA4$x4e3dN zj`JAQRPjmde!jYfk5LQp<#u%WNwI<4w5E{+-u*0t_AVVJGBO!g&QsEb z-(^kwTaABWj;?U438{~%lU!sj;_4OpXY^US!iHNq?qp>U@j;nsw^cC&J!~G9O}$-n z!b2_i_-0VC@rq2nvi=BeGs>_HuU9c0hA&Q^r7f%wJcK z9$%rl9xVjrJIMxzt(5H5UvC5V4PT)4sBG$?O7lokfey68H7-X$pQj33gqM(oDt}gQ zX#0H*k5Ge+Cxy|GQSyKHgDufM{AZKC_mJVAZT->h;hvW!(Ji$}&2tjS!!ur=^8`m71AJ(?F~T7>I0R6<-P# z?vU|rEzesn1V?H_o<+rc*Gj5lx|gTt=R<_C^Z3ZfGT`}0G(%keAX>nvE4_vr_I65T7iRQD6D#fIiD|o$j)X%Ao_q41gB_cr!{n~ss(02!#KFOnk6<& zm1i+xE1e%EoWsDNuRDnhJw+Ks83vUCs_uM6(@IT!eZ6p0jQtEN-570e%RJW;DCLA5=eY?|PoaQYuaR>e^a>-Dl36S%%$c2rvNnXz1eF zlVu3IE!(t(g@vKn088c!Jm~9lcqyr=nFpJP4wBw<>1N2R0$?O8!je0UAP-ntSuv%- z5F$cCXqetcMl#}8U1c3M$k8)4u;*l`fUd^J&yS$q;rVk$jnbZJMAuqNAhUfVxi@7Z zS#M7QfuHd)kQhyebmLW~tl&Ru^7@rL^(<|-`KeSWBT$5cG{enmYY-7-0^dwh z!_#MQCM1A*vEN)U<&3ge7`Ow?+sl_HFQBl)Jfncx4?|B=GvDSw%)uJX1#4haxaNOg zV4!V{KPyVtyHqJDDM#DZpo(Xfu6Q}3IL!tMfyYDX0?zWhOeG;9QN=boYPvDk+qNh8 z3Jc0jziUzl$5NlwtDd3!Lw(d-K^u|OE zrNOB4^77(JO?ufZPMGIDCN-w5h~%*~Q&v{Cvn%Sc1T!njI?YY>9Ciq+1EYJO%~P~^ zOKqxn$qiQXu|_$NlR(-5M@J(a`TSse%2YtJsj11@+ImGYr$4K#N#fku$`FZ?TeEs? 
z#G}K|fb%VVITgaJo^8J&tjo*C*T|fdd)eX|EGNM|IV5RoDK-Q|8*u@Vzu1Yhh#tFI zv?oT&oZ?c5My)?Zh~~~e!Gc-xFGgY}N7p(_0uY5fLHCc9iO61SZh(+>D^F!TeSNL1 ztll4FI=3Z=hlPa^kbJ#|GZFF|sl+{2iEO&{w`Vi&nwzH=sUNoPRzG*{+_1~xzHc;3 zx}tGt)2$M+{#g@n$jmXPxjfmiSoiYsqRD(z_lk(=5G-37N|0&4^1!>D*ynI>9~0e&8c6NA;@oZd+Qj2t$Xxb(ISwTpJMs#9Z@Ru!(k32Zy?ZgH0`UHYtBAN)bc!z(*fXD9D0df7NkaAXZE z5S23uWo>>YX`cZ_aWiaa+ie3K=0hP+4!jY(8pt<|I1bohr_HFnwmBP<%p4}ZuXD^M z;`(C8V2WO#sC{LSSSizmbegbYs~QNv43!cd@aB|DkbRjGfn}3gY`QgIX<@PKdg2t( z;r8wYG@}6sh)DH>B$<%f)$lT&%B94Zn3%m%At9J*@TKztbPQrps;XE~X~W}A%P^}K zPHQWM7zbPVix=H~l6w&W0hdWgj2v)q%$Yx?rExgeifCv=qcBriKjhtx`jAUS8X<4U zT3Nz=AB~L+?Zl5!&^U`!k7YTvbR?}n-hD$JuC87K*2G6THiX=3x_GHH)duATUmU=K zmY?6XlEWVDykmHQ>#0Ut)1+g zZh?S&*cad{m697u`#l2RjeFR!ePv}d`>~q2cm80uZhe&Pkm2q&`jh~0E)yYo1v)xW zz66gDoO-re`={Ea64(rsDdv=@Cba3$2rFY)Xm*ns0ep z{5X+ha&i(26H{E^eL!d^Z8ITZKX7VsbP#@0F1=N2f1iQ9dTQ-@G&|DcJuv|d3B#nf z4H4T@F-67T`6)@Y=1o+)=t0}--uWJ7{rKuYY2RM;fd^n>%VEFK*w`2^;>+f!1Qb)@ z{zgB#N_hzE39j*_l6^6tg7_AmD4L2cS#9DGu9zPz*@QX*!YoZ0RP<>mrCZN@!I7Jt zo5ny$O8h&>TQi;?*Yg(_QS$}E&U0C{1|D5E!;w(-McC`xvfBnlCrba&eDqKvl)d5P zwee2BBVaPkSS~Xd&^GZE{QAi+4i~|#osTA9T%Ev438xO;IK~v~lx>WlUFDgdaro|~ z5B=@kB6ydoaXSFK=%HOa0^v6Ox6r`v<3-RMzTmvga0v|!Mf@CJdg8u2ouYn*Xc_|} zfn5dfD2W1=$@QAy^JueouY2Q@+fOdFxSv{DvA3^1VfZalivXkwg5M}87B9ex_>;6y zx3$ajZqMnu`Q39??vFuLnfa}%APG7hE~d|tNA^t!Rsywh{5DCzFVO{gzI(Br$jqJx zORMP?WCbOvUwF#ZFB72eR_1Kuke}eMneh~Taq=J7YQ#h3h==*#&Hcdc{8M-V{EUWV zr!cgn(R7pon#B2sRC6>298D}rFyb`r3doFKSke7RKr zTXN|$sk`gk+tWitM#dDpxwNEQ`aG*OM2Z6|Er~L){G&EP(=ZtTzQJ~_B*6a`au;)s z@a3`Ekl^5%^&N-_AUh15|HKO*#PgW;<@V<2dxwpV3b3-uUV&p9_G{C!s;Ufi(3C~o zu}W&yJM%AEhozSm*yW+C1Sf!Y{f7@9l0KceP~=XK%pPb1M@3wNT}siwaejGIB>q!c zT3{VO0007Vy+D_HOC*wYz)s zdjOT(9d7Odr#4~<9su@t>jgAY6620KtJoR856H&FxjB<9e1B&3V$U$>t(G7Gxk0za zq?!-@KzUHEQK_`<**Q5LxD*RBGxX_0=;ptq-6#(f@W8sb7Jd|VV${i__TgT%hym-7 zp69VKO(fC1_p9Ho_jvs}At?z9OQSz(i<^py3gj6itET2K6tZaOKY{-r)vUz62l|J6 z2D)O!gOxhka0U{*AnHcsD#stj@XMi1XrAU-qlyreX>-F|V=*^Rolc9~h%45UG#wIsp@;`9q6Wwq2 
z!;{E?|GTf-K>o6gD`--6knYPCZXd<9al_L`yP!w7w$Smz!Io%@8bZk!Y;vDI(NR)< z{X3)G2EFuEQPJ8*$MKDG(D=Ac++*60^OUHr(0AoV8ps5^s{yi^D{o;=1P=ov7fIT zXM-)#!jC?GY3Ad|QzO!SnN&>_4%k)SXGV*C^KT$O{KSHjnCXW<;yJ63R$1x7Fi-)o4t_PpMKvTohu4{<`j{AErjm2?lJ;qhp*#<;9uLQQ;M(;g} zqkQBMj6@$4kxtu3I(&IFd(}h3;jBTy6E+fzW7f*C1fATVdHv+K923EL^(6Pk4n8r? z??)j!&PDhLcVteDfCq2X2tS$^ZWWMs(j`=j%}Q}b`Z z8-3R9AHp+G-Y`%!)&J+2)PGUB$ua*Ui$dM{hfa<@j3wC39G@;Cgl>nA`u&2wGbYmP z7%wyUMEWV(iH(|#|7zDuwp<$xv7Sl3%%~hlZy6oSYdNVm%=W-j3|b;Sy@t@y(a{YP zIT4*-4>H8e|0MNjhfhBd6h(|d#~?QX;xXVxyXg-eeL3PsEXxrS0ADSIhheZjyYZXU zy{5s%$=MhqFbwT2^tL-|DEqR5l&|}k5}IFcMVc3KE?aJ|Oh8YnlFkK|Aa3q%6U+u? zoqX!Q<>AAFl6N6Alcc8kA3xHvv1Kb|YVbL1MBdW6XKkG~5e~RdL{!v!f{&M%fJMUu znUKWP@)GI(P*rtyZf+nNHa8?fB$lgRSko__c8!gh*{sU*GPt{dxQ>G42i+W#{)q=H zgE$4$8ese>&&`$3?5?e<0-sStx@)g)L)Q%i0`dPTH8=hxAm?8N03GgAxLv{yoZV^- zsrhLvEU0Sm4-Buc7g+<TSq$9tF&?r;F};m<+zrV9c^ahw#-r;D@1 zr#IZBFV_(0!njMMq_dNg`P$WayZh9~$$Esy*vJTph?)rB-Ps|(V|kx`3a&6i2z3>h z52GbYQuGNyM4$B;5GaB?@O_htiXJ-;3~ohI z$ALOuvT?O8Cc{(>}~8KoO4jpTQPy{`U5b_aX4=pVS^d zHe3%QVG`4)?F1zjq*#TWI`{uM&*!@JhLaxCwf6T#n4V07y z5Hz5_>z$?npyY~6OZ&ygkHJ~c`$d0Z{oZudf}QPcI3NLF=gquwx*aIu^74UjLbar% z1O=dF%fTuPvSMerx#QtZ!C!c=uzvfD;@{f4*OiZFzJg z){GVEX#)_h5+(Dk)_%Ako(BCvtSvGB2XbxS7mnL{W(P_p4ScHGJA-x?LFWBzOXwHu z#5#gITYLs2A75r5P({0cl@SaxH1U~cQ0_QkFzU{9iaNmtE!8FslHU_S7p*Vp=%NVE zwbDrP>Ea8NlgPW)Bh%B70&9*B=;ezqW)xJ9dC*qduqpjNDko*a^BJwkcTxAswmNBHn*Ve;rQT^QNE&o$V9XWv5;Tl} zBT{JT{+G&W`?lUldNtk9_D=|o_EG$jf)}rabVg39wl5^G|Eg6G1eaC}mhm%G==(-R z6FiN#Ch9{R92|`E`eI{a2L{v(o&Q_ht9}5W4snQ)X-aZzM>|>>as7d{%SIf>uAt7@ z_5X4OMe;e=K%NEv5CMC2wTrfP94$DF)~8$J^Yax0Lzq;#tR^4CH(n&+pk-i4EiVTu zbQSzZ;jB9D6IUt+imWQXeXFaluMS2Z{e6T>V2J}KxoPK*&Rb(xYtGs3QFR!A(DXmJ zbO_wQlOz?*`}m5sj?Un~fMQxBTEB;z)y}m7_uPz~kG(GK6GHdy$vt{>>Qp9QfbegQs5lf1MS#Bl z^vP&peaOg=rdLu_3@?+t&cf2nvHtVt&sgMHiGKNTb8B>y1nu&!wk8vPS(xaHaHpcD zKi+-*{CRJ0@0TBCn6=1$-gaC*8DoX?TsPbp z5<0!k#&7)pf(r-nS58S_KWLo6?SWr_aM_EOfgj%h1AXh?GVL$H$zA3*(Ia{E==VuPhljUNr<%PAx~y^ew^2%w(yo8sgxK<7O>fO=d!E%U 
zxeOg5MTTqT$S6{JawDwH-O2~&{x4)2^*Wp!lYZDQ>(io64F8r|pQ*o>l??MKgOb?z z%YXT~b`!uD3PM+(X=T?PzCb&r`(D&b!;eDFc!wl;l3%Nn6Nr5fW|vcWFK(L7YUf45nqRFMP+|Y<ec* zzb%xmhmv%mRe5KLQe&dQH;6grA&v}nE~y5$8$ruDTKpq1N&N`N|F@X*CvzdP{vYPl zC|`H#qfuha{Q7NzfBh|l-?G+u7b?!KLkZ?%mkHVAvp%zA(0E}>-gtTJmfKd`4gCN< zlT%#4p(xn;s9G`&s)US61a;4@JMFB28F+!nqn(~qDO~g6Lx^NWO--3gtjFSBK~pU* z8th+8Z)5EU{an~8iHyH_;<@D^WZB8s!5nk_?Tq z;>xSis*4j5SGD4@^AlT8QtYzH*8A)+q$at?eup*XTxC==s%%>jP*l`fYlNO zCv);Yd=RA9EPdWO<7jchWGl}TsGQKj^2dMGi7qfBrA`=I%AUzGZb#6nH*78rAA)%~ zC;a3shqM1m`fT@e%)1=l6V*AZ$whqY2}S?_Fm&fT=h@267?|N;zP5n<+ez84xBVZ^ z3=PYATLpP^A<4N0g#?yhnh4Y_T7ivk-zXW#?um(s!7mX#`I*Pk=;gxB#&%Cg$b4jk zNUkrb%`ugy%zj@sR)&B3#$Pu*_Ct0W?+U=dwiR|Z5nmEr>w|YUCBJ?9RuxQ2N{UOt z&*&srS%Y*>PS*d26GP$UFDJ&oR2p<}OcN=kt8jufI@Kc$dG_A~hrw@^td5d~5fZEE z3-Uyu3)WJ&JV<~>wJE6S=Vdxb0z8XE@Y^y_Bor%xo`Dbha69JE0_Zzi+rxUzgHdpV z+W}&FU-)x#a}$hO4tMX~6%lctNBJBVqc>7$WbgYub^M6aH_THpH(vyQRlKHBz|51K zN7s^Sm6epbLF$f+yIzkTCNu+netzKE`C}p_gE!Uez~Gqbbj zt{?0E+vuU5F@c>%mZ^g_hJgE{z^Ru{C$V;^k)rNMa3A@ZdTy%>vQYh{uJ#upp3{{zF-=K2I%{vi8yPQYQwSKX>1 z8jVzNX6T)8B{=p!=6aOcF?3mz7MDBRD$v3B(l{m?`@4RbW&4=*^ zb%RgBJ48FF)Yt;KShgA6yAv3K?sg6rY%9Q~1!-FVs`_MOgeWAPv0!s&LV~^+$`G_2 z_g8e<4sCQ3y`Az*Wji)UK5y-8X}N@t-bR)WzFpK6cNM`x z7!*+uDFta5LO{BtI|PBD%K@ZO6r@C?q@=r~OF$O|q#e3NBxdMtINt&H+1>Yj&N=g! 
zADp;J?BZHSERrWO{LHwQAX%pw*|QCT+~BUxVgw z{XK@uDw^N9kp6RhedXT4`uuOqS@t&Hc^K)_7&2ZnJoP8suI#0KETv-4VDm#MPglG6 zgR$9&^_GcVWD|y|9GxGrKKbe)^>b6bi(#_2747t=aS4(sH~pUxJDoLR<1~SB*1~SO0T&5GD>jp4L ztw)~orSyS%(ZkPGRkKtW7#Z1O?SWnaIZ|@?!CJnjYW^Dqx+Y7XvN4XP@V@fNH9kY2ysclFR0I`x2!>Z>0^gG1f!z`MIxeFZfwt99`kI=BOI3x&te3!fg-^C;LvX z0tn;b*~5cnHr%oFMNxXl#2Sn7Xqm!4f|gGm)}s?d*eiWsq`vz(tQU74x;35Hob|HS z(?mo%rI!6LJNf#x%${efhHw)E;FeADZ}Xt1Uz>4brpbZ)tGQN6M&WCTJqIBU;6X)1 zto6ULctX?-d@D5{-$#uY8>JaWWT{SN7iZhLuR{&&?c@dxwymmiCWgPnIIU|iOQcRFQAq;}W z7n9@TkAF6v&vxH|3CY}OobJw!?8d@bKYQj(C?^>i85NJM(AHC*AS@9!v;NB$?AkWi zOroni3c4xKy8`Sp6FohT^YTwX#wW`|2htw+VeLaVu5iwYQ&5fr7Z!Mn7HhBXQ2t35 z?}mAAPzMvzyb0q1>j8{0O?br3h*SryXlsY$AK3JGy%O_qtQ|Zfx*|27**N^|+r9+h z6Gc_#ne>4sMa5;XF5R%E0GJ~~ATk9#MfI}CM9JN^m!HBTD#g8xiek8;M8)?eGP2BL z&t*;bO0UivIn%Ivzjm1xlo9{}*R4-`iIlV&$YfBPheHRR3Oc_X?>xrq&tew=5vL9* zmuj}!($Z3CIy;wT-?>YdZf1ktQe=2Wz*PWe1No`R`_ARt8V^^|q{dXTR3{Bx0-@ed zL8(>eTl7}o`RXbf*yS*hUpFBsF?Iu~8{;QDJ~o8V&XZbkt|u=I5?d>MiMcEBjN ze_QAW9B^7Xx|m^A88HKnnAB>{sz0sJyC;@`hGbO9l~V5r<n@vSUE(L^g zH8;yi>Hm6&HWBRN-Me?g7H*U}ZoFlr{4uk!we$k}xq3PNNZat^#MQ0E<^!GRvHi{3 zfdjL;I`1BCoYT)xV=>-ZC7#1y#0N@M3igN@9Dl`DL1a%4uI5q4=U6#Dh6pA6h~VXg zar!xK%sL;OpE(fH%sTt@uscFPkb2u`r>1W}=aqrP5MV0)Gj<^}Mws@c;QSoDSHhtz ze_R86h4?8(LV{@C2@^6TUt5Kjvj3zjY$j3|dWz6JH>OOz&&GUgo3xu)ox5 z@L%sCzCb~NBMj>UD5}&yKT9l!LeCFv>8jt_VUf7ez(z{k3odLKVBnV-{vMBQBDx;3 zzL=>dql4V_3Ec+S`2p2Rpj-q71_E;gR{7A35D=)#p>niJ zvVd|4raB_`bsK#HHV_r>&IQ;rC_+GM1Xx@E(>RQg2?+9{+ZI<=Le~IUXzw7tC68KN z7`+D?qL3hgQiOnj03Tn>(foZ>?L;&^V%aQ^s(63`Lk(;(4tOJcEp(F7J1dNk-c{7c z{@UrD2|A!=wE7wPG(aKO$W)?@vWHqO#44qx-+t8W%!ASxHb=9_$bYPbk#B+x)HLxx zP7K00rtQgkKMHEAm6rX5cpf+{t+A0&Iea}z?H^x%HJu$qVgs7ueC&S?x6yEYi`)r}>p(N7H0JO{Zvi;SrWr2Xc zMUb36Z7+-T)*-|aQS*JoUOGH_3A>6LnyzaPap1o zh5hSsb|9hQ4{@->$muZrw^V)4ll*&KGp=JHcsFSbJ9dI_ceV2YUQM|KlZ`ypLqvSmo~FBX>w7IW5ae)cXIAllde1lS|69kj&=&$B_VnFjWYVbe)AooI_St}cPUT11K(N=_ zNdR+E);(4M8|aXUARmVePZE;KItQEZKMVZvo4Xx}`m@QgA3|+slJz{M=VR@f8LxdmLxA0s@d1Hj^PIxwZlf=~SWo)2pmdLNY{Axa6fF|w+3m{j( 
z>@lil8Exk2Hh~;-?V_~s@ zra6#3?pAr+hv)|~rW%OXz66i~@|w@Qi+&F6b4f%)qYs7;vZO$7HE~%W3Tmb&n)-u; zq!){Zk`{RXnXb&xJ7PwTzlQO|g!G@oFfuWTt{F^`3I*@MI1V78QltP6&nA@kn>JZ$ zcQ2ef_t0@K;^)-V&eoP$I{WbxCyWD{@GSZT-nfv^(BwcMErEdWa0E45)yvfTx8j72 zPVvMY$9tNqVfP+_I7w>^x7_I3>T1`_TGJCn#gJ|+<}3GZlsGgvTk}R0w6wGUND1JX zaIq|v%%r%s*K*C5<+$C;utY(a_r}+!wht}8m;2Gn{lg0EHp}9`xIDZSbBhtmPWT_Knq$&Csr^29?-_% zuid50oe1%ng5tx;F>o3p z{o`++k;2|mbDNRTWg=W1cy#XW?r`+y$Ho!`_8?eZnQA8?Ce8z1A_O`>VaiSWE6TB0 zgeb?jo0sGoe_!JLQx6`r1KO%O%J-+P%#9k zfmStldOiAIZz<04uzY5({Nf)zKI|SGX?iSC*!$B<32Of~7ASd?DgUS6+LZBm#@~|z zr}j-7{cyE?mV4<&+us2UoSv(>P)3Kq4MqTIh=cT#C*?>z6_uak<1{N_De1cY)plik zaL!d==D-JoSE-`dQJWl+<$3(StdiE7Y<6IJ(+Ox8??O)zoFkNXRrSQh#J*My2smoH ztfjuat{@I+^!ohp4kXuHrX51*oxD>LQmrsxDif`lmLeWCtUF_|}QF9QN* z2Fvo!1Wj7V8I1aSz#DgeIV&-rb(6{P zC|FVu2G#m}J@>^=ryyNm8i$ku9%2g5N*PDB#b)Z))_h_q2|z4-tGm9pF^aEe zty3;DyakOwRjDJy4?)-<+6RzGLm(z#l!DK-GBc&`fz~F1Z5TZX0TPhjVa!mxkJd}S zz5<D!Kxow}CtxqL#^^7uu7bJ1vpm40JuL3a%%&%J9o+M5MJ9j5jii|A1cLsn&thV2UN@(Vel3<6@V8LyA z4i1$`R@X=<0sP8>zktj?$yJ`aPZl2zb|PT$K$d`#+d6hERPJ`Jf@{7H5dt56Y2E}t zS&zx;{GqW&fz<6D?3prstP&p&fy)i#X z^JO-Ggp-rA8?LSw61Tdx2I^>)E)ULSpV30vLbU(4J>q?U`k0B&Pa*1mm4gq5{to&Y zZp10RgI<~-2tKg07eOXyQ@s{Jll|{l&Q1~)MhCMD?1q)KHA(>&G`LoHQyGW{9p#gF za|{8Pbn2mD2Or^;ypxo|uJ9+L=+Q0*!*2@NMLNNTC?m;Hwjun&mubA~b&^YkP{qQ&V1MM*aeRTZ{&?j&$u$Vl$=veONlkL zx6}PA_I-vH1~P6cOI9bAZP9gwe?a;7fs_HsqrvC*R{f6@iGahI9-rhNdR*3DL9AE6 zUtm?8zit8eZul<`OA!4bU`#kysK3f(4AYLcHPFhY9 z2avX{JdLp0x~MK(eDh~z2k$j3QNk?4(?H=zJY`lSCC?x%qG0geGdxK}Z$~OBLl_w# zw%hlcZ3MliW!r3#b}}N+=TS%zlReT~OZW>xP}nEdf@dUQ<)@Qqy_>&K!(n4DB%${;5#GV~H}dYG>9>6En2$!~8yO&Er( zy>bVQwaD9=m^8U7bS>v0z)-AtCR$8-z(;eK=gL3Rf|0;R`M-?;+t>UC)Zx|MP_S8V z_JDR^e~Q#a=oNq`@qkR#{FRV6RG&IPXCg~AyA+N+C#M5|Z{f&i1C|OBS#W-4Val&V zpo#?{Kprrepw0z8d=tdKqoQy)ho3T&J1bt6OXuV(_ER_60mx4)wsYAJrY*N|k3?z+Mt1 zjgyziO)ykxIb7?wf3CF}1_7B+sA^*)$V@Sg<5mF~1=1Ga9ISw#itpAy>DIs{2|OvD z?O8VBzM%_>F?8>qh`9Hc+30to#RD$i0r;ko4-_Ck0Rsf_x=h~zrAR_l^ykC`DH+)T 
z==_}^B)S_d?2bo9LY0z%1#tCov9WHD6#y6t@*Bsl3`LVjE%+-K(%Fwd9s8LdC=(K; zdqXIUCHv)NcH>gfVrcZ|Y8HtPn2C!M{$G~KER`M576SmM1&%SIy}8*HP}D~L%RPYq zOkCd*gOJ|9@Ni?31vv}?0%va5 z+YtN$H^L+m*H@4{TWjE$43D6u{`L+ulq^b;?f}#omGvQ( zQ<+=u1%VGp6miPQoPg60Y&H-^H2k>jD$kd%r<10CZbdom}o!?Vj71U60S;I#sNB7;0!@4 z(bH0dKm;Ma^--wS`&6xxzQ zuM(bSUb~#dRR>P`+ve=X zMm{L3FniL;lG{P`Mf-R{KZ#`HI}S6ZxU3hY20QQBgoQmHmZSYFoji( zjg0_^fWVL2W<zNoBLw)0iHX5A>jRGgKRWHW*k8d$ZMV~vX=7+;A|3xhJ-;DU6)s>J*(7mDcF{VDvUad zvkN8Nc4H;3nd#+57YA$ykR8YcnKG$++)P(v8}2P@dwKrB#3ZO=3KYJKt^2Ai7tIkX zxL1;`-r1E(O}+nPYC{N;vB34+I3`hrJqE;-xma8)IgIi(wf>hGv} zb*9%ke}7teIN?TTIIS!S!>$HAM^B^%@mV@@8Ot;Hr^!(*FWN>{Z!b4z;(Tu3c@wf! z;uZr;5A|H!w;K0FTz9;NfDm6QJBML!mMVLfTQ%G2{4gsc?okPUx;*8|YNu|X8ojbn zSuUZ_9LaRAe%Xjqe0r)=Hv`2NNu87F9FC{o4j43Hw=_(PZDlR@r2jLd+yha zy)wI+c?U~0$LY4-zXJ>NdQSRoq`N}zBOZGaF{?;d6aoDpy}rV0ttv;kEv9j2XNC)( z0s@?zorm*97iW5%xJ|z&e;BNEC`w6bKXb+jf0X(Pyd^-$j7#OKQ@uAykBsD7CoE>; z0zLemnM3L2JJFW?%N4S4SBHL`&l&<*T(;b^KBwQ*G?8y9c^TWkvP5#vO;Bk zi8aUB`%rfsE_eipDn9p(D(f$=4Tj1M>~Z_Yl%{YA}hIP70=Pe zL-mwBZ|_|H{wY_RteF^8&fbJ>%EjzwR=?(o}WvNppj($^mLTZ7wSy3J=(9Ai{b(|rX&&lMb8EG;aUxQTB;Qu|?RQUL+Q{Ci@eyJF%dtrYpYJ__Pt z3~bZrD}+ST^W)=tgOB`}V$POaFlZfNm)(L(3v4y>-$mxg5h~()4a_t+{-z2_EK!5! 
zMwRK8*ZCtr2+?$BKpo&B3$|6EK_c-mVf3>f3*$i$#M>feby^vzMUU5}h`8q8*IU9q z$5}3OD#BRj#BN?{EcmdEgU}=$g|bnL?B)@2O<^y76uErwCzsVVK|8~0BDSO<;to() zGHKn(0h37e+U=z;aV~^&&2Yy+ulax7D^FZ|+4wsn?4t0uU(_P&Fcd~i;s!&O$nk6b z(P%;i!Q>CIIsCh}UV*z~BnQbxNT)fi%-xjt58~y0Ng5c7Uib(yK4oFD=Yi*$}|}5bEkmmmdT_=zQ6`t0F=Go z*^LX9>$>vW&`LFt=l+&9Oa4x4jm4$~naq*3SwHiZBD=(xuC-M2N+*Jrcd19u8*T}7 zrN=GDw(-O}{#02t8OD6hDfvo9vLEx(%m4Dk-rfuq&-?m@2JNqCy{b~}Hr<<-$w%li z#e;B>%&+0f_UO;yJS@Lh_t)P92Z^{ zY6X5)wUvHU>y=rcHRy!NXrOQPY4n{jq?%i;Hx z$>=r(Xiq)jxyg0*o*?)3WXa5PLY3F_2Te>Twl@~i_Gu(}PetD~_?(?jtdd@AL3Z{0 zQslE|UB$(!J|w;tn3OT@(Ou>0A57qw>wId!z1h!;~Mx4$;f88dlK>=f9I`;alaIkjVn)*5c%&Yt%ro)fUth-XLb z)1G`2w(1;kSuW=?jU8z$rjO;Jc}@>bUdz15r|fw6ImQ{qy3Da$p}EL5Ny)c#1x&OP z2S%fi8jPwU>*_mnuVD~mm|&R9J?;G;*sdG-dz*i5QhL|l++0(-<$865qk`q;sw97)Rwk<IrAKN#?}nJLZ6 zt#h!;H|L!y{J*8O*1_FO9MSa3Va;rC5k!aXWTJM^*LUyA;wd*<*mc!&qZ9Qk9`3-r z%0cq@gyn8W`*5{?Oa8T&_0;y2qa`ymh4$EpAI{8O}l7#*Lqm1e~cZ9! zCw3>kUL4oO5Tf7}g7EaSI#dfQuvk8=7FpYt^DkhTxygoSZ!am|oV z0%eE?gQzWXa!Z+Yx1atUZkyyxPSi;b+3o$Ew-iX2Zz!%eby}oC#`cm9KVZ+p+=kXW zfLJC%)}p@<>%QU4Wi!G<{ewlk-6(e&8#p8O#lpfdyt;jGr=)wl#b2BHah_QRA23@% zYdpOZ>J4wc5h%VZVuFgx3HZv_kOe}yLt*|=$m3M@!i%+$x}zB9j3D2vAq0B(gP8qu zX>po+_u^k4OXB~PWYHKy9~obzc#8HJzZz4mi8o0|2< z*M6WZlfwQo`??l9r|oO7+=VmRb>ah0GW_J?Bes$SV~dQwUYIx@1cf^oZW7_ko2V0gzQ~dm zOH^K&O~w*GSSo=i-GA)78~8x|!MhT-9}+~k_m*SSW|KgZ1IYqtE|88<-^MRLhZ3Pg z{b@mkqis(XFckZ~e#&wHF~0w#Si5oKM8n&C+)bN>UhQ|}9(<_reg>1tbl}B7t!)A> zwQ*5WQi9q<@rd!=>nEwdBniHW(mKWnm9Y29IT_!NN^|AXx9uOd`M)bNn~L749BR{4 z!7f@&ukiT>$q`eAAU8|oD1COPwlC&evr@GOSKNOUs{J%+2r|sR5{t;GSN*)ZFR0GnzZW9xE-IZ?h#3{t9^?|>XS$C;O=a<@2&qGRnb!t+e_dl;Z_Bw`-J4Fd>O~Hb zr~6k*nFWpI7*ZxR0B2_1%epapD0i<=Q^C8^26{#_=t_XitkH~CxjWQ<X(WEI(-q;9;?n7z6de6zlh%GZe8(m8DB(l?YJ)#wSz z$$QJ4eC*Hr+V_7686Bs=)7A+RlI)Vu;vOFR(EI>^fqX2F0bFdmy&Y&;b`Fw0zgnly z$2I+Oq}3XOM2@}>^$UwaK=#*_wdLf$@JAoN{DPtoF#FKO;J=>W|MRY2{c79)X9fKK z^@HU?$nk**rYqj@0gh+-G0v;47QK*M;a>`$cwLd>5M>S49Z<|&P9k!x--pcS@c&3- zi_+QGUj@UK^vmv5F5_5YYHwrPA3kWKIqHH7iA{E40|PzXNarqJfYUs`?!LS{%EUEP 
zBc3enn-UeEv>+)X(>b%|=70EwD)enJZ9Es_@$ZF`pE=#StOUDfYARh$&zA3#V)e?T zC-Y#zBMB|7XxW>;h<7i3q+kPA%p`hxW?_K#n{_NUdlAh%prop%*0X)DhM}{!3ma1C z=>G2y1!4n_54SI^?ZB>|rC1AL6KgdF#5A?T&W7EM_U5XBV<0rjD|U+!aaK^~aCD5n zgB#8WHXf$nT89ZKo7ag>oMZ#2W+C8m7s@PytS}EIx3)vZ=-51z*eG-oj);LL$8@lP zF}->YcxIl9fcZR86?f)3GTA^ zkGTCOjz2caJmQ-bm|O3*7*8)Km`Lty9vFR^W~5vfqXn0Zex;*g0>;ANjb+lknca466dNE#MduNGK{XssZ6 zfn145Z}>{l!|8_WBXRxDaNqzY(@*uT1zF#-cW6lPYr2%ImEiOZOX_#pb`YonEql{Z*V z)eJhTHy@e^g5DpZB<89fCWi1Ed#3|js|pV%pP>9Il(6aN$M(s5Hii{`9pZ#tyGpcp zkInpziw2u2VXk_hdIT?9d-=#Vg<)+9kyg_%TY;d`^m|OVxkkGBM=BoaWP>91OinTfjINy zA`1Mbu`y;5{-CpcplYjYVd-S5Z(xfM)i>9-eqyVyN2cRMW@Kw?$-}~8Y5K(6*3Qh7 zS=Yks>MiaYaE3@{MOE8BzegY(<4VFbWIx`*@mYylC6y`n4v2oAM6UMoqC1=3#C?`P zR(*bB?7r4zoYk=*ax#if7})ud-S+K!#WQg;pK#{%l&vK$2hb^XO#8her}-?UABBN2 zUYfN3jn-WhHGZ}9lC)}$WO@bJN6J{%doOVZWy#dMnJ$hhaGHF0y^trg{d>-s>CU;> z!p+Cl{^KL5_QewZ=Tm+;v@}y^gt6*hyQrjDQu886;3)&|JRo42n z3hm4HP_z;UX;AZAu5-*D(AnFpob8lj-HVo(@utW>3=o^C;=YFO(!P^HQ$D7x9S|*L zUWFR3B#H6dPS#gUy^%D6_{*(g9n3_@=&r5Pt4Ij(J{;eZGr4`>+-)H=({(9nrB^0PW{L;>rfiA-)pFdc z56v*M+sN)a^)G#ple~uZF)a=wqUju!*_O=w@(Wg?w`?C)I~92L<=iO^rWN&ZbYvCeCqqn7#-odM)a}bx%yf?oS*kH`EAzvv zd~c{;i$gb@k1!~=rK=>l=VPXe7{D~hP`(xr`g4~<~42sd)Q*I3bHnIf= z#|IZ2h7Ok3%BF#5KP8yX3x+X`0k_jv*349usg_)U|)zxukXvlAQVv3;KOZ)qm zU`Q;nOE5?!oyQ1gz);W%YH4e0Yj3B~JUMml1N6LbCCcH>M_-O+>~q<#QpVpRl zOcjq$Yf7nLX?Zxqwl=n2*54Y4<9hT{wXtrJg}pCFE#(}>+WLA$&hdc=B0n6xHdRK7 zqrf?f7y`QOr!9Job*39*Q7l>sZ{ENdBIDy*^-r%ZmO5S^mj-9$)sKk|^mz01=QvFVP-Q*S_lnymdJ_>QkR zthJ$p>n7Xm&uyyu+(t}NT?rSyQzD* z*)Oi)8V71mHAP6g5voWcV`7pv2qhTEJO1W(_0**5YCb_hkFDhqm;A8=3@Uk2Qc{W^&xJ)}ob2X;0kOMIPEJprybV0!3W>)x!4JQ)w&LgK$I8l@ zs2KLCD@~Hi^!uyG$n@uElMnWD|E6lz zJ6$6Yp(uGLi@{UuZV+7*2`xUxjcp6ld&j4qe}(!0-QV9|CQ%?hD$3#Kx2rXl^?hg- zUk6bewZt>_%gz$&-7KV4BirIVhx~%4*qPAdhmLrObLG!z|La%v=@h8ctBWV0(j4-3}urI#mj>Pi1!@%8U%KBJSPMzKViUv%W@ zV>YsB$IsKKJw2o6uI^l@w@FmKDbbp7j>fCCLGE8#r)SC(j3lA^&Ot}t$4vWv({q@L z?pH8B)xnY={xv#;=cDm13w#-~k;djv3tc+_i!_MV%S>4H@}@39eh);^3y+9Nk=wZ= 
z*K&x&$;EMJmq-Qg`vr^WyPLUe!Rz~drz|N=x}di!!}y10D+j#FB05d-hcQ=*J-Zi zvPkQUD{<1m+o>~m{LM;O-*=qG{*B146^{jW4k^psjk)UTY8H~C04c4dE*7SVfPb7R zYWe42iAME?Ce1@#ZmkPO_C`*KV4X*1QjDw$a&a!?jqFnXhuv=mnsDGuKfi@84b@6s za#j}5=sx^K`mkFs`5NPR!8rua-lO5q=7H+fu(n6491IN&XJ%%)3C`T+E#@1iou(}f z`toU#9Ce=kYT55Z$ECH1g$R6{%qQMwFI;v#I@lUB8?8)l&~7YRmZb>MJ4!x>Q4T?T zcG1ELzsgxe&)Rw|lIfBA?i|gWJ_ZKHfEgl&Se?WY#|CFeNLNSg_@So$CSZ7RxK?`= zp1$T$`^x2>TVCI?!rw3XKJN<|zWAI^KwKc~(*F4~r>EUdw8(#4!N$re?>qn_hT#?s zA^r~6>&vG?a#X>?l~{#I%Ju??sQq^4vPnK@;YszsBrb5dSM6fem_K&AHDMOrhKYgj zY&*nrc;B4fqFJctw^7*G*f>;X(GJnP>*ZVIP0t4T!Ny`UWQ?4o9`4*L8}ZFM z+S0gW*KkfkSZIg#$2yD7(lhX!;Bn#QA4|CX0#fws*yL*erh2Y%;O-+1_Pk|wWwaD8>(_Pj;Rk#D!)^{$u`4(WDJzUj2T!J3tUUI0eGOV;N=*li#I zGBT}$Wfts?mWdGzV-?PJh8hvi#k{QaQ4MQXD}GfstR%**xr~q3HJtmn@l6X2FRrCo z>uB7jFz)A`&q0_x2gX-=M30l9;FFKtN4x!6y=5$Qe8WRSHj|&8%!kj;dd$|A#MKt% zEcz~w@P?4^)3UR_t=|7?Gt>Db0HL^evJaPZ)lA+e{W@Fd%%#L$KjAPWsEAf zyDgq!Z^KD@e>=ku$Mf|C$p9Q$G;W(a_KVHsCRDm$Zero?HDuY z;C{HX$gF{)S6NlnoGBlIhi9%-_rM^O-C`&6zHim2tId9zWJnn2uW<5R z*JoD4=YA(Q8l$G2G7ui8fD6OT6UV+l*K3&ADpx0F61li&XD#q!dV09{>FYd=v&y+K z50}u@wNjI48McPLQb*y&$zRxaeuCm;(zbFj+zHEUUR}skWTpo!d`v<9yuu`>UY+_v#&rX!reXm3BUCPMuFeQz>Rc0(_glThwsejnB3iyAVP$qWzua{Q1#?gDpwOQGM4>rX;_5t;HYLg5iiGqSz18u<1+W z!w=wj9z+so&2y6mS7NXQCH7lity^sFBYa7(hYtmR|NcE({>!bq$aabFmdD}GBEvd5 zmZM+3q>=o;gvCl2j6SyulpiLJ5zoTnsAEpD@25Z@jIEjuzfpWx=&z&kvs=L9JKSyv zyNI=twm-N!lc+L(*e&E1FId;w+}vO^Hjw)$qc?koU@6vN`LSAWcKq<-ymLm_^4QVa zMEBjtTN~;JRw*qdhO%*;?wy_Q-`QqEhBPs?&ncS@g8L;ishQNyl+A0GkKp$*o(%B+SmiB_j*G5@`4se_GHrypLQl!;Edi1 z@xv_+74u5Q+am0!`Re5jiN8nGP!R66ZeRa7<>BwL8FGpv*=y_5(hBaHv}pSN)z&us zg1>}c#>N&|&&Aic>87}4Y?K(bnQ{zrX;rKhb&girEhrb>cJdPmB5br3&vXiBw3xcdDX_qV(|kuQ(Br}rbfEv7#)AI zvSB{QgD?A)8Y$5Z=O_Cwcq)k^M@+z19x}=jsV`AtbCnK3gN0{(k?3HyYDK4qC>%Br z_CPT?s(R5kllFx23L7n_wZ_A?Hl|*(0GQYpVqRJH6s{bqmam)7Al?Y*Z^4QxvnYY7 zyH-)>x-+Akt1fnw>f(?*-5SsQcw0u?W4)Ee7;9!+n`OstPiAC>GP-+UYu4&0`9Xef zagoPDF4}PCm4Z=&uY`{h9%hsdJIxT!v|Du7JXcNsW=oDb{__L949O+IFrDW<_AWE= 
zbv>E&>I0WN+*Vf`G^E>;_js&-VmwvI(zu%)f-{@7xvGhV*VC8`8B;U6$m7>%8lTHc z*@uQwJ0%l88&fo$>F&h?kQUPWd4!8^AuFq8)y`jQjMe|PK|{-Zr`Fj@+j#z~!^l)Z z*flf9hvd1Vy9@b(wpJDF)CLAmCi(*K>!Qj+1O|%++iHt9e0@nOS9WH6^$e&L758nI z9}oIq9!=BvdE&yt$eZ%>-yQlE8Zs{(PRAB6cIJ*#f&c3Zn-ttO-T z>TE$V(Pyttc^en^CP9C^ep$<{L`?avfz#1}hrnt*X|~4QgNlMsDB_1jWd4{;E1mLxjI#F&1 zu;9%fXsd2Ti@+WY@W)ZrXzs39KQbNM@ER44PJUb_m5PYtQ$1{a=BzwE>-fVe@Y(rI z&G(Z_*(wfw#qg9U-y3zBxVvj!zWh5(OXDhbQEIC2*75-jo$NwiZh|8@;5`<2aBzPJ zM9xBZ&CR+ye<31~xzs$Pg0`Wd%FK>QB~X`m{+WeST-=g9N`bmkyO)=elCob?FLrn7 z9@;rFc;pG|cAf^lkr&2k(^=oSrH&u&t%Ugb%?(!+pxU zQhxsaYin!9J(;1^{I;`l!otr30wm$AzL$yBRaK2_nQu;Qt*^hf3#}vd*ne*>b=ScQ zA!OJd@b})#XG6VJG@bN;vxYgY)NE1a%8Zp;rG$jEbB(#${F?ln`>4#>#bqIQ-(_&TMP(Aj4kf!N{-4ql&iZ)CjVAXl9nUJSZB_bk{ z!0V8goILrJ)Wdbsk7k1{(*kbWW$`=flv##Myu!nWl9H0`iGqSwlb=|1zg&+uSsE&aAO!GamIjqdEaG{l=I67bG9Cor0X6oax*dqKI8xO=TM2|7 zenAkQ4LXm4`$+v=KulT3SMC06U3v11K!<1;3+0@;rlh|bc;=zP&mcxiHSZOEeT z9Vy+_Gj_IaOM@IEomHcc8a~0Z3<^H#{S;33P`%9jpi7da(Kzmo)kon}AwLt^U=se_ zPuQ~M6%}-Xw+qV3xOk$9ii*Ou95nR&J5fMTD&XOMVXmm@#IgV$-DDawicf4zQoD2wzf{aE;0Dw!$eaAgZfx+ zwbOb4J`4R^aq;cVhcy+I4l4=a;i*;3m30E+Mi)BjdH0^lucz@&q-}*6PyD?Eu)EG1 zXmxwbRTQGZJ+ZV8^(rYjO}}eT2K(5N&|SY{ISz@_Y`L{9?=UYqcm(=O%a$Zz2S32DOd3v6Ez?~JYQFQw9z=(^??5)#sxLD}b&s(rA#S>v*0Z)zJ=HqZAB4gI zrDGoHnezQSAvOpSp7WqV9c^^Wc7;;X(B%7Ees|>^-n$#`l0l`k$2|>Pr9cgh$x)-( z@oWmt)Gvi4CbDRJTOO$@vly$LDphCuUBVP8OfQ`EfL=69Y|HB|!ZaO8&P~T@OKa?t zYKgG3{ct`FqFN!pOdc-^YSIRIMMcFxS?@<`jUpyL#g~c!iouHVPEOoa=-Mtwp+Wns z==-ITvzsGtkV=RucI8m|uW;dGHRXR3TK=c+{PP?DMi3pcXAw+Th(WJo5oV+N^!&#H%-^AWB*rYs{?evE z|Mi}P4mU1}J%YaU=8sw(1gHxgm#@%`Z`5Q_mX(PG8r~r{=8E!0@5y@ZqsF~>9fl~3 zo@3pj+t`mYD_z0yAous9^50%5=>nWzmv%KxhIMoE^6sdERD{%;$P~1RbweIz$bdX- zw>-RKpX=?qS+!Ej@VOxKV>a%il*y}fnXi*$D%6-V@>+()kM*M~beZ_>?(Wgi(Gt#$ zVN??L0LF;l;IXA8C;#~QbDDm0Dj2c_3b|Ig5j$ zTgzz*H$)=rRhhwZ9tXWG^@YQp6?yvH@D`xrOsXBys2Jd9HE}2 z`!%@aY4`O0&bq<*#jN-|JI{WjB6UZN=!)0Jf*qNH<-=lgvYRx+Y?QBsA^!y6F)=?b zC!5`6FoKRunkZ?QC& zktic0)7Pw~sQ7bgN`GTx15lIlkz|ew`Xf2Hq*O@>iMY79a9bgv`VY@8L~Jy^rh$-} 
zKP~9IVN}APlYD*tk)U85%p=27I#p>8*2)a9>!|7oq&{-r1_cbk+Mdx zRR2h852l&H=NK)%b*yzj6c0p2j{wl8U-+qhL7mC|Y;Kd&>W2#|IjT4P78gy`(oIWv z0#9e6Pf10cSA2;al`s2}=TzKFlq6yZ2XBd{CqD;R3o!ZB*Vl)KheKeNR8OZ>X-Ut> z@O}RL4lgf5f8XHH5D`DWovke+0|UFwb+QJ&kp(* zxf=TxmE9O9f#R_C(zJhk3F_X>a4UHcC86x0mQ=+%w`4Hy!)cS&!k92oxu8Lkzh^(w z7JuL8QYh-~-k!zxg1aVYl~rG*m(Qz5ybcfVfr~&jBJeuvz4JfSU*v0~3N0Y=I}J)! zE7@#lKqdFQih>wCLyo#x@&pz>Hau_9E452( znp2Tw42m2TnQi38Vb4rYEAx_fr72zFURORkQkIj`-p~q$;OIkvS+llK!%_5{`V0Gf zndHaS7!-CBrR?h*xlXxLJx^G1*wEE$ob4PP9kZ2l6I_ zoVdrrl3!snLr>a>W~-x<9G6PMZB0o|evhBQ&c=qIqdnZxo*+5*#q^_S%M{R zRE*uN6_%oA?q9WTd*NdOZv>`#=|upg8nnf!J$@Yb`Zc>z2iXYeE3E6cglZmoPKe#l z-*>iXWqb=#&hAo}oJVb*!L1_Y692c&aWVbsGQ@cI+hGe)P*CvN%`2K|JdOC!lGR85 z2p(-pawlkgD# zY|bmGDGCV*0b@crd-io$SQbBWdf>;>vcP(44ezCKIsc5)$a z&CYbJg1X&qGfQ0#-=5I5`OQ^>JjJD~wqh7xqe}DD&NRuWo*u;%B1H=?t_mU}rQnw# zAzNEp0LcrLpOyq`<<9kzjC1H{*eR8TqAW9wEOJZJ}v4I5g^2%q(7|?$4g&oX5sQ$0f0sI7uW- zss;7U&CMet2QTn7Z|z>;r57!;{&{8f*DrBd*$iuQH;pN()GwTugG(^fS4czG=XjV| z0Rh1n5Jt0W*A%eTe+QLd_?z6gvGV=3y~BR_t%x_FTO3h@r& zoxO~(JV`xgh|VKF*0y`SoJ|bF5dOz70)L3GA+(3k%a+t^3|~$>IQQqQmH|jGTECh4mQG9; zyH8J#Mb;HP-FG=RKiJ!2zBPq7co_@Jf^5%FRrNbyq6RAoBtC{D%JeZv4}sw9BhE~* z!nj1xqZA5U>QR|_e0=tk;sV^$WBy%6g3~m?)DlbG{^ayD9sz-x z934Hq#AuW7RCh;52fn7`O}x(^h2FyFiGtewZDd+Fx~D#E$1 z6v&D#0ujkE3V|v-J3BiJqUBHcnw^bIET>sdro6;pW8)kEyAmQ1lK%$Aw|N3~OOumz z_P2k9jWsmrj#fK4*9qL3nM7^igUu1oVG;@Ku&8K#wlgIlAfPu6fZy8YrrBWe9KhQu zvms7`50K}0d3erWxL~NS|88KqEq-BnnK>=y))Vh=x%?K9{?2_Ju``!R1#%v_r)U`_ zQ$qxW8YUr!u^vw>H-ky>D~>FMk&mXM)fEuD)^L}G%CxG>=fipcB?t-%28$vPNNU8$ zr9jqTJ@O&zRartpf&>pE<062fLx4fW?vKoMhDQO|Ls*Ia1^0?gBXfh@AaG9X1I)zU z)`|z*9$XTV1L0^!5?()!u!socc@`R)833j!B0({HPK7Z{e0-1l6wg<}vNG>X{+x98 zS?~?+`Am!RT{^8mB`g(S&L0eNy zCIfr6V%aU2AaOAT`^E3;?SlDj%v$s&t-mfpK(J)6q#!_e&MgZOuQaNfJ5o*M z*3`Ix8euL&V9ayp4((iWax&bF{qpbt$o9ZsJobjhHkes&t#{d~1xH0i&53y>CXO8* z>_ynZDvcLLM?pckOv0C$Dx1JJ0Az+^$Pb$)yg0CT%Ixl4uU%#0rY74GF#rPdf(=2D zpFZ&b=P*`cQP4gferJkk>pM#uRBn-@V&)?EsZi>; 
z{p7@L1fqMy%Netxs%j7Lwrrw6u2fVRbsn7@Xl~M9@;naL6E{ib40Sf(8IfGOHo3T% z5tyT^EzB6VEfeD+dDSq(cptQzp=Qu2>PLACTAQnP{0!5yo9Jv zra%?R2)WMsaa8~ngq7PdJ7r_;CY{MIqC-&t|3c(2al670A6)Uaj95N zX_jYwJTb?CiU0R^-O4eOjfLXc2l?L(47#1OmA(xs&8XpmwM%|SRXArf7_ai_p34WblGWG1N@*qvrO-U8MD7y^$En_B9Q%kP}% z0F63#x39s(vA3U2E-eug6X#C1eEW9s@@3^^|2m^ia=Xe)R`4;y*wpf}`S#+og@~;# zlisX|xiPZYsS4-KELXdKA{JvpIyn*%vNS?KBH?nU_Y)R&ZKOP~um7oAfad$wmh?^K z>{lh(T>vx0pXf0-%ym(5a42C)f*|vBgcv*U%CTCJsXHuo9wFo)w>5Lde$$SuM0&7t{H#gZL1fe{o9vx^ zOS$EhQM&w77zg2SyLkBF{ogVO#K;-XUAc0zkhQbtreeHH+780_&Xc?;D$W_d#DwL! zz(*B|c@~kxi|Dx3a1M=WH^+W{b4m9#2In#|2d8?Tekjy%E<4sXqzJ38<|Q)y9`7kn zcS51FnAnuXmsqQjzZE)bap7+Y&t#x-$^grZ#RfSFWKzs(;&D1Z000O?(q9+Pf180O zf_)GOlY0Pij&r=*59D|L@ox&iOi7975y!dt*eLX0c)*C3sr~oT(80mM(&D@cj+}>N zEha@8k2?ONENmow!t*6+1Q>A{dTFN0VO^+O4-z7mC%JZh(fOJ9W^SJ~r^V=Snj}7`P;ghE(rj0@^;m(0o;;E2@l$+PRaNDB`^m?1J9ynYL3!bQ3CD;9 z((Y{q%ioIwT%(Q@w6j|-2@n2<%*p2VSEF$R;sHcCI|#>!DFZYxG&D3e^V1w?p%ZR? zN1HR|E;=b5UZmF3de1Y)rI6<&yw>T(z2N9{sg*xE(%jDa3=Jqr1})dIo4G3S*tFfZ zCwzZQPEryR5i!gGP?%%>5NCHgM%3i9?{{8?lq!_hK zm?)PP7I^iWaCR7;)=<4E3Km&6%5{W99L7vXC!c@Vkyc`7TwEwp^e&*ar6s5IGR(I! 
zu{ZlG9}>Fp_u%CPO*pb|jaK(FtXX0KSJkceC~Q}HG0AV6535Em&(5kUE2pPY$(yAL znQ-$uEZ<<$dkM&|uI>oJzPW$`8R|ojCLZ0YL1}(O75_I6*&Iz>q&=t z8u2xvAtdaEZH&y!ZH(o#~40OzcKia`-^Zi+|)Lu~+bDJc6Oi*S%_(u#`} z9gz3X$yXALV`N608_~-U=*J_E&w#e6iKTwJt=fcM;#eAAL`Gxn&QS0C+TAR8%>4{2 zilmSxuUzTKQDY~-qM@RqBM2A01R@vg%!N@6LC_-Z+)=FoE66D;|DFtBp+|LDZ{>m{oGjz9GZhtKNGf)(#@`*v zJPL@$>7^wms@pZLDJH*$ZQ9+yCu^Jl)^LD>3nzOkqc0RCH#eoE(D?>JAT*>mhPHrfQ`4iOR8`EV9@#Do97J+ij=H{k;Rf^X) zNik_#tPy^?ELw&r_A3hk!154?QhR1|@lbE^*VMQ;I5+s6i?!<9Jq{MM26&9_xCag0 zI7y_^caK~c?O$_8T|aS;YVU=DsdVheZNNt+QPo(gC%- zK%-AdDAAV5@btM1O|VF>`8zn@dF!;|HjJ-A&2yTDhWBg!zt$^Q9^7bEiG*PvFN&2zOZn+G>)4k%Bwpoc&S zrseq)lWu_;6y*bAV;{lschfR6X`L9bH_1^!m)=@kRRws-asCf35&@>=wre{=2ckL= zR9RMNCL=<dza2 z!sa^$1l|eWkYia~T51N~lMx;n38ZS@o!s1zW{$(8Pfi}ei23(+lzZ07tEysa)5p7I z1?{Y?z__0hb_Tggd(LBJfN>LYMP8)grNTc;Azy{ZF8g^j&^WL*z?J;B8j zlmt;SsEqFd770{N?GzQmcNfn(xj>p25%byxh^e8Wp{t{lY3#{!y-It#6KWS1F5mcGtzcv{3p6E5=F`%mZBdEYkUg;S9?pMW3b%~Uup28i zB>weJ)&ejp)1!}H`eg98ixPH0bh=J*8%85nN-J@N3NOssgH%P zc!;j(<$u*7y6~0vH>DJ!{`blD%6h9QT+mlBMmXb++%e?vi88WUh(Jg+N{R&?H8fcL z{_sBNS;Td(>Pf;2Gp;dl+`8{mBbvitADaz5J+Xsl;mjm#&+QCwO`={N>A^q`8@?A% zX#MU6qSW^o)1m=p9s0iZxXN|vdzLiO*+jLaj)N~XmtudDsUna|2>9k{GQ{gzQ`FGu zpf$>ErT{A{^5GObK_EbaGIDaS`!t@-+)iDA1zj)@gpi&+qm4E8=Hh@2WW~UCi-WElW(B#& zkB7k!2W)q!9OG%OX)Xn-nwlEqtF*MVIIllJWV3he`+;wEwuz%-IZsaEcX=y-grHU~ z*vT0`z-Jt8N><*}FuJh@6BwUF}K=P2OX%(N-3ZO!*eZXMMC zW-;kbZv`xA7hr7bRhZc>uqV~J4!Thj&PdE{LsZOlBQtYxSEx&cNwZx51xruwxTU@J7&9UdagNeN`)sc+ldH*ek*QDch)qwh=*v#`la zliSa@F~ZB9UBH57EtOIG`5rnBjuJp3B5S82SZ77C^4%;3Mn=ZEF3&S(!z(z=haJH& z@@-YNk%9^!bFij-5VM?!51=VasFkGZ>grOt0eK$z_@>AkpsUgk@Kssc*ytPaz>*;0 zcm6X&ex^-2FOaI(6P~)xh^|4}$`e_vW$Z;UdV1NAR6LK8Kx;N-FB|x|s5(-r-i|rV z#6ygjbB{Wnmnu!CBUFy$3&#ONs74rF5vt2?Q~vMq_?sdjClE^HQ;ML~;@GMrX@bjR zJNqgkA}uvdrl*1g9x*P1^5gvdv3$md4h6=}USYpCH{;p#{r4k^HJ`Mo-A!>S&U&Ae zBm|&7D44ILv?O}dfIPPHdrON{$`MI_tA%Y^@w{AAN{TL&Pem}`{=V7w=i*VtE}{Xh zhSsFW;{DBEzqsue-z+;kRWgvqx=oIH^1vTmg`5GR8n5ei!7~4ZK5a6Qu+eb&d!Ai- 
z28M@SVVRr?NhNXrDRFU;(%mxjT2t;*bc_89;=^0vVzZB_v+mWfCYUQxQc>A0e*Xo6 z7KKRQtDAXG$mr~HSa z%mxbu6Z1~b%^`Qac(bZXO|4MWavcvlJGxemiwQOjLyGfGwtE&=R3E*O#zQjwih(%0d_wD=^y1{UFY#hyV?%5Xxxszy>eaU= zH(re_CPE4E{(U=q5@5I%f;&I4Qyy?s3caiRFP2f(a{OG6S%wMwf;2-cWzT1cdpq%U zelP(NCMmP(&?=xC!moVtWT&E*@5LH`9OV4Oo$Glk>r?|gz=vele@>n#A3<(AH?LQ)qfBZ zoi~PUD4WwD4%W1ErHW%)SXj`dE=y0_&UC52C@d_**K(DA=MT=xhmk4F+yVllV`Ex7 zy_GjpFkF`_W&{qmbva}e6?>oS|m-sxOl_`1c;K za{zF*HZ~8F#p?16M&DIG?_to#JxR03R2yLA#5mEp5vjd0T7%T8hmf{SaE2;2n{A!y zh^58s&GS$z=XJ(jzozh-ui^K;*%QzS4axoh`%v$pr*QA6sHi|pm-_R^w4g&}A>xSq z@U#+nf$F32`N+O%_L)zvSBJK5a(6x43*-kSahB@5Fghd79|_}qWQLb0Iv%6yIH*<2 z%ZDE^SZKLy!_j{omn04IDHJTTpP?q^1lY`$0-W1DahIA(Ug{-H_yUTpcN`sq`+7aDpNv?#)Hlh*i~0Qj&+?#r5uh@Y@3PoFf)p@b6V z5Xw87yZj0rvu~eIV;yZD8ARa5J@qU(LFt5S|EB^d z6z_{R8Sng=6P?ZK>KV{9fo5e)Y#?IAseQ4aiz1 z;MAP#DT34DUIs`w$l_k*KS|Ol8-ZJ^|KjB62(~L#2mk8Ljg9>S;o&ysn>XJ=kp}dl z)I5YL&S}mCe=Q3GLwCG_y88V1`0Actd3=0oe~#My2WJXypTd<1kmz?~yk!LsdOm-q zVqyxaMA4Q%w%;K(b^a2gpGeNOMW+fyRK@|&n(Q(6JlfES2~N_XPE>f0Vfhd^f84&cEdrx<|RR7$)w)vY^(}8CMMEF z-isWmd1e~J{Xc&QDmUdzsn^IdC-)#Z`3{$D>) zj`>p=+UX7~*I){9zWSk(GsN6^_K51XhpG?ebF^cAg4|x1@|yRI@2!u5rY>pDMGGy! zTPi<%3-P}+jo=wU0)rW&!Lf9TS&@~j;R z&8~WF`+D$!E)hQtQs=;vC_s2hlOLab$(amrdqYUk7za=ysNGsi}t>!*Y}Y3lu`m>240t%VdZt|BRNROF|~5)=rdr5$T} z`uhC8K4F8P4*SY&-0Q;Rj~@h!(VACnl$ZK)+)Ku+1Iq4cMEHuGISIPpYqcCwPk?eV zunyn?NS;t#z3)rv{_^EZG6krAoE&+2105&khp3WLt{yrd5pbWXY=Hbz}mMoP^dL7lO#u1?`BFhS!| z@Jd0ypX57gsW)U)Jq$J@=BVAVU@3)SE(vz*m)Tw8sj(A!l^Ulo2pKQ7t`Iq)=7Rs= zdzV&`wQi1K;uGuT8YhT}pv7@>bA#mxsmOpd9x)fsV<#ak&BVmS?{T>Q$q)N$Ac3y! zjXaL3Q7)*?B8{Er?LcZ>>`d|XbJF*u*EJOfw_=G==j6v17-8b zz|HyS@F0X#kXdqhs4RcN4c*vE+N*C~vUC-?jeN11=>Zx(Qr7E8x?A`lybLOj%0+Y! 
zuALe?dzO@M)o|NB?Tv_@VhwFY-lPR+M_|Ena@qlpU4qA<)4EFJaVmf%=>|0qa44zk zjzA?Q%i`?6m&+T}phF(S`@mqd)@Tg_I$Bz}E-ET2FkDhkYm}Lf6g+95WHoHN!NtWj zCD;gY(AL_z&~KAN{@!Npzt`c5m9+}v?^xZ_(9b1&HD?@Zn;&~iJuG$%J)O&!Ik%Fn zh>yCEKtn5c9~gc>aPZ+~zm`d5A<`oRn=WzA1RV+u>9rAF$;-Vzf{3`3!Rh$N>e#)s zv=kJKR${=#TB$rn7{3LkAm9RFfR?4i#pgzAs#5j+Cmu{2M9R4>%+6+t$WqGF0Bh{+ zr++rVHIFAQks0+?pwxS=FglNcy;+w$wsW($sSeoc&HFy(m6a!M#Si-tsK`JGG6ul^ zXb8_kaqrE6N88xgU>3oPFqkwdH$b~r$$K1HH~+Y41T!msB+ja^uNrDVegOd(=#{?~ zOmrXeycY;tby>0uMHrx1SJfJCzu!h?cx7y}FRQGO-uH6)$IYybe%QRi_4 z-RIB|u{cyFTd}6~Z4qAOz_t=fA*!co#|+P{@$u5)bl~ff>3CS58V)14#KcT+On{YBTLMn}4 zPN;2mB8Vbx8^L={-s+D9X(U9{!FzhZ2ORA4_~tP_f;_KsSXtaFUD=#{Q%gAMueymW zU)yB@M&`)fRdP8+wsA=B!EsrEm>#@}2P=*?%;ZorM@}4MHz>zUNqGFxg0Yj{ED<50 zGc;^r+`uBjYY_fmE*+hH5UGySFw2;pLg|w@s01x_@}M!U)Eylno7*yE6Q|4nxL%IE zeeYU38~_|b%N$84ib)6#t>5J2BmmHZDHe~Xz>3YynSIZn`gV>hD6&Dm@1U3a5d@o} z!oo`zlK<0xrjJ99eXzgJ#?Bsb3B3FA*VrDUFEH%`xx0GxDxd*q41(4VBk02E?9BhK zgxa9ZOYCFvb}O_qjDZ`UN&V--!cg(kZkLrB2N(^!_T}*K5Iiib)_9v-6K_*qy zZicK*CHA`OoA`8c(vSn;)^8C01Ctc6|+b!TWw0q<5Y0s9x*RPDcI^>W4v zXi1t5^ey=^BqEuohk=D92x8KuK`=HjO_AO<>VQ!9CrMIW{oDOutp2$iHPjot!ca)D zHbRT&(|I1&{g!S|${fsr^6+hGsk_P1hgU&{jV8 z^QRuR7KB=49|DiU_{>b1$B{c#d}yc@dnif@mCA7f6psoI|2E|9jk1~h`t|D=X(qqr zscRw_+B&_({#F?9S{G&w>F^gz6Nbp_r|qHpiw;?f<80_KUrLN+QUzgRyouUGix zbMBLmgn z=0Ab3MOee2nJC}}>(<-b{P8@a2Y9DD&u_6We_*H};a@An2>g!Tm5Ik?@F zejGgm1H#MC05heQkUNt;jTlCzjBDc8H6JFG7#rZ-kLuY0sWpjL9~L^eL#ADgcLh*E zm}Ydjb3&>z-BN`ZAD6GTdmI+=SpAisgifqo;iSYMV~%_{Nn*JzlT%Y1(Ei`>!o}H{ znwlD3H$!^jm)9`;PXj!duFR2D@loP>%j=8N)6_3mEB`sfQ?FQ7wAv*N*x2sc+T%zm zM)4`|Hs~e=6uI8A(^KFN)<@{?*aRO6yaVVYM9N)&7q|51z3}mwAFUMR;o%`8O9J_y zO;cJzLLnXGl2a3Vm;kKw)UCw<-SXAVg?D{YA}yfBMxi_! 
z=SN}b+QB=O92~e@e}C9)B05)U+}kv3ab-iGi@stf6lH4Qg2EfCA79?}A7v91E4Q}j z%=S7-Nwu8C;2qd$$jJ3jGIJS8W>Fjmqe?6L*DF1>*$BHtA+kfZI zOt;?CaLU}<(y8LFODAq4ITix3s^xDZyBNRbp8UwDaO;PO`io3>?M6GF)D)tC|0Hu2 zLGg_WGBnc$i;4P%^eG?$rP)@%%+&LgXloU7mq|9@>b{O_VQT>M{e;5#mKTaJL~klc&^ z24dBvNE{aqu)CqpeU_eq@c(P;tK*{DzPCpZ&`}fx0}w$_a6lR)l)9n_3?V2DgHqCn z(lLaJs7S|%q%a~O9g?DmG$J7#QUcQ5zja3Mz3+WL-}8qboSAdZ-uvvm*ILhd)^ob~ z(fL~phKzfp8(%6}V$O?%>_PQnoeWo*|KWK(=fY-Y`ZE*>hpSH)wk4jtX4VoAa%8Tn#Qv{xvPN=@n`2e?%c=fN~2VBK=Zx)c=+4fIwB`n>U6g7&6h6^ zyc8Ckiq;mK6&a{cr=P9@U(o|L9#24obk~*Izl12^FBKMkgi0uIWT@$oxVSA@Om%br zw}h91X3Bsq)zSrFzBbdH?3K6CCdNCqz5AIe8KESw%$! zGGY+00>#Bwm|rxY67fVKAmO4-*U4AF)SRTx% z)@j#e3^<1YRv&L53Sr3$YDUBx*TWZR-P@E zN#5j#5EvNPjOqTB<_9}ui^acnMf|0qp#e@&@+Rl#Xnmw&ppIF#gzM6EAcFJol-rNi zfxiL%ym9duL|xPe4_*eQ>;*9v7U_z5vmIdy_JScuECPiaV_9riN%nU4$q_aI0fDec z$k(cIs9Ufm@j<0&j0*q#_UE@=A19CjRCkR)cHhyfCmIt4i9DSByj@;`rVS~5o+kecgRoG6Uv#H_{ucp`L{tsk+-E?7uM(vlV2f7_yh=YiJbWMY z2a>mdwhlI>Lr5Kp;mxPl0Kw!MO-y86j{7FQ$ndJ#Pzlj#u;~@WfU)}8-tI73HzgI~ zzA^CzXnNRmD+7aA$Pi4t1mnyMP$cF}a&r4Qb;CQ1v6q~b_0^5Ic!t|w-sO$t)7nFu zW}40*T!+kNi!jJ~QSmuWd3T$jlkRUua+|zFTe=kOE4qx5*$Bs(9h19ObU8>uPJeER59dA*ZzJExJL^D{C>UH$4SMsVysBFU&Bf0^fh-X5W9-l2 z!B=X-Gg(e-X=uAL0Zy#2j@0R{9JFlX5YnK<<98u4993{$XM}@>CSHnASm@;Jyaq@Q z7boXPX2G~=)>!v$5nu^ZnZcDfOGfv4t;WV?x)kAHADDL4U^Ff6I!t#vfSK;*2a3Y{ z!h%3qs8A3w)P1iR)Rdx7HmAgp$$06pC~Cn zQEic9H}nxmcTIaB@N%fSo(Zx{S26>S}5s!NEu9=#qFkv>IW7BUN8%PE=oU zY%$#N$%_%$FQelys+O!2)4V-OnJ)?LUv8cxtW$IqV)wLh>CT`UXL(k2dO z6!xpxp5t?#8w7kqm%aiP8y=7Es_EXOi+VrbK3{#@V3phG*QT%rGPwvPT&N&O{=~PK zpvxgm2Q-~s9S61t6Aw4n)`V>P^)`aCv$N~=g6jf^(WIWaxj7{J0dRs6h=hcMQ8G6am z6UY~&G}mQsx;Dinph1~7$4*044o(&}qkxO$I(!9l#kTZD1DV+j3_YJ3d-eBtVF708 zcwHL5w$zW@y zz)Z0m3c{o?omM7C@pYaGJ3$wx#qhrC{=?Gz%G)S&G64z{Y&B2Fje*-1kQY<#c|_(b z<5cuvE!zQ+M1NdfE6umPq$0IbEb1_m9!%!}tiCa}J(_$l*wI6t7BvWL_j^bSVlyYfCO%$?03qg9J?K!fZ7NP&sFeX5c~4P1Tpa9~LnHJK zW!TlGc#w5^D^celt2Ve9s&Ys_I;6|0SgNhh*TTtO#YnreI3E$lU*PF(IEuw7!P%;p zNFcoY-9soE7ZeRhJdkkrqc6!(vZqm1-W%88;?kF>fB(+{xo93pFpTVxK9%y{um0C{ 
zpdqOC8WM{VMSOXyfu}A}*A~-14gK*US%l+Oa`-^xHBM zM9F}LlK(n9rTIzrvyD=W?HVRO<#DAp)=YkBAph+hOUoW002q|Hmd#f)!uECQgEU)} z$LHVusG-{;b{tL)ni*Z2AC`igtN=wsEJhXm=+PrcpSq8RYl15i3s~Qs<6k@H(gjHU z+ao|Baz@<_1QKB0`S3KJzfdQpX1PmR*_Y&v%J25s{0grcdewLbKjn#$0 z%0s%@k=_N7Ko*21kd_02ARuOI>+5eBW!V3>pMfWkwZ9U(&#wIHdIQ;As6wnE_Y##R z##a-_)}y1MIsxZ^7&FGk?EiHX&P_OipP+tC!OTFsN3Eu=&H_kQAYc#%tPmavi%!{Z>@8222frsezCiGJ&mx+B0%;a)be)cxc}~zBf`<=@r({cw>OSrCb)09(13$ zpvAJx%zp>!)gCX{0~9`Ktf2IeZmco-%_7G1-OFr?ULR~Fka&P}E%%=WUnG#tcUxv6 z)wtqpK&60imKcWLSvLc>4rA4wXAUw5YFb5h32A9lb>ZLf!3&_3^5yeq!WtsN3$&+J z0-3GtEVP7cG%qi&*cg4VLyzzanym01L~#sarSJwG6M!=-6FX$x-ao8FM`;NgwHHwk zd?1exT|AnL>NZEQETC8p34O!L=+E3up#x*U?D`W)Lz>GeG;6zE^?=%3Acx9$`}z6N zfppGkX6AuRoD`=1HE#nnNe!w!m>lP^3XHK(^qZVDe%zz$yG z{suj``PYygcX@e#$LH>e;>iL0Yz#yH0F@uLB zj$oXWvroroXPN2gC#KvaY|r9>0|hyPC62|F6{s);LfW0HS08P)o9+etIcx!PNYn=E zL9wy#2B-`2^Mwmo#ck=+-meqL^4ZnCRuDgQLI_RG-8BaE=k^y+tR4t8dt$RM%T+3K znQOMc z;F|O5uMzj*K42EHt`k;%Sc^JBH+g zz`ee_iR>%jEJmprsJgpOYnntZDw&~yz63&AsbvnkIVSb)?(Q!|Y6Bh=3DdG*HM`?9O}Y1JVO&;s>v17`YWm5#HZ6Z0%a(_77{T&fnZma;9sFZ&P-t z&() zNfS}hY?ZlnfA5ty@%)ol_kO3{t8_Y;KWRV8%aH-~ zF=Tqwe-RK%mQm8ME{<~DN3xr#6;WWw{0#Ju_qp0VNJ9y0XSJBy^|xRh^-%FKlqlB2 zs2|e9b#y5rK`9q9Yo01}@(<23o}#q5+aCv(Oo+2r^B^ji3NfPIHzvT!9 zgNzB7uiQ1p!L-PeRYlb25DN$6#H8K)dI2ri%(Qzx@t3_Mj+69pzUe1+(jO?!`Vy#J zkLQ0a_Qzt`Ml6NNBDH9Zdw#dZgAHurKW!5RAK;%2;pTt8BGClS3Q_$Ym*s#rXGtpr zv1+87Ns~^7KjRtT33%Ox$;W&4v>_jVm_Ei(YR|q)vZq1!X~Snff_$80SGxW%fY;am zjM?~}6;g7JP(xx4LGLvBZ>YsokCoeG6g z4=5Aj2oI;CfABbtmK7B8{#;zgw^pg1uCueVcWHv8kQcC>VRfhQQ92fUJ|!iIdp~3W zr7|!$T|zD0d1k@ub=2!lv`=k7$q$g6N$~ac-Lm!~_;e>9y)F$d(6*BKo$t*DUOao& z11TO@;Ys0*BzVbXO>N0aa7bv za}#xSbzob7i1Non!BnR{pZcspgSGi;7C}!5IN!*Dq;!ud2;*jB`rBJOg>8EMV#a;TZU&TR>QO zKwJl$LC~V@yr!n6wkgC6Y^XL+|s0-@H1oW}9VZ>?z` zLwjE=QpRnfj6tN_)il006 zz}1c(Jqo%3h_p0P&mGE6|H=|FX3$AB@2xHObuN=GHOdqFP(mvQyV)<=&m1+s^Xbzk zNZj#q*8q0V`JbnPH9^KEC4*LRx0w!#hzvIW}=lV)hot$8`$sgTIXabV`yy#w~J{C2*- zFQ5?vb|7D9q(WQ2E{;q?P~{8YQD@m62OE^e3w~7QN{YUo9uKZOy*$}?Z(F~EiAi$7 
z>bpg?3OmU5_-^C#`QYUR5Z9N&qt9W0Ar=hAs4g%(ZGEE>2sw%N_FL---k_pT`5pkI z% zsrLpe15X%0>HOxKVL8u(=PUYuey4&!OBEp~q$Y$xsvJV!=N=l60!5?IVfGL}ZoF&K zgSsamx3<4~w(^|{Wa{qza?m+pYrC44M?G0j&A4-}MQtb5=n(UZnYM}kVv(oIEsn=e z%*IIVCW8iaXQ@jrs3q>DlstN(u&wF~eH!`r)CcYvBdrG`!Dr5n+@kU`UC#Y_f=qG4a{`9%t$oO}UpI^)EJ1>Ig z2mURR){6-ixK_>uL9(k&j{BONoYT**l}$~` z9tw$$Vh~Y)2&EHf*CZ`gSak_3>&LvW;)rvHs&Z>v+sxfCS<+My&!``3RAhE*Q1QTU zl!K_D2Jq0@_+QIWRej+N{g`dUFC%jWNIIbV_Sdzsk+XO2FQQE%Iy(mb!(PccJj-g7t>6Qd(zyF0UK1=h2y>Y$*_&~_#r&sV3H z!(*?OP%4!Bk&;857^c+nwiQ@{-HI8KyY8_YGS#MH_wIzgYAz}k7qRTfbFz)&tGVu_iIQILx2>(&Uye=jQH$KAwGR z?$XS3u5oR{vlC7cVOv>Fvy=Qrlmm7YwGiFfe8y}V1+H6nEo9SOwBGpn_RZ<=k8?xh z;W);bfePhK6>^6ndy~i#H(g(Avw?Dnj!c|YmbIjvp}s@f%Hm4Nj73HmIXQDzp>4*> zZ#`^>pQ=ffjxCi51feu=Fcc47iePj}y404T%ORE!ne#B-ZPuF0XerY|St;P;<+Y*O z&@Ul~a5FV%pF8s@X!f>by997r-4W=I<31SAb{PKyGtxTCFL3Qyyd;RPOhm^ zSy`W1qpJ)EU=u*M`>Wld+eQSROEY#UHy$Zr9btNi6JDhNLVr=>SrrIipTHf|$)D`| z^~<)+MJYYr!4%CyJS9duuLMv^ddfpCa}KdB)L?qUy9cCXpFUenRU#_)V7L>rtNS}oPApnRUlW`?Se1Dg^{feL zHoe$4-0b_|9g$bhuprKSm&Gruvaozt5?dzomT>GMKOr0Iovm#w`ry2MOX7sl7fSI< zg=(P}K%YBRfMS&5^TeV?^?NDIR6Ns8t_U*;{_h}EH9qa3*w#`3KGlFh@S>WYw)3$iV z@YvJ0bc$!cB`0O*?)rgCeLM`4Ra8TJbYb%0e*8DY+CV%5GVez?4o~XZj|GM2TYq|t zm-o}#43ib)va%A`zQj;h>p!9Wn0jh;`FlbRTA+Sm)OKa!{w}rVR|UwMsK1cy6T6;V+gRahR1&yrvL;{5dEv*0PF=(LK8@SzXh4rOke zLY5sjUzQ~+;xu)OQc}(VE2=QpGLfk@P4fidTm4jQvt+c}r)mRAb9HjE(>5(1Z%E!m ze>?SZQSV1$qQw32@}I-Q=i=IPOrCu?z1UOg^0w*l1q#>Ps_ZjME( zJh)bOa`AuT0d` ztg5;&KVMHt?RtF@HqO>4eqTX>i?AVOoKaVG6qADfsm)5z;@)c%3=BREADxvyxloGD z?HX?u8^<>RJq?2y%G&Ft>t5F%MFr)*h;I&(u|EvI4GK_DnLWIoOuNOwph%li>%*;o zO)qXC-%vXll1mGShu{2<>vpgs0+Ujtle#rC_^nb&M6WD8ijQEwE%7NNBm{(^p~wc1 zF!|#UPataDzvCD|Efpfmr5(4o^DBFX8}&Ykt|kaAJaKsig^>2)95PgVHoVA&ATwtIAYC`<(sDc2Mz!#-`A0$3qcI@L`)Wm2S6J5cf>}Yenvg$SU8KY@W&`-M0F+zHD=^lJD!*ZmiRroSMgf z?j9V1XTN`kHuKH?Q=4);8=?XNAvk7~w1*;KD+>$%;8F*UzEBe4YzDH6+{oe*c#8N;mol?0OrYC78aA> zU!HAHsG;_IY(>*`A?KbkZ3}A8Z@=D(sj`Y#Iisa5b-M4OeKo-I5AaGuqAd1nBQ?#R 
z!i8jHWvgBaTOx99ZD|lJB2-L<5Rf@q{u)Hh8h!QR3oBLzmcQe1wkV-qEX>H*n@-~e zwy$xxHIu^leT)OVC4u^@-9Ar*aVcUxAcKlACxc;3N;*rpYB~aoDw^xkC0#Nr5SxV; z2_3pyL-{}Y&R|1~2le~ZG`*ah+r15N)%@4Q$G2{HsvcP>feLV0Ik~h*Y|Zmi5h%O+ z^23mB=HkC7D7d&+$k?~WG&q1Q`U&N}KeKmgANxgW_koa9#a;a3Vs0i*oo1;Hy1BDG zi^uC+MS~js*PWcd*xA!aScSB{&`+$4yu7@P&AX-DLKRQmkJTckAE$s{1`{{#R`6et znS&`0OrkI$Z!A^=;Vqz0X^nK}YkeRsFR{DNKWn~Ea=mP1WCX+*o^Yx?frw=%-A9PT zf<@i!Zk=m%O-lUE!wa&<&m9=?^dns& z2!kX>kb{6Efso^We5B<-{IECHgY@->44FT-CxJxBXQ{!Y6vltk9i$)s7Z^f5QVl@T zSa6UsD99iGUPc>m{L*;mwC2_$7t4F7w#sr!eg6D<`Aq_#ne|+b0j}I1t2OBlVK+|z z)Px+9Dp#86PQ5yE^!6}g>)3q5t$BdeuCJGfL?VtJ9PzR6$SeJKyJ+AS<^v*5-_N+X z*f&N@!F$8jr?v7}-veJtL0*3U04wSD9dQ^@#%MbD<;Ut(Y?a7D4$N*CQWNE$%e~&i zUbYc_0IFPqddX`c7f5G(s1y8SY|q0fbDME#<~~qro1UKDs8Yr{{JrF${L}7NAt9`6 zh&6!2u#w;caAEdz>=T$wBIe!#*OX{Mh2iY%hGg}swu?yj;#=mUA;w7|7Y0j6MH?T` zg2)TKXvyE)iqPEBaggzT$u0_%2hiND7>GUDcP|N>JCU%To@3|rStgzLQZ7;H zD@*K5G0)yfQKCIhzh^WfFF%^?*Ou|SKIU}Q0aA7j;LOivXywf9$&KN|)0?JW@R^~c z*Ju&hM$KRtxP4GG3PX#-!g5U1Q8^%U?vh9#RfN%j4|* z{CwNdv)BI&Y!|_&^fjSH`ASk6{%3pg*qi+iLMVFtR3gt=S?#_tayuOK=aH!iQfM~s z=5$SMEFN+Sj9J$GF&Jo0cDr~y^Xvi7rn>%xIe1xBk>_w~Jmxu@VjCB0Fq<5Hgg4RD7g=NGeU&voomVNh^ Hq0j#TbUD&H diff --git a/doc/source/images/sequence_from_audit_execution_to_actionplan_creation.png b/doc/source/images/sequence_from_audit_execution_to_actionplan_creation.png deleted file mode 100644 index f1721b32687b4f16b13085deeec172e2f37e76cf..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 47585 zcmc$`1yq!4+cpfg)UI&8D{ReMp5fBiNDk{ip5fB^? 
z0sqF%90Tv@2cy%$4-wa!cU;Y#9(dVXTDcP3wsf>~zUOLbammc<(tTIg2NHaI5A5$b zy1F^o^SU^>Ih$KrIXPQf@;X|&N?7aA6A&Ep@rK@U{dEihfoDRoj;i(`|34KzYzOxO zwZmii6^S_}zrj_diV{!IkW>f;7b`+7nxc9Lv2$h43J+u+L!1iS_eKVX*=JiL9zZQh zsk)g1@>F;e8ZVs>g{&E?@M{K~lPMa!?EkF+>Sam#)hdOA@%kLGy z!ZMx?T)9iv*2JGLcY4wK#uD4+T=#+XtX1e2o`}j@oC$<&QdckQBO^ZrDCLR2!?eD0 zL$8r@28(INwGEA4Y|qEMvMLm-TN`;wmq$=2{$cW`dUf0x=`#%*-Eksaqg-2^xnB$+ za|&t5p1$bPmVs-%$Mx+-`5VWF$xib_^sEw#Wc`us&mY4d zd|Q;8-YRl&_Fkj)UM7{5N_4z}Og16?ka!!q-h$)EIn`)Jd>oyAXGbZ({Jug8upGX*Ia>Y#2e zeIy`wM4%{t1L`?2UqPyIciL}8b?w6MH*_ORbH!vFmQ90kf zsrG}GIYbvep*G5Q5i~F?SyWu;}TZNwI?+~Bfmgv~;D3on)aBL?9dyC+a zD|6sw{0kC(R6xZ>p)y}(vt63;0hTYQmFUVb*jCfAf@?3&o|TJy`;1!v8kF=p!L3{u zYtsstfV4GMTeB{tUB1SE010k!OGp9qT?il>{?Ve1Vld0em?SmuOnHu zqr8bLWIPZIhG{V^r@&bn&OqK(w&hw>D4c@g*;VicX4gbqTwE;n-bs^?K`|1M_a<{K~4w@-sZlr0hBRW;*Y@g19s3KQDs?e<>5Mh%zjFB&nfBjx^klRis1e7ycu zgL^&Y<#&T(wKHB1wa%n11aOOW<=^b*NcXRK`{qvKWu$ET-Q0tT&4c>(#DJ}UgQbIN zjeFg+NVS7`>kOs>qjsY6-C@5?_6p-ri#PV6GT4}=SN?N`W!*L;+dI7hetG%k=)J`~ zyWZ$#woLfPjjFq)YrEw#lnHik+#zu0{>(u~Vx_WDM)Ml@r`op`74YUqyMtAUv8%ox z4OZKn7FTuxYA3Pk1zYv>9rDb{TFW22R@qUW21y6*G$RvYi^=<$?EiS?~M?+fy#1KkJ9fWb;I&*HL1VSGJuFHV~@|h9?7jJa zz%GO##l)Yb3yrf5iqGR92*IN)-`*Wb;VZf#bc~jNC z_g=axQxv(sb_jlc7W>*5PG&wU9lk2Cejaz<-ut^}mv2sI?WSuK?ff+8n~tl!d|jo6 zhz9xEKyfDKepcy2pY|3D&hJCJw<*R}Oa`um7L_m83pLTIh$u*GqvfY_C6Xbf_RCBtvC!>zaZjJ)^D&=n>l{iO6V7tWOTE}lEzmD2jMyIW_4YL1xn4|- z>xb-ti%R;ygv{X0_YjxN@^Kf&lf-A-z?SRdeR(#h+Z-YBB{+Db&}Euwe|P$!c4`_; zu-L)pDc@->uzqY2^;x?V?=dy^SCq#*+dHP1c7J@Y!e*tHmbS^D%c}O*`K$ur5-*!L zcRyxYC)t2q9>B@8FxTVvGt0!`8~JEIX>sjOiSmb`vXx7@Wiq^HZR9$m>v-OOhku5S zEJI%(BZ=ZS&IahCJ1Pz>Q{{k-wFTtfL%g{KGXQ&)ViVh1wcK;H%aS{`M>~79O)Oc+ z`2M%kZ9JPFTD{xB7t6|~wu`abXE>tY`ta%x@1Ab`B8zI%E5|$8b=%A5pmbk)w&DlFJa_9xB_ZSZ)6!hyIlS z*2G|w^!?OG2x({Znm(3oVL$JnJ3E;~IsXXUDtmkOslC)K*N#2jTpZdCl-=pK*?Uvq zt43dy-f%}lrmCgcU-@Rs!ESN@yd+LZoIG!sK4TFZW32Db6$=rT5RMs>bo9EwvINDz z+fnK?gCL|nhGSIS;h*>+st?&%*zRqUM8#lhV#34!F|kZ|NNDaop5%>*Pd?bS-U?Da 
z3gzh>Uo;ALi~W4=_|3w6rJ%ae!yahrYpz1frt>48lw*?k_zo6AsFUk7E{D{Z*XCwF?lVk78<6#6%-D9(gCVWdRGo?c>YM!Y64s`ADK3 zUH0GFVrO~u4=WoXq>lfsnAhP(2Q=XrtT_Y{^B)KBLyeDur5WN8oSA{4l!kM;wn6qX zAwIg|PDu?wPDOzLM#LWo-=Cm2iBT9UPT-?mZ54syxXhE#18!n-xhLR?mx)QEdX*{! zQ5s(7Wc7b8t1A%T=n0k%Q?;)=wxXPe>=i<9=}ss4p|?Bt$oJ~}D74C>$JZAE1?Rr8 zn!ZTpz7&st_8`SZGKdI8XhSk!A%RT(TUWXH1g8X71ox?V?>4@B!;C+c>({ZI5Wr}% zcLTbHhk3%*E^$)}4jdiLXWaVh=p7KnFUf;5i$bIW_IJ7R%9#aN4rW|yoG-uUG3UL) zHy$wA97RIJaY)ekjtH8)5OllaR?6-Dx}b*f4(ki({c%3&ZK)Vv8DR~a7!Z28tR|GM5D zcAbPMvz7EM_nCO^7s)!5^)0maU^3$zs})sh+URVjI>QQIi_x5+ztl%?M(vA+_?Jf? z2rua(U=1 z1phudozPeRRslQ(`Hb`*dx8_}rU=>xk0x32?JGa+!b+d`nHrxuy6E&h1lM8+4d<#L z{y+klL|~SPurPz9y9%F@K@Y;!#l_{ujmHh+nORxNEEEzQxkhCxtE;OkD+2=q+;+Ml z(d>2$7!(OP`EBNNdA)P)WnxYvzS}F7fo#t*Gc)fCb5de`D|>o$yL9#^rS7$slYj@R zKKls1f0|=t#=*u$NJxlY%u(v$*t68Kva&2B5`_hG|EzX2KRocj&EntKTtlHu{rFbjCrL?O0GF;AwXQZcF`BEveh`ov*vFIcC$Nu_* zW2OjY@e`t(aN|nf&!0YdE{#;I=p`4DoHz!7KscoZ1j3S&^Ez+dy!rI$)AmI>>p*w6 zfy+s8F)`Ca&(?1Da$Ky{AOu4WY2$|!54s-gZMO8BX7v1`W^Ke917YO=K|Jx+*s`aN zPE_xln`1BhED0qzoFDa44ApBAAWcJ(B;}>Gk6C^H`1+=YyWr zibGo@RK2{tz0)%?Sa|~6NZnN*kR5*CUI^9H)KpMV03OgL3>6$4oSDg?SW;93)zc#< zCs!`5sxm>7BNri*--Sm4OWhX+z-?V8(dy~#?w*A?ySUuGecR2@-Hndp>Q&Fh&(n^r zBR(J8=5&psjLu_ZP+z%6A6(N{QT4+uC%~GYqLL$(D#?p0EBE1njGbO&4A(B%6=}fa zF2}U!7#olD_h+sOi--hm&?ZkV&CQ)Jj!LLD)v*2M{wd(VKeWMI*t|?aB>ztKr%!iu zx<^KGTLwffr*L0%FB?wz*wnFNxIs8YDN1mH{+|Q_8jReJ7D2G+4;OmWagsCi1W!oM zs1cChU;i)rg8G=4_wjqjPtX%opQW)K*du&e1?D4opqco{c4 zRlWW6v>t8zN2XD9$nf?ezjF?_b7g(kNGvg~a$ac2X;3R)>I$v!dsxY27p!^WXbgA@;3FPrzd&*( z88=LfAW}V(x3+Ni%kM2}V86E#Q?>i$IJwm3kW0F1jIpnd7pBvXiIe~Lu&nY!>(;q? 
z+e_Tk${z-O_ya6mH5!DDKYhVA#N!VQC+_}D5r;ov!G{=j^b_~C#qr;R$q6Hhy>Rnr zy?|d@TfhzDKoCsY@0-KF#t%LK$2+yFgujo$UFZ><_dR@SBD?QQo@p2E4H`}u1|^Kd znP@b&Fj~yk)|T|<(0P(m!;T)t#>OlZ;(pk*Cly^q969aq_7WkEDU&ZU%x(BnNzh`Z z+TuMHhr|U1nP&m2<>uzTq8BqmWc6rx$q<|oC3te49~C7AEg6&+5;E6Ae*XOV-McU2 z%c$;dR*JKhdT|!Zf)3$d;gCSo*#WSbD=TFJ<5YiPCYR;S_k1Rm^W4=bz2EOcIoGy* zb8>Rhvv$dbsm${4=FI17DL%X*tu2pE5a6aI*A`OYy^g8a z95K|@om%%k{`gCKqNL@k0hW+IFk3|c>9#&mw=m>b;?m}>&|h9IWewrsC9L%gYMO+3i88f5haG|%24m^>G7^Gj%k_P-0_(2yC-$>Y9%Gna|>-R zFE4BB41>>yQ0=AES=2NdiL`9H1=iNXhxry~PCZ>%SSYnOF)_J$^SMF5k1vGdg}dwP z>y9OcP{D0||7l0%(@M(9Y4%_xRyI2|JM|_{*iH|LXrJ5Oe3zg9T}T-Mc{+dQHXGP1 zuu>!`?xqO%p@+zZ`>FNx_c#GE9FcMcAk>rd1i(0X>_vOHd%#g|V+hZP0^q`Z<891i zJc#k5OvVFccSixUUP!cDooXxCVmclfaV5U>z17xY$M^5w>+9>UU;5!*251WFYEuNe zcD9#h+@reZzprTI1(EhxOh7Nmy3TxC;|AMkdE0kyZ*Snj`|mT=Pfzo#YYqv}kbEpE zdimn6jUvCQv8OoNv`e=Ma6%Zz$iq zq0(<>-XNiHD&7gt&>(Gc5T2`7KY6bsAGU36dHVZTe!nojyalE>d_Diehi_2#N)fch zdEA+4Y=O-;xr8bkcI@gIF7sIGFKj5%vhJFrTt#kM#o-PC3gepwU|S6h4X^Adg5+0X z+hb#6-R62KR+_o$qnfN0Um0p=gM%O44E+0vRhJ#V(-*4CM8(9YtEkLG4VE}LIXDz| zHK3LtMi0(^X#Eod0^z|Is3<8fiaT2XDgw9(MZw$MuN7lIs-H@_&wt|zgu!uAZP@~#y_Ws&ag?)s?O+FG!b zWRg23ohK(JZ!(9CjEs17`$nh|95GH)g#LX5!Cw&nCjGxgjW{+~X~ALaCxD1J8CARd zi6=F3Keh>u*n)UFh3yHIof>|SKaC>Vz<*sg6Tawx?JTi% zm)fXAOYXP7N^~;hvzu5l|LnnCoMeWeJ<)NRp!yUbvar%@{Il~A|K2`E7LYwmd(}1U zcb-sh(ffA3i|>=?nh#Kb2b`R;JR()yW?7ZwZa@2k^2zsmUzhC4X(gAg3n}0t_|bW5 zV8|YoO7V?=j|<}auaTCX(M-R$BI+}#j*#bk?BeF`Vz@sOrv=gPAZ=?vgB$~+G(9N6 zKJrQa4sN-cB5dv*Nt8o2_J@_hi2iz(V7MIH&2!+vPOuZ;D8TQfK=T)~z`w@dqbWj6 z%vN0dncByi{}JZ*OX06WL(wiB($%Kl7Tl-tYYdX~Iyg8WU$A|Sy2GujtIHPkOb&kK_Uk)SG0&g?bb%z`COeWrTzk|_mA=~r zMqJU-7pOyTy)WpTUM>Fk5xJW(m6(`V`tFX8re@f~4BVjcqbB)LsvvBmE2SAj*?llc z(mpk#Kl}Rn*4AA8{QS(#-#2M22+ogG7-nnsgq#URvial$fvDWgb*^nfcWvyW!(b6# zu72UIb^W+#HhD1~<^FYzU=#|a;4nTuZdQtfIyvRNDBvCZ_U+P2=>XZ;-wWMsl5`!@ zowYgQllL~ZcaHSLF-ecbNDuOXK9?0%49tfS>8nln)YR1L*RKsWREap>Q{u4yL#vaP zdQ-ZS(Qv6-)`%@97@1ie77E2yEki@Q>~bKZsX`g0n}1(1_a!_@#?RnF+uVBTU66vR 
zGH)OoSG+Ayu5pzmzPY7k-DUAsa$)PG7qh|K80+oldpnze-L(j1YJDgwLM^z*L`BU4 zw|6)FNq7zrU<0K1cCt$4=4$+f#kz zO3S_k-1JA%%xpg$8jYJZqAcnwVG4SBdhYWB3f#O)JIjWPpVr2*%D;mJ3#fG5B9Mcu z6hT>fd9d1hiU-b-5)v;>u0@^Pg{%^K%}uVOocyMMM#(k#TyIE@+hS%28zmo~vX1Fh z?+QS2hhcNeo=>WO#0ps#*sC8b?>l*vPnw5T*>x2%1u=Hy1UqdnC1{<*ML+?l0axWi9MN_rr^%E8B1 zRU6zfResR2(bz-o`{Q^C+J7fdW(_V$Kt+C*XFR1OXOXDKk5J=*))`<#R?$KC(=ViNHd(1@WY43+4*_j{q1Rx@>6H|qV=HRq6bcxgVP3sweZFrhJ};+UB#y^ zb(t5&>Oufiy(Ga4MD8p&vSrc65mE1RLT^YzM+{n8RwhY90(M7n{U?sB^o*zxzB01b z5_5?vfs}Akx3)MDb(!r{`2m~-SnWR9Olif4UpRLJAV#MQD`+7iNsxt^84WJWHlhJ7 zfV`=Jfq~R56I8mjTJ#nhv`8CG*_SCKfUU2M1~Nql$g=dc0~B5!yJ^LEhr?5g2**hw zMr4q^8tTdqS7CHyfy0g^EtBE$baC3zw6@sx_z(+(#Lr0hi2u&Q?NCNaMZc9McEJ0d z6H~wSx+E>Dj-$0Ao49dv}*hE<&goOU&yWZKxZ*4HtMF14r=|`xjE(BjEs!w*^K1quYly|^n?_&jDczFM5FnFnwyozy=f&R zjS7Vr%L+I)z7@UoWv^{)d_kw!qUBz8MJ>8X9cVxlTl_qN*X0x%hX56H@6r((IVVOHt$3VsYmgo92ldxW~|-Wqm& z4!yroG8a-9ty?M&!$u_Uf}Obi0!xuBhW2$K3^Nyox{aWoRi!-h#qeuk26oJ!lUzQ-3{f)TomHu$`Q{n75?A!iyS#deib06a57XrA zRb;wICX2x%fw$eBdn0L$#sQwK>X$X&D$I zD?a*KDI{K}Tu$r)Bbt~X?)65MN6B-BJ&omZSf*HxQJJoDu1VEM!Etw={6`h_M;OXV zS`4M=42915ZT9J_{|9XYSqup@GakGu47P+U( zcHX)@A?)E*PEM_muCA`AsHl=*_rbAN%UJ$K#^X544Y$f)!g*z}^RuUAx2i9!uhn-R zU~CG)vPDs6JS#>>=|vL&3CSaQF1PIIdoI55+j8~cEs5BXuHTl$YVy-Sy(RKYUf#8Z zoc!>NJ?+xoQ@n&1`l1n)n$C`u(FaBlSiPTzO6Yjh=?oZ&d!X<)m- z;=&?hVz>@=oV^P_BnviZ5JFCV0spJh5F%ecYrD;*ly%ve{}k>m@V)>JcAw7&{<#|N z9o_Zi!^y>M4P0gfXCHzGZ1=jJyO^A$)G=%VGsfwVqhJ$jfpB@WM({Oi=zqC;rthq$>2CO~!@LX1Jaa(Dz zFaL@Mm11Lx0pPm~LGtCsd4mf;U93*F#B^ivNe>JdL>Uu1yDZB@JQAp^9BKtbx=5)I zP!axB=t<{(k(j$m_Z=O1uU>uAJcq$h1U-Y~O$^68{C4v8ty{n*bUimc97#uZQQ`v0 zDN8?)r2q^WQ|2^uyG3bg=;9w)BB%t)KF{4lun>~-Dv9N{vs!ICGaKEDtB&a z7iG&^e;7;zmPQC~i}yJ*oA09wh4kR=7XH4w3X<-nO>#0aWDyG(D`&=YiNF$!;^pK7 z_Fm^U&m76Ar(mRjUTddfMOyR+n7gEF0lTh5<}hosY3Av9Cyf=ZtfQ;z?&_*wW@>uf zo3&<|-!a{Nebz#M$-PV)?e`vTpqnDpxA3|s1$|+v+opPS3d$)P0Vz05n2MlMbLYdIc(tPT*C@Cp@HWz{E3zB0^C$;T8 zjLVWs;R6fpZJ;vaZjqg1bpthiF#}tG1%`CWR$-wSeQ*aIXrQ6-Ziy8qJb1H~wxD1X 
zU*3AJ^x>;U8e_4bn3#vFD=JsWy)9Ph(3s?+U07NY8@WgyBoEEAwX=H_<>BVml=32S z@=kxT9lKZNygvqJTOx^(fzF06%Rsv{XdW^SipS%!VuzGS5hC)?d_#Y~zW!DRH{hLE zwKgD*@7|qv{`{oc6cGN83{4s)CSWq^7UmhPVtRm#VpYc*iB-Umw5e$ynU52S(uxCC zC5yF}*JppI$%Ywlrc3p(#wX!rWrjvGC0}!mB%72iH?_r}Wy7BZc22~5X81>=ov#YF zo5w4;9N`-Xljex5XOABfhVfU>1jz&1)QJov{zND&9mDb-UcyNtjLtT|DtiuXG}lJ8J=l zvGV=_3PzQAcwb;1;fM|e0YeFlqTll4tz8~zl_n0~J?+JdQTEOZ zLHFsXd-F{43PeOi6sO(P*E})@*8j()4^y{Ibkp{TYSRxnISv*UU!Aj<|2aeQ$G~W3 zW3%p8oPEj5Tv33M)NBGIS$wbKZ5v9Fry;zd2oXRFfN1Vs*s~YeF60;8sUVh9gkZ$M za&hyAm409f11V1&6p(Y_p};-%)r=jMtkbIZF5aJ5@vdms>9SB1Uzy+ zGM_y&0g|}XZe{YXbvA2<0NXL2t7S(6_)=WeY~h@=V7Aw=_l1$=x+KLQqma|u)^_pY z#cq**C5+LFQ<_&WydAjY@15yLwl#-5kpAhOQ{@@3G<*cd?5OIft8BvAHXjg5hTvB5 zVR#*oY2m}NBCWT%0}e9;Et5p2o}MOS_%@e@?CjJk88p(>P0w3dLRX>3muV7J%6Tpj zDK4+9ICBVQ6a}$@saGx@i>qg!ObZ$tGwXh}vEg1Zf=a@IgZ`HB$IW(Fi(r|e;>0UrgrPohOn(n@e0PFOH}Mf@@R82>76XX6|K zK*JB6O58S2K9AqHzwVQZzaNxUj0aeGecHWN0TAIWQx>EH3}%fSX9Ix8>gv-t*epr@WOsX75-BlKpq41+cVrQ3&2hluhJQL}zk&b4 zFPn3}#tMK9U|Iao)t>fRa+!5@A0|HR+OmhE?^@1WK>n1&98*h$Ic@u)I5`|1TT4Wh zAPgj%>Cnzj8Pn&DmUa_wqp=TJz-fNXpwT^ujvX+B+#L_D02Aj9%zFEB$lp^cVosvt zGFzvY?0Yb*jP*l&@P7RZx82T>=sW~Q`>I3Yd8fO~Y5>a*hf;(G7s!u(!EIR_&ev&d zpGe$0Q8At5!w-E_Cnx)o`8E|$FSMZi05DKO7-u6*bIPufv`tU~ND?ZFiHhca^kc6` zods@*Nm{oOAh4511hz$EbH3l4?9y@K(3*IzO4qRW64Ab7Zgr9gL$4MW7ftzRoIj)D zwl_4UO(MlEOwoJdmm;`K=@!iP?rIG&pMBqlY5<)U=YSW1OL&H_{rvf}#`}JAD2N)@ z9|)eo?X!M!gy6@5Pc~iYKu`f!O-YVwIui9#2<0jjPQ5NHid%%$O_y*B(kc|LtE1C2 z2COqw3X8?VS{P}4Yw+&wZXly~BQ$6t@@p-*EH_@-8*mnNaexh~skz*#%*Sd?1B40r z&IEDKSeu)hTarEq`E_tN`#U|c&EhDK)CT+0&1<+5!!1O!!)A1mhDKv9P5JwMVKFg1 zZ_i#Mk6D+LAo&zH{4yUO-)9PKhbF!41xyh=D~VhGH}dzUh&LZ`&Kr0dQ-qmkeH{RI z{Wb53`c~$5AmJANbK5sv38-~RoLV2*-kk$V-r~%2xu$qzR+go92X{SVa1wGIDBU8K zkM{j$!3^4oz`#X(Yy`lQY$VGggrIX(U##45EcOQspZa}KoBf-G@YU(?2fIpWd_DUX^G^IP8q)(FcZ1L=m(53d50HcE7V z{|r3vp(&~ZsvmXyr;EyOV0;oMmV(Z+hx=&VxpOrhI1n-OI>e%=@`MtDp4ebGpw^-k zL08Locz9G7Us|5Bh9o8=2XZ0{+o}4p9IQe4`2xlJ2QAw?EfU%SP`}yo)YJ)J;C#GRpDjo;7@mt^ 
zPXpXz-XD~WWwP5=6s$t)`9Iq`R&SaJ9ZF4SVs=|AC3b)p9CWBmwv@7N{-(l7`VPY=Ey?h`fS?d-3ppo3SCO=EwkoDay3$MBhaw5ux!)YjGp z9=q;g;aya~c>OEEYo$LL8x;3HsM zybw*z@4%d1E$eDbt4HcqnSC}YI#?T?P$i;8~6tD~>q|NZ;rAi(%% zqi94MT6~Wz3t-hUKo$;3+v7vhHn8y$(K2_7I6(*!472Smg)DnY8SCgwu6rlOtK|oJ zFaM?M_5s8A6>VuD-9V%P1s6qj;uvOMO|rMBg&*yLh|ZCu|NoF$YHMwM@ZmQ#0cplt z?=>eQ$6kOGqpjutTSlq5z)jmf!dpqyZw}LwW4Nvhyd_4ZJmxS3vuF^u%qNYEj0� z*M&X#<$;uj!#KIXX+cdBPGYs^lJH$1`g}JRkg>glvhUN}LT5lai#Y_V?|<)Le~&S= zVMPULIrqQkjuH|QhJGiHIL{_l;l)^>S9OHeoBcza$5&F>S^?#KL6`3qa~Q~(!Yr#F z2g2p7+Kwy}koS=X%aJ(@ltkEGV~AekYsh$CyY*Zf2&F@F6BjFei%Lp%d}Z=_>0jMq zXJ@y3)i8ec)Kg$;jnaTQ{cQ!|!aHv9;?^$oB_S2%zvzybv+l;tElcU&dwKPB0;TXN zaQGoV!12c4GkfIl;6Cvh$72A)-zNj_xVcsT?fsY`eAU2Awn`n+V!ZS8YZPwLV?Yhu zsj5}!?Uw1kMPLnR$B0iHeqO~zd&}&h47+LNzxIIu3|r{(G-Llqhom$;>NnYIBs*fP zbsE%;i0P{SZIsi#>_w;Tx7ohw*^65|9(U)(i=X%%KVmxqsBT*X`R@Ao-22eVh4Yxy z%yMJwaDA?apKa#)1u$5LtC=4@ukW_u7C)NhK3Q%Vv=c4 zwEiz{_KI{}ul@mGXC@;XQ6O73`gKS2O6G?<|C;Rd7&ua-H}9@w$w63#<6f(vR0}zn zjbDU*2!{L&l8EZ3`~wuL$N@FP=~^7sM0x$ImLLKDjYC#zNI;e%KPM2gCe)ynqGUtp55O9`r1)m%SMR^o))>0Ea zX9Y+iMc9M7pHk;;u1r;L*7{5(pm~F@&J_TUXe=^4SE7iEjGeS8-EU_j$FPKVeON3m z#gbLx>F^@1&cRi`C*&XH5HhFa;XZ?pcD)S0v#hMiQr*v=AC#2kVgPHLpP6~{ z6{OPEl%93m8&X-obTt)>EVyR?2NP0^cQ7y7+^phg|6gyyChcSFuL#mTY3|0ef6KN+ zYGZx9yQe3ZASyEQt64sjD19U$Ox(w9WugfPWkvb-nr@$mzWRCyMGYql6$*qkFLHm; z0A+vnz>hqVGk8;!UY!Wzf_QxWT}Fnrx3@v(ie8NBBufgYLe#^k*QLzIDYvIPQHm@) zmkHz?7K%n95PfAHPFF9)#l|Y~zBMab9oR#Wb~}1F?lT}KQ;b|PYXM|`1jvJTrs8z| zFF1yV*Z*Z=*f=2A99v~t(0zR~v1!0bMrYE7)PXFg5w9LUjlGjoZ}b&f%-kHvvVKY& zuAT4nwz0{a;Narw(%ar#Li4Hi^~W0lKUk?y4*q6>8#KvR>`L4mr$S@o$PtNm%X;lUnX#r07|C@ECq$P#xLcWL0Glvu8sdpD6D#n z{5Mwu0l_gYE{-DTYj>93t4&T{9D+S+m5?fv4gqY!*Zwa z9DKFYj8f|5Ds=KL>AOP$xdKA+xE>lb9Q^9-=w9N~|9C%^N1t|ZaPVpMyNPo#5`NgC zwJIR3sJJ*ejRW@Fq8yx^ok5!IbD@HqANeB)u+&87$hj&Vhy-1A56v49KT@5&=&8QG zx638M)516ox>xoNP|)aD%Y@p|4=5>VX(JO8r&dtfB)af`O;5M#Bd3C#-qp_Yp$*nn zR;ip#?*n4HfQcm9#cSVRVCm`#Eogekl45iBZk2yUGbq1hN$i#Wj!v`fg#;$nX~cT{ 
zV*kL>GXQ9D?b_SyhwXo8D0d0xvA=ofoV}TIbYf)t409)!a>eeife-$H@;)_O!%_CP5r$9igg(N1!dFO9W>|n7teO;@hq-2)=*2wA#Hm>q% zQNIXvXaneS0ZAhl9ru1Mn;Nf%;?EKj$xH1(M&e`!?jNyuAZNOWkkI?iAbIBg5^o=$ z^$s^SVE&h#N99J4Szq<})itsp)yX?}3^e;S_eDTKX^V$-;ihsG9&G=sycWTc%V*krl6jEs!Yt^om6!)@ud>4kLTx4T@Smf?Dp)cx2MsMTqL+=rMQEo!`6)Fz*V19q1cG(q!Q-s9po9 z4Bi+n8WP5953c9MAVD4R5jkzf`TwAI#8q(lBdBz$YiVgoYfR{5i@s}O!q**aN0%HK z!v+vRM{HnlaNQfUyY^F=A|fIpz%ohF2RWit^LFXGUvQ&_b9I1|j`s_3UOAo~e$faL z!-Fc0_T4f5du!Bb-1|>%hqvtynH`&hQ9t!);2#Gxt|a%Jz|Vg>f;EDF(_mcoF<0xa z&H8&da5VF;UjR)w1bW;(-ede18O(%v1BVOWcLFfsgBUgBd28H}npCxeT=d(RWcT$@ zgoWKn=s1g8YljI^w>rGXXkE3X7fL;p>AyOgfnFSSIrfLg7}4UtT(!N$B#ps-%JvB3z0FFxpZ z#oE&3nMu#nU)n=rsc}|n*R+FaY1X@ANoHe%w3#jx?cA_j{;CDEH60BO@L<8;Kd>YL ztt9KvPnlg6m0hbr?Ii&eia2h6cpsFa(BdgjSt{!&?LOw0Yz&OTU*iVS4WiJtG-?x_ zq*7OOAd&vR%-2|JMDUzmZlcp%g6BBJ!(+t6QozZx`vp2Uy8H{y^;$J1mtqRTfZ>Ps zvU6})07wHe{8c0$PCU-Q*^c0TNhqnn;`-ovq~Q5wErvw*gUJK7YC+zGY3s{r#||o) z_|<*sjYi!jTw%yuPRR(_2Tv#^6+Jc3@Lx4RpW`TdvacXxsL7`Xk7G~k5+y|*1M9O* z3X~V^?5=j;{>;Md*{Ax|31O?x`Fuw^_UAh)SEBNy&U}b$6kxp_&0yjjX;khR)_)3b z|7uNwKl6}Rz7p*FLS}m+A`$;Z_J9@k0BlA8wmZ+i_`E4cn7PtxZt=>At7XoUM1`dI z^muA>T?qB$`09Lr;j-uMcJ`$x7X+yF+^rX@T$cKx#oQ3dpzrM4q$w&R({6tpzo9aj znG*DJ?}n*=!?!vTO}$KzJy?~E`Qi1m`gF%}ZdPZNrZCoq7zRs_rm_BrPnkULCJsB! 
zb?m4FZ;bZ(#C5iljJO6V&LqOtI(f27g!pbC&r|=1U;hDmdcc*E2>ijEqaz(wk0eTSibEuz!jBKZRa!2pLfbG223h%XP zvDdY&tuHOvBW{2Blo6e%!Yh(j8N}e>?v8mf)&>$>rDtG7HpE9`%3C9j%JW;WO9{1o z*pVbX`oPoPUZn>lS%8Tx!DUkbcLZ5B!$|JnKt+cXz^?HvJ`i=lTvBJ3^=)l!Ei`jX z3awP77-Q&CmCM{0C?d4ySb0DpfKli$Z{MU?0T@wGn3gq_kT_D5;hrcQ<-8b=V)HxS zrIQBYP)l>OYd=WWg!NK}GCYbWk;3)mfL59&4UuZo2lwwQgpT7yXlx#Zs(4gn4HXX1(wtza-w&=Z{dlN{}sJFW6(zAQ%783 zoQDE`BtT8yyriXGx_B-%Gtvlcfd+MZItCP_P_P3*?v~}$sZ$dU;MsrxJBusBgtS>f zbb>%zNZDfP+}EJjGfk8{}I&W^1CP%robJH z&+dz>`H|_AqVR>re&!oVClb-4fO_+$I+Kae@-(;anDsVI*hl*bB|)Op=>Ak5>yKOl z)bX%VG%gXwX+K=L4j5wieS9F+{fI)N^OiD>vU%IMbRSQ!P>>U7tl$X-2|M|lUVtot zzDDO}W=CXgx9XtBW%D z(&M^kN9DQ!`EE{KF|X}+hu(X6NMVrBY1QP?;&iTn^WMR538$z4dkINN6Q2c094r3N3n7!L3&}K_S#x;uW^?w6~Utu_-bh0IIvzc_$szAnGduxeEdL; zF45_LC7dW&J3~$S$No>3Qx>m2%>pcxR|j>Tw9MC!;cJd#nl-xPKuq2HwB8rA=@)0c zedAB7Wo|0mmbYRHxdN@kccKwMREZ%vcR_oqui^Ap`j|7sV93Gr;Bs`}zeBZh_>*Du zzcuUp>dC?Bo!?z7_}BR69uSqk{OQyIPVcwD=m^L>9gHPfsb384Hq^n+63i zKs@<_-cZn%_*B;*DZ4mJn_8GM4A;i_r25_90;sE<0R^o2oR_IWK*^mVOyjESMB}+K ztR>&Be-)?>t{WX4RbUxg_fGrV7B6N}>FZ@IDTuGcuuSWi2CPG8Tg-8&4|G=_IXC+5 zp0R%hrVFJ3w2nMnE#cbbS&(Z)A~}owe__78w>QY5Rf4k395k@#5%Jm7^yhWl_)!z< z?rsvUT2wa)(_XB(fb##jo*d;0B#>Al$C%?JAkKTZ3a`pfhu!lIip2#UDeJL*>@ph4!qEBCQ;poN`V>EF%y7aR_2G!C2dsZ9}?(6pmY5)@ze#;+oc zc8}XHr!_!l+{-jhPR@m1^F~#OG_n&H5?rmXJ!zNExBq}(wW`mVwH3Xw3>== zBJBp}y7EyTV7lyz;3d$SB%lnsW%5^ma@UvgUb~N#Kli&=40J}s`7+u1`RxHDNnK?C z+NUeZ_`Xrw6fpQJNChNpwG%768g_^C(#N*mmJa4CK(qiPZ3 z+0q~}JXfWMJ&@UWtzKWU>!Fr;STH)o4O-bkm@})SVzJ4!>?4~tW!lIS^!(>R4Tu=( z^eH~$a!;PstW}4(U(F>A6sZT(8~{cdVa?*8cMz6l^=YT>wh_}oT70FOUvHU=TgpsV z9@;}b5t8TBL0Gca!I5cy6 z9Ca^C%fdC(qckesuFhTaQ0yzP46!(`zE=#Nn|Tve5JYnvXB#xidCCxU_GsAI*_{vi z3X<ZB z&M~t$rDO}|n4v;AR>rZ%@4C;Sp3isv{d@lCQPzFm@B4kduj}=CU9W2x`f$W6_9S9J zn|Hk1xrUc*WOfO#?V9-0PAtq%6fV)pGc?AFh>5Y4fH8^O8e3gN!OoXNYGjegt*+Pa z$_b?(y5`tvb;)F{VEy!+2H&Z&1zh+Gg~Z3bs)>k86r^0gzg&S{&KVSH4Vh}P&|#U} zdCq?o`X`}%%i1F3<5*^ewB;dZ8iLbNW0IzU1(1@^CvUz+qr0@@KGlV4l|2Y1(@;-U zZqwLC!xx~F@Q6A{QX~I4 
zU#B$8j`vQ1uOer0{!7-rlCo1Ej9VY^G{mk^G7MkkEufvXY+ZH{^HMj`)J8P5c~nQP{;=I zd3gW=Qk0M?cO79PCJj?iAaZa|gWg{vT;z}hEoh3guv2Q0=1LQf(Zr!q`GPI~_fQJ71r7{%HG!E2Lpl>I$N(=SNAl*6wp=j-QBdvag=Ym~}kRI>uB;R1I z(GJKDJ{5f2MW};FMA+{jVIgvsNbGewIOe6aVeUUi1fYWpeM3x&g~>^^%q;IsucJzYqr z53E-L;jlOpN7my+K-2TRxmQV`#7d4+aV;{0H#ToYT5I4CFsL%$JmF|wy-11w1b=QZCO^ItKNPSZo>zhP7c)K7KPE-q7K_so=`U-<+s6xNrW8@W{vF5&}s-%6-g6m4wx<0h);;%{a{WD4khHZut>kZz5VCH3MsQzL_~z1mR2Dj?%^`q4NGNg7=SP@ z^34)+LDeHZOg}Yt`8?gZbLT=IBxsD(gq*5Q7u)T$KO)^g=cXC7ZLIxsA3C-xUgUkd z;E+=Ho_E6I<=0n3#ygN}_#g>B4hrgW=~yg7y!_#6lXJcKwlVn`|J}<*LQXX?Ecd33 zptBtJ`SW-g5uA0-IlGQx^D~H{%_dXy+DUsue{_5G#{@2FxxDcNBS(heB;*l66RP61 zomnV9abS*H!T$@o$TH~}TD#y#Z4xH;nAlsUV8h^S;1`ZAPT2kMJ#=D0{28`sLhi%A z#vGin8lMIjY18u9JA+%$G z!V$lsaKvJP2Kjb+0U=vl^6+@R`PwY5u9bJfMIijW8Io-whqs>%7}PkXRNKI2?3WuA zoJ!WIgAIAT1CT?dvxonp4jP-7P@N=tt=Ob})XLf-j$PlAl2PF~CXAl95fUA_Yj{b( za0AZO06kE9Gv@pkiyHM&8)cV7IC*g`cSO^akv=sKju%{9I=Z@%X%47|$I?*+4<2l9 zYybN7E8uKm1_fy!AE)69?%VYi6PnmH8A8QUv*(oSPjm}ec8|@u>QD|H1zzNYZqtBT zfpFvpC(p+RGs`r3MJQ5#XlC~Yefmt)4l--kw)qi)jnsG4=> z9&V!Gp*g`oDuS*_3r`d9hH;YS9i*;`#k*qN~#lH zuyK}+HHUYB>5L2vwI4OoBVOdy6u(kSPU~>v^*v7q^%H*%_{exKJK?Go|Avx3+m!E^ z%34GPxUuDb=%jATixIwgnG2XKZIRv7)V4PK6E88gb$Ir4kK4t?`1B?RxifHNBl1qa z=st}c-`sDJFDStlVLrA|{WJx`*lf!FuN z^2zu&drVW?v|LQaSy?GR7J2WOl{9zC9NdR2wXE^(?fc|*@t-8`ZFdHR&CkI1PJLU4 zAGr;mHz$Ow&yWuA}RCf zE8ew#8u*w3EE4TyDX(iy3&-}(pMs=;0PNotYfFbZjuWHQWfqCY-XAa{C!?&qJIv0i zmaT_}ob6kY819?ihV|m=oEvHOU2J^!_fDDTlxU+JSZ~})mY-p4|6lhHN`$r?(nXFu8&K&n=KJv}{j z^QfOIaIX^FhYufs`vE_!vV=Zi34g1$@|OiF zYU=7t!_XHQqU0X9b*JtOtP*G(*oXgPR94)D6?*|*<>C01K-Lntm_ z-_B)@HYq#9HZtvI<`5#oIU>}3{>?Ib-AgYgPL6|JjbAiuyXmh9mz`$CdjH;~rUq?V zxYn*c@nN5c<1;>cY5e6XQky>PMb(%cN)1%%FiR1hxRpZLD^!yQ?is3R&JSw<^@1tQ z%DUspJ*+`kRa8+?DeRprOI7$jGgObJzvivHOlQo1$p=67xLPr-wt3s~p@M#u&0k7o zb0eA&bW)<$!r$PaMiFwk{*S?&2>M7`w=a}!X+E|}?aGw* zmx>B)RdTJOlUEw&`_A{~%?Bpq)X09LqQYQp?n0dl?h$KvP0kv#R{Ly-ze+^^*nJDs zmCqD4c?3FlLF7V`v{*WM*jB7a&k;)IIUpguhp1y7JV-lVa<@XW~Y1Bo1y#2{KR?U)w_Q|K41^pghQ>K$=0G$A- 
z1bFT>xQu*BfTZ^fXCzwU(&@uMl7cSd;GPkaQ4vOi)+cG*MP0RIh@irKN*|1IV*0D`owvUKSrUc;`$xJf26QmGE_QQ6y-o zyei6Ud_56*hS~E30uDDeH$zqqP&Kee$x1O@3(LJgy2DPGrQPC7_`e7s{3g zNRF^Rx?c@U^yW;6i929N&#UU$rp+0$f7-wp|06-rP>+-3T<5kv6iH|{Z@hD?F*r7+ z;LjD*jXLRb1KbW&u9n{36Tw#{LZv@(bYl#%mZbG9K%S%|q}ApEZf2F0mE+@SLoH`M zeOKuLJ2>c>$CVZqa`4!-g%PTi)*z|kQa(vQcHG

*Lkwed3hBUsEq=?non~7@nqr zp9{1<$ruDuqrzT0QfwY1?eQk+zN+T3{D|zC8f0Yis2C)`GU{R5nkqHX*hWBi^uz-kGh{cc>7z`>9^G~PO;9|i8 z5zy$NNH`^yFI{hoN*C540#W)SRS-JFjr_TKZaW{h@~bL5YH*cu-m~(`l_y~X0{(Jt zE35k639bd}Qg~QYD2qC{pog0{oMQ6|3rW&Xd}xRQ27+og?T!=7%ne^PpwS)h40vn# zjtQ zk@p4js*3IoJvnxA0NKlMJ`S2wwQ5C-j*LA1JOE*v&*v&R`%qlVl7r-jRdwhTB>f

z)X-4N<4>>UZR9IiBK_$x?*FY`6xNwrxI#5VWxuOq0t03E~ zM_YM$eH|lN+)ROMu(7c*H2NEuYPBIACY%OF2CQU)lpkG608Vq)_WPIF-qS$7 zO-{|mTale6DI}D*FAsyctPO3-O^uCu47i3QULz@JzVQ)3V<|8GnYYM`f0xkI)C7Rd zA{Xe*s;eQYna_ZDW?YpL2s+EqR%T|H?CjQ2r`-=Pu5tTH@Tnx^;x%6dVqk#1e6?!rXNsH3OX7DX{C&?qpHc}O!xg-pW?#k2Z zs0#}UK7IBq`y8bvxF?v`3wl#fVS`onKUFt4H*)=ln;3lW0KbA{;}YZ-niFacaFFYn z04md1U;hOo*$b4{sQ9DLY06_5)S%t=?<8~I(R&C`i)E>)r|MGH?DBs1ko*sRdk4)~ zEAMis9$J;=(bYTwBvvT?rNYli@R=P%q{w@uliv3&&m$g z5;Rc@Toa_b_)(~HcDSY(pV>&6r4e2)vs4l-=Qd>Xlkeo`l*Pg)n(YW?vkSQErQY~B zIqvnEDw&n{w$lC-2NBr%-HF)L=8DDDt35tHuv3ebBPG@$T9kL=-YW%7qVf7{Vtl8c z=Mts&;w~+uabHz5b>n@nt^LHeubtScKxX(MZpS3q8C5JTSJwPs(q&NmeRKB&ckP7s z35Z2A{Lp(sNiC>gZB(}*D{+P-*93JKloQBzAQ5X{IGO8tvAhkw6|NFKKuQldaIh@D zKbT?pcSV83tmjF0VMvbJQCeC^S5j-CX+IQITSNI3P@}MRv}wKPa=-IfUs(n^^(0RO zSABo^>Xnud>iH*lp@+C{25+q-lo}-~OQcMgSy4WgQSvk#$t0J)$Q?N3iCp~!Ehuwr zETU0wT;|8So;=wLujuR&&_O{0U!jNqB4^a8&H&X;0yi*dF$euswibxU<+jCPn7J52Dwc>VbJ7pJx2 zQUB0dqy@7M=I^+|8K{!@MT`HV+~|bUo4oaH$`c^q;A-J$j{2Ho85|A8 z6u&zp+MI!_ih5RfgGY}Zy$R;6Sj}EEw0em4WZJWH&O=@{(a_j9qwnycIXB5{Hl+M0 z?>f)6D~i@!MVjU)?c;Ha+fYi>M?~9cp;joF8dZ!%ilPvkS<)0^KjVoLbYh{5{#6cO z`b*;W{pqLw{6X&9e(hoFU7Tjwj|p3)2n8F+>h-g7rqT8euYf8<0{9-VM}E_ivg}uu zo9m?Nhf}+biwFg`CRjF4-H;DphttOMrYV8l`|nKgx7-3Jf-UdT5OTd^?3aReY~TEm zXg`m|UpEdFLb?F#MO0W9zT~WW9R4gntiscdgt}Lm<;%FkN&`bL;Q{b-CRz6z=shBr zDga*<4aWgX80sUy`5CM<&V zW{om_9S-li?HW+78tCbb-Vudv%-lGx%j$$EJjQb=-e-9fG*TU00G1)^ue}CciMP$! 
zEm4&6#VoP)LE-CSHQAll=xCQTXH(+cUm$NFQtJSwTmTa9^2iOawqq+qapE9t8V^Uq z>-eW~84n>R^*26WIV4NBKfER9>e|hUePQdLe4Ky2G#Nj;1?qroi4pPWtRHGsmkNKN z!oJ3SjuMIQ14Qx{TFG0r!ny2j)cYnjidzB#h=e3TD2I^!5+X!aOdDRuP?=9!IwOwr zxmMGt+(1S}(gVt-1-?p67*8Zxkva*O85jL;`W@!07j+)cQQ#<~;DFOodZDNp9MF>6 z^t80_U%uR#{_)9qMUQwSM%X(vC^GkF{ZKj+Gj%EDn@n93v2y-2$7OsBrA(|q>fz)x z2P|xnO^Twr*%r`cnw-d)+!m#9I<0SlNCcZ5hayAhP6T=Cd=HCRJQqsQUhxJ93S(b` z5zf2o4M{_J8B%EH>UAd<7dl8kKx?vZUkdcRM1dT!pod%P5N8EA3=taGM!+;^*~no5 z<|klIB=yQVtby0qXwl~Zka7q?mRY5i^o$5F0MNSMxlkGh`caNbn#P&BQgcFw0{~GU zm18~lC0GEWIn<9b_u=sP+=hf9Afw;3+{PsXxkgT?mY}c&M^ZS+wHlSe@fgJrW%NZx za0}@H?7_R<+NcD*1bwA%T(e)_e&a(+>?`vm2rF5mIG1k=%|cfHIMyydJzbTy%}O9$ zSR)#Abtc#4_hS8ulhoLQtU%xR^r@X8JIMxU3R_o-@H1f8L` z7fk#fhZgnP#W%aIE;?YZ+1CyN7sJKN!vl{gvr2=+Abp58xeeW)9iq~1r_Xcl9Pk|3 z40NN4uxA<@Kut(Ilm<%Q>6JCM5Me*7;`^;>lcl-wRw>p*v(^+a6)#Jpg`gj&8m8OK zF2w@203G)5{z2M1!#`*peYh75PFLyagz8(Dytkz~i4jrLKu~b8|D|iiF3f6o4}wXnZZ8 zy+l$>49gpM{`~n@F);$R+@sv*?&nx_Jtjyp82fX%SeJgxfgI8#%pKHTD^ZVHTCPP6 zLvA?FW|w!LD2`v``hKSq{=ZKmN)Wfqrb{TC7T3CEY4$N}XJCR-{pKmYu^UdGXUW(= zdR|aZ7Wdl0D@F1Ed_2gKNB#(S5S9W#mpFB2YHAATswB(t7k$}ciB{crahRm0&~^>` z%s%DQI+mrX(uV`UQcYPC1eJI0eDSfkm}cbNQXwsObjk*eb`Xz!X|GVBsMEq7T;6huyg=RX)KKW@VdBZ)yaHh8&ldqz>6sX^jOri52pdyfx{RV3PTYgDR)#(;p>k-G2e;2VE=NRu43Bffi!n1e<;*qHp!54cei4+x*W-7}>Z%npKUhUV; z*}pKegN!%BAJz(*ng^tu*CVChC06{&Su@0to1Z1!5BVgL!T(}2AJ~GC^&YJ3fUI$| zdR~G79RGoYk+XPmCQ7xb6+%jE)=C9U@_KD+06gP&UJ6W}(k5vE>_uJswdB{PD?Nr| zN*#h5uos($H3N^;7s+OjsL6*6QZd;1CjiHUfN0nu!(W$f`BGl1O7>(x76M=l5P%rT zZJgZ>>#Iv%)Abm6m$(GL^xE$7JhxukhCJ%)Xa6r4&GDl~kic;F!syuK0m>^i!%0R= z{yUn+k0o5DJCx}x9t$pPWp2}m%K})FsrX$=sE_0w>w=4ca0mj=ykqq0M`O3|Scd+& z41mt`3^8JWmNBAQ*-!@wI>$rfpbd0i%;C#l5<~ePIu!zp)@jk*!q?mI`_zgB3BZ>; z>yGhHJbC`R=uQi@c6I3)LNcl)$j4_|LBvM^1|Z7M|E3?xdG2%Plo?st_KWnIP+kI{ zaD>mQ24q60ddbt;(sGE#pI!%>nHi}#aUI6G#KpwG%%;}YHBk;TAb|1^)>Go;u`|1s zf+c}6JbNBIM7EAFm(VH|B~3W09?xA)a{y&kl*pOhP_-wo6z%1wP@d)jj7c$^lP7|% zB%aMiVQJkFWafl^tQ=toY`LF2tb1^?6RjI74#%1s-!Xh+eqLTctmTQ_zb`ZYw5nLF 
zTB>}h@f02(#-)5wMJqKxx{;kROc2WywhYR!vclYab~)0Lm&f#{Px2Q=RrYDee-NBM z9h!vk1O)-^{3E<^*Hgby3&xNp=yL9Q9IJhxjnN8>4kp7ntF>dA42_JuC%+Z}NIjLn zGhf5Ek3t|o4rHSkoh{}cjbXh)%I)|_;*Qg||M>lm+07Ik?#vQwp53zyuxN)Y586i1 ziAgst301j%R^8pvI&fT(y-AGA`s7T`Wuo=Wo5CR zV4(3&=aJ35pyh0yY+5Rd$bDP)hn#fChFn_8_r{IyU%$Sx15^au?A7!J1_v?aFIyE_ z*>t38F`T2upkpK2LiK~w=tiC^NJdf;A{xU^=83@CTDTp@rG0=u2sl+sx+{Qx#N}8v zM_*C%yRlVBL?yAav*XkIfxei#ws8VUsQ`|$4hIpC_oJIX{ot@kom96+qgw($-+Tv6 zeq)CPpdlwW4k1i`3g)x-(e2r3)&#od0q)^wjJ%C_L_TL%C!l@ss*GkiNsvlxVvyaU za2ncmGK3xBeCyLl_jpM@{c2Aq*_B52TM?23{?7hPpkGTeJGPrrRXnFgbNTN72{Ufl z0!z-`9oz_oGg|s0r5*;73p6CBP6YQCI#ewh%{Lv0afI>FqPvh!-{?Tn<Cvk&$2^QAso27t1AYBNqc#P}!u~#B! z?nPIc!N7=k`KcvCs`KgQ1t~}1$-4F1*) zc~E|}Z{2cURU#t)Vrm={6Xd3|Zj!Bv1N&ECOA(`XWv6)%Uo_I`?f*10lby>M-s%Ih z05k9oXySVRQT-+8rYIl>S&Xg;M2OAl3A`*N*~JDYLu znv({h5Sw6^H5-$+^=q49|92iCwH(-hQg{=gziw#ws(3FzYfxTGNLZ(VyA%isaOZag zf6lommxun6JFFNqlK$L05sP;>>w;7=27de#08eiXB>5G;Z?bQXqi~}4Bn7n2nF#*4 zBZ|^J6!-~x|KtNe>cJNB82u32Tw;=%kYxMCsnw^8oV*UWl!WFtSp};2-#m5v`sZw> zE=}^A_RAiK=D|$F4cXh-LLcB%(x@WmvF7thadJl=K?@3wIry`Hn7>v3>$1Tp2mM0bq`~zJ) zxY}F`UnZ)&Qy-X}bgqX!uTbLv%oAM*Vxu0Z&k}9F7|)n>EpkNG@C+hy0nE9D zbH*Ap#(hCt0bY*FC`2hHnZrr(} z!E0R%cYHEkF@`@U*B)&4izO-oJfBdu3GlqXS#BgjJf*q~VaEXDVuxq^TlE0gH{YP*O%AE z$B|h~lkPA0m7qednOacJNzmY+3+%H0CN3V88~;JDG|@spc>xjW`4HN%<+Wi!GPSq99WWy7CqnczxK_Pru-!po>U;+$jgB zg=$-MA+2|odWQNDzEty5{q`02$HvTB7hg`k#T#*M=#E@1CAMthU?4Cs@DrDkO0R=` z4JCO;;MvhGjImULaU)n4*kwx3_kvfadtH4!Vc=Vy7{nh2rVbb{q~1X(cYB)cKU#I= z`f&w~sq)FwW~HRmSp0;l&T+U&@jILc+25C=5|jLJ>Nk{f)RNnLc64;qVx}+jUd5K6 zJfz7{MVY+NyMWZ1WW+h3O2XDce~@fkd!0L$W``45eL)Kx!DXeh-SV>cikC#r=Qhji zG;~*P-d3QP@On? zdJrN|RW485-|CerF(_wr))CN8*RKx#w1ZQCp@l}|K>#WJp&1Gzi`&d0TlimU-_wT! 
zoR1Ngn-cV$`K!FmoW(N7CCW-_DH0#5BJ&iGT}^Ah$dD6iO5%MT)&+h2<<-|xa5!Ly zi)Wi8G#{yOCo)FDl7nNbkx^{%X;Wz6F}aQwLKa-0ltz2J-7UZpe9_A}W`SCq7x)_v zQvT+y9LvT7o}>E+R>;#N(~Cv9D;!t>{+mhzUtr4 zTq3*0(YTy~s;f&?b7A%UaK-$(y3DxbYqjXQu}A8;!R0G&4bqbp{zA|kGVCYhD^kMi za&W6ot(}VdKNMP<3u6^YmRc=5TW>ivRXSi$ad(d*Ff#}}Qxh)eJht2zux>9oU9N6l zRklh|%#n*{yCgD0Q#hq*uP4$xQF->)R;rj@5}{I4&SNz&0Gl3En^mD#S|*)PqN$ zIbA%q_$$m`N1=YeDHI+3V|vjRn$yd|!gZ8xn3`6Ok;Iv4gv0~|2Vsulng$0gG{}{N zbY~OTu*a1FVgsNv3hCcFa3Hk~(D$uV3+t`cOBbV>kN)Cws}`OE$Oy^6S$s%BMq1kH zG|R@jW8u-`lan<5RWO+j)lWGTGBMbE*SV2v>B0P*!kpJcyJ03y`|%MHi1lnhJ4FY z(lYH^x0q>_bXCoy!0XNpk8MCujfgqkwT_Y0R8_IrfK)c`-cLw+pUV^@|A(A+H%-Cu zfdl$*tgFwMLBEoa?nQ6C3U;|hv;2nP3;8zP7fw+C$iX}^0Qpil<NUv&P(m>}qX zVmL=egGcvkza*F8n!_7j(*1Bdtsa;?H`ziohTJ5}_xo89E@(m$2+iwI;l_IjN%bLg z=sSS~ea>KU>e|ii_f3&zxb5K=fmF4|ZhStXN$;~Vdn!X>klP(W;dbss^j2h)DLNLv zv8;O~&TVSymFH(_%na7F?XAqlcahIL2lqqul$MP3gj;Kf5}@aK9)j#f;-uo5@2{oI zvW>O_5(iR3lV&`=ADMVNwa(!I^m0vz;F%5i;HOVZ=USDY9z~*2FE=)x9;9V=S4qft z+t~(zBS(%f$Fm0BNd}j!9;=jO@$x&q-eafC{ZDu{f4&lEO_=@`;z|I3jw&CVcbmMH zCKMW>M{5K|?3Q7CT8{zv*?ZIvmnm1ayc*aqv3xbSboQD3THVRh@E!4Bp&>czg1Pe5 z3Hm?gASt|m5-4Wp4h9^q2ms8>8zOOT{mDgg!(NKpJ>M>c1$6*7lp|CUdoXCrx{OCBWb$3!j-ozFx(UJT+rJ<=4N>?<`r14769BgKP95)iDQlCY3=Pj zPUD|wtkXgcdvV=}42M{x@G>sp?OQoILyGQA;DS~RRJ|hBw`{B1oIZQc=frD25tEQO z$6j1odfwc@L}-srdK3-))+k!MqyS!}e2D%CTxhGnh~`ri=z|18gxlmH$N-mp%`xSs4U`Mv1JiEo!x|GzU&DeL%_M`qx(~;~Ft5S;JE$?| zeLsm#6_4?(w5#|dRswwZ<2O^zoH?`nq#Ekkv|0_AT!N@hl?=!UYcI(5!oWS0^eOBL z2IalMQ+LJ)6%Mc|oHX#2H{)UGIY}H3&0)3dff%J>V8NWo^=jdCP#$A5=2Ordi8Rrb zGYm3iR73?-x8PbmkE|2?ZqpQrj~b^!9&?0LegCzx2GTKWeyu~Ck@exVY;0JN0=J{c z@%2S_m=+9&6pw8(L8D$Se&VWA+P$_J`Jm*nUO*`)A5Eazk1LQXZkR`|?2iRajV< zl5*SBRF;6G>iPL}86Ymee*pWmk%g65wP9d1A&81fN`ef5ClrJi8&69cz>Q{j+6~$| z9368UrnT0%_+Ixz2?h*r$R~K_&`F1bFOO))p!6pvCi3|4cx@I*w=avd_b58I-ZV4| zh7Y=O^0kDyp{QMG1au4Zs{BC9KA(PPWpbs5oZCdbii(1K9TABX1KW(XthBph)@mn2 zUM(@J6wndSo{5f+r>{SxtsM?o2O`G^UtabYsFHz^=aM2KkqW0VoYN-#bh*Q8b;yPk z(Sd&YFg)SJH@lCZbynhh9AI^$6?H~DTyWG$oMZ!_mxlw|R@ILX~ejr 
zFEz&=czt+9Ak~E)YKoGAG-Il=03+vtn8UQ3?ZM_h4wILdWY?^aN{^+Pa;V6)D|bx-uN3L&u}&aWuN(M zkaCwjN&oF0(;gHZIc8@FYdIgsxdj8c>isF%%}kV8*w{Zh0cHoX?E-Z=0*TQ)=Bgz^ zYCN|}!PAO2wXKPU2$=Rp#XB#N7~bd%>xux}L%j&V{Y)Ljk|x}UvxgX>SaHk>rjfEw z)Y~k#5|Eg4%M6NTybhQxF)|hOTg~uq8d^bVTcA4?c=(7?abyKF>E$VbYe>1z1xGnh zHES2Vf=L`BpU6Ilk`Ef{@M&}*Nv49H`Q-USVPumrUEHbB6Jy1J(p;ZUW$aw zZFSI$C@}>?h86?+mwh{&l%P!pDIBTtHBf}H7RbMpz%3Y3AuwkE1xjF~htzyrF!saT zsph0uh8x@Wfvp)?lDkK^GsJ77FEbAE$3cz7U(G(hu(%D&^S>n2OQ+iUH|#A5V)$HY zjosufpt~YSD6KJ(@=hDP)!*gnlv_B@yD`AhBsE*+YqS*EaNzY*vObV?KZIEuM&CAP zeQ!;BNpx}hju9#7mjHWD$nf~$kaFpsAmle$Cw-vkoGLvvis7WzK-fuHM&ws)hX?MR zzL4O%`u#lR-)rm(-FNK~{x%JU8E*3u6tTeBoGTrAJdc5 zXG3B}9xL*RdpkJ%ip97Gvs<7K%>E(eX9`vA7y%q-Sb9OMq90O`z$Z`OwdXM|EhG>_ z@%$iAp_>^wjWm%$Ns}>5 zV%C&8x*5=Io!d0!pqW9tisxrCeJ+DKlUyF8WnY{EpI#4ofo?|3TOq-)KJXf3>Y@%m zF7q98D(RJupIV$AK;5#sWSr>=z9`{|;6XQMb5q7c7!8x!w-nfrYI=WAKV3WOl!4`x z3Z7FEM;GtybpvG&^2?(T=7UI41d=OPilKM>H%}ZfA zvuVnh=)EWHaI}Gkad*t~4=GNHISef(YduW3@F(ecSiCVjA_(`>QiQ+QH~< z3ZjWJAS0Um<4fWmFTc|$O(YOFqUMp8?1nPsy(H{DmjffGszV=PhgWFK~o0BJs z!OBmyD_$=AN_BNLq)m|Wbxe?dGBeJ?%gbfJ5ibU2RTa==(?CBmCJahE$$X% zG?dLau`M@0e;#ZRi3Z<*&xX~5OC;%RR??a#cR2HDY5X-aW3&K80q0Y&$K#ky5B$%*nD)E zuwh$gfnYVV7xG}3WNka4zqN|bcSA*%KTqWWu?gBhpq_y@$=Dv$QPP^7&^33r=xM=| zf=$HJz}PsS|3pZUEGz4N80Zr8^eM)L9c#|?NU0~R;VV$5cJF{XRh)p?(J;z4k`A)} zZ{NNZ?FNRp2jloa%+=4KQymhE$WPRQK< z^4aEFM!r7Ld)?w17q^v2yDrc`Ksoi5viOMYtXCyO8qs?e+al{fWqj6Hx(AW>4i&&3 zn;#j49y1?J-7qX1G!0zND!14)Xp5(LZNTMdVEaU8!ZI6z%YIn*lPh;-_a3ylm+-TD zo0CK{uU$_u|I*21L(N*}Vs;1&A9SuF1K>A<#O7iSj%;(Xut-Wsct7DH5!(bw-Dvz6 z{V59diDKFN2)YQYVsz^JjJ?aRZFM45LW2cfy4DSpc$=iBN3piw@#pSaG7T#`J7J!Z z_vxuH2X`W`!$;g{xC#3J4~ng;1@^_)qUO4sx+wgq9645GjXvOi3DyYT@1#T{G)>9_ z3d>rXpPbQwiyay=XFy@YzUO%yMrNUIZ-rX7GJ;taJh5PU0rB-E`EqkziT0v?Q zQq+73QfFB{QC*PM9TIz+8M%Z&li@sruE4ah$LZdwf*~g>wUWN;$ZZ1D%dVIew-g~POvi@n=GAe0!vmz4g)pb%^=`)BFoI} zK57U7>Ea=0qaUg&V7wS3Zs6BrN1z#o@CavI+HqMVHVg$Lu_3fdk2^I(E?fzcOiEgb 
z-A^1l4s|Rg0}Ys{&7Rg)$&~A@X@|=5zZR5=vj(I{rTBbq1a^u*;Fr@G*MuH8DfvTHL3nxbM1xcspU>UFXQz9DiY;0(Qf_QFYT z-i-;u-{Ju5jh3xK%9)`JhZs#%Wgu90?h3N$1wskxHjqU_s7*YCRgCVp zddq(Sk1?loVwnfMH{}bpUkWz_%y9HN)J^B(zUQA!@4y{rP+>rg2Jhotn7Ne#VS-s! zfbR8!?em^$Q>@aaR9DDZ43g*|(c&WXdLX--*i<=m_g()5EooCoiw@dUZiBI~fAW(K zt}E6{^S%HJ49{ZapAH?*xsN0{vW$?)bjnkIc}2nqIj0pDFF&F;R^YZDZcleMLi=hV z=E*1NzyCZ6(N?ba$&TB7uZj(nWrFB)e%3!qXZm`mC!d89n!q1WT_#9is%HO3%1TN~ z+=C(r^o=0vNN&TP*qPW5r#7Ha?G?rLULKw=M*dys3cwUW5Iv(M$(jpo3giUy@5q!Y zwk(eruKf_Y{h?W@_4Vh0cT4U|NW7R`r^uLH>NH1#m~PvgMsu_a+5vTomwCwB(9{Hz zjInviTm$+dcaF!Z^acx;s7J#PK^p&4p=ExMK*Ga~aioIjslPk9-iR(IL=tNg0dKE9 z8>ufPfCT{yGYxMV3QW?y?qUmH3$jgsvBX6GM%xL9gGB1P`2_{Z^X)iYo&ChC$vB#q zj8?$oI-w%Mg$?xe=RAPEWtaLWgy!I9DVUyiI3&?)0|g6*MRqO2Q%6;%!g5d`2RBYi zK!k}H&GkegI-CW<8&q6enp#?zI`Cg5UYJf|gG!a;L25vOZ=%O;?@MZouHvz{adNg{ zVQDGQ#d9k&jWMhbI;_Xw8gR|SZL{#OL6>ZKJ0(f(U6Mm8xoPYbE%d&!^<>01S%K;p z_)=$nw3m%bJV$1ntt$NXU3~ZdhZtgQLh_!RaB2=yCvn{>o99YhqJ#)h2oaA)1x5tM z!?mf?#O*)ks67Yk7YX+lxmver<`MrLEHI<;QNkSw?E61OSE~Yb8SW0f_11QG z$9l;RQ3fpPS|^2mp9GC!_dpi_g(IkqVU?(nZ!|j_5EOls-FnD zbQ4STkO4^PxA=f)9}^qow#|benQeXjGPJR5l?;7F!|brug^pzCu1&b!R`?fLSv{B8 ztdTB!$2(S~JD6dzeDfWmlpMFETfThZ6BjS;$!Cysxzf&}oE{Vc}y{g-S_Q5Siw)S-Dki3sZ-oT~t3e^B2me;CC%~ zQh@FXR;Dh#@U@P?h*9QM>nq^soWDc@b^MT zf%MFjv~!>1@W8sbGZE%dK;s(R4n#WCN#rJ?DHa&W9SW*)?L>;cLD!AJh4pXm7y3xQ zB&1)bPnUoJ)<}s=@%~}Tuma?rymvcS@}AG|^jVY9RhD68nS|4%uIP=Y7;tafA8VEy^!OaK82$J(e zwLP%@$W`0nHK6q$(k?A9t=bCLyrCYPhrU3ea+$5AqocrgLtUaBz{XkSwYoa+ngTyd ziIXp_|Ml+uj8E7L3W%dy!rAu&(<6yud>a=brGNJ%1U!{g>;nr4ByofaL`Ws&%p+vf29$QKX$mlCJA1VJ|oa@ z<;};P$hJbt6up&un{9(#<-1K!hPxW*CycAA0f z<1zep2RI@+uS%cg<)Z%8g1hh6E7p0XGW*sx6fgGX$+|~I5&|7wm+l|3lvy=7MqMX{ z5`w~)NCGbOO)^j>`wp3=7}%ve+1%cu&`Uu6H9OCm zrik50i3W3=a%sNo#cJ&jH$|#$`9Ru6Heo+WCJ`ta4OkUQ%tJUtNTw-N23f05;Rqot zAk79lnaI-s$gRf|T9eFOLSVZ?Zs7(U2KlE$*OBADDGTH$QYdfA@*{tj>(!Ct=IERW z`Rb=V_zi~;sN_IdqCK@+d+0ZBnq!}m)SjnvR9Rv}{3{PS#+z57IFPI}>NTx7tH$1F)yJEw>^2fW|2I zkFzQr6uxqJc-TCj+^OagBR!({PEhYMBR#sW!q@kws(I%*DRiE5l;Lx*&ct20UaINJ 
zj$Th^N__T=M(^^1Av=gbDBR(stQ_FyIzuo(9l{2Z0#KIP? z6bCmH@C`9_S-KvrR%84Zw6;XMF)q&wZo&;I`a3sEooLp7`*>xCX%C!2-=OQK!y*M_ zC38j-k96$lgRWRxS%G!QGO-I6`=H>hWsZo}Ie)btt)FFeS$)-_lw_~dj9d?1i1qEc zqBzA1OI@ikTdT3-ex-G0nMGQNB6v&&S*i|ecuT{ysZJ=)!-g`$TWf8WZ)((ghejn> zw1#PCeq5r+_NP_F{BaB{EFD$h#Y+Z)O4(O#HFr1w>5s6F<4jgSG&(zz5|tzIgY~=> zUeGp+#c$U#F<~(|LlW20W})%VI}2yQNGe}2v)G$43PJhw9GGiv7I&EPgKdZc-!EDb z*1Q|MGWI=Iz~MyzK{WCQU6M}Bt;^!C%Pr~U!b#Ch70z!lnXk2_Og=j!WdX=l`!ycj zzkeU>CBXHCA5Hrn-a2OqcV*>z2&cHn2P$=|GZE8v3uH0es>0z&+eCy){H`;@M z?*1+AFROrS0hNM)TpknaiN&IN#C~9F&(*k3%6fgC4!ksJ%z#bRcNC)ws_ z|5KV!x(lRLo4NpRIiu^>7rzcm4aOQN1^Ee)aCGK&5<$p z;Hs1Coyz)1g$OV*$fd`?atPk}Ji=rG0F|3?LfmNtz*M5x3{-2NS%g}w;iE<- zlpMwqz%%+d%~H&ElvW}L!8ZcFGC{<~PAOCzFXbD!;??)jw44(2#xVBNe`g+4h zMrU(J$64qH9(^EepWdS(Bqb%aJgPKi07nP%XwY)$>FH7~JF zF)*+maEA_zquI_h&fr(y-Cu*sn*6;61w9%^mkn>R9z>w8r}nj&{wdv|pFe+sA9ov_ zO<#8*ITIKwq#+d2ZiU#b%Lj7`SoR6RkswH#Kv_;q2Zs!JZ}nTYr*E3UL(nH-{*L7rg8m{D8VDpq;h76FZaC5-=9)8k1-n1tdn(Jiz# z39do_eP^&e!U(*DBM3$XT6F(Z2g~-c2abKY?b%`50Hcz6N4G!eK$O1s*|HXFT86igcRxreiB-Ti9h$@=$nu>{& zkY}(w)naSFx%uzxCL`V;lZ@OLixXfEj^y10|PDGOxA8YOw5mQ#=Fe$Z$2*% zm~E}C6D`5}CTLTEkd?Y$+q=?u%zMP3V#WZ3Q8Yu35gjQDxA;q7?Eb`rjfqQBV5{MF z^6P~kl6*gw8$(=u_9#A5`LG)v9Wsig_b%KtRt;-0WDJfYNUY~B0=-q8X2C2)C+?{*=x+K`~AoS%w&WyMxYQ|k2MxNh4 zt?oUw-EXmL+xRy=_S1W=-8}3PmauNS5t$+F`>eyIe0{#cm(zED_`Q=pE|uSX{U2R8 zdGYb#(5xN%wb@2ZHb`fTdzPX-Ck@vbxwgrzj^C@=p!xgOr3VMkumh8qhi2(bUSC;j zxjhHb%4dz#(eL&Dy1M#!D6=nK8(UMN6|I*Q%6nv_u#~Bpt&GUaV30ASu&G3OjhT(X zhKvl22#wc4s5BAAEHROg_vpv0ysa5#GG2xmGtckQ?(ego&+pIsdG2%0J?FXSJonst z?)NNrCnQMob!yapf)PwKw>Ne>-baGJKYo!_|F~d%iU<=e)Z$mscyArlW~hT!jPwpi; zfJosNC#GrfQ%RsoeCm`9Ou$G`9mCgILa^UCLLl(WY$=(r#hSRdxV=5;?IW{mdlB6n z^ze?XK?kM%@`S)N_0#u}p9~O2k$cFTxs&;Vby2GBRNomS?Ljv8+N0 zOPV&(c|8S21_qhgZ*KWU9xfu+0xzx?`{llLD#o>Dr+>=RtdI+keb*aF2e03*^CSbX zSg^_(CV<+oIi-$`S$NT$lY<35y|dPs&#m=Rs+d2gc1l=2JIj)Y7)0vZ%_1Uh{?8U* z54f0w_&M*sUI^ZSU`ZA?8X8m}Xc@)u8VVa7D?Tszi|DyJZO$0Qx~}4>q-PimBlrnw zVt0Zheqf^(#}IsY%q=#Xxg9owQ4ejz9E<<3_Vq13qoH5KgOy+O>tF#JFb5^8wE%oL 
zuQ@i0|LE1nPwPvS=mN}uQ%ke7N3e#Yjw&sHh#ju{Y;xfGfjVW9S}^sP2EaEa_viPz zoxpC`mW_x~KuJ5MRfA6sef;XAD3>HFE>&?DSZ0~M&25;HaCyv_W~^EGd;zdgbHNrj zsVu8=PUtR!x4e^6t{!yi(NlQj+I%DG4-m{|k5n)=8 z+0V(@ghjJKvbT^qLFb`n%JNm7Rm<8w$i%M{1+-4a>pI}h(O28{V;>^;SjmisK4W7x zB28_NF3rPZ!yDO_5#X9@Z=aOmzs@v|);a@pvr?(b(;ZbcCh1Yrs1-Wz6!ju^3+Ck! z@rx&T@+JLZKYaoaGt{$QmQy^9Zkp;ewty&YxcMyf!Jg^k8FugSR>FNp&S>`?dglYV z-_zq?iTcj$Z4hnh%1F#&5O&bW<_(QzQXcHq7g%dK{^CK*?qiU!DZPh6h+i%^KcOz- zNA`N2YE8otUbUT0ZCcmsD7I}~F!u^C)G~(!Nc@<+_csrhu!KDk7eBw!2a+)iNmI8B zKPpnPAqo@M_D+Byy=e6$Y;%&tOkoz&PGCTg$+%|%W676P+=ch0iPvcL-PsG@BVt$j zjI$m!i#i^%EN_=y3V*!x%z;f2^CUMvy!ycdgwFnWM?ZIxHWmVK@TW8YAxAhi#!BCT z?^mfR?DG$6@KfDTxFXMXEBb}43tF_y>gotXyEzar!aM&@fQ?=INa@muA*UnKV)+}c zKi(`8=h%U`(>N^LOL9R3s(d1Hs(es6!!tdU&A7jYp}~@_xsn>ZQ3#qzaHYwpwt$bN z3l|E+qx6>ny!t8dCLV5-eSlPq>`V;{JMl7QRc%{o?bU;!Otmt_uROr|P`MeccT2Yo zc59Ji4lS1OJkW|Sfqw4AT^ZM%?jEDYYA{J7uukkNw&{W@nB%-^8T_nl!i^IyQ+1p2 zMeLNmL2_HjnBVe|nviGs5%-K?)9cIUVbRR>v+2Rk1T$TPHQcQT^3OY5VieEdF5h1Z)5TK)_qZyPaBmzoVz?in_d)Sf?Umj3E@8yWw z&#Q|wuH1^k)I$v=Inl%p(3DGz*#Pj_3ja~36)I~y+<(?kL27Yd%F2g*aDBLLQnH02 zrQ~CK2|_1i-X|;+$$6>gg>@13f)v4RK>!5*;6PEK09N*^0Ys_v{h;XH+*SY38|Rhkwn^x#uE@sI&7~ zV_i_??`!8|o>aI*pEL=%9k%MkBCGlPwGFf@FFmI}<=E#T!%A_-We1eBZK!#3hXk%m zF_05IcE6L-@4)veDb+)dg+|@vrd_hYbh>oUw!A~FoSnm_9&evpp0do~+@qY>Mlzl%&{tgA6@8TMPd7WoV=_?@{_Y>T4@Y0Nrm*R~y{w_^ zin~Qc=tqwZK6w%ouB#uE$8EB6m`n6z+XUg2w`pghEVP2)4PIsn2yBPIS zZ&iwFG$9jFS8^bIKtZiIThI!LQ#4`#>qq_)6y-NBvEc=bJ@R zPxUUgGIAa9IACdUfc(_@8GS5jjXzGx;)gofIw1#hLmR>|gckK3?!!vj;7D4|!553& zDi^t2H-oIYbVK$V%NmKOx!_RAzVGoeKF(Wucp`;@(7bCn>!qGm?~Cnic86U$g`OJ% z{A(Zo)6L=UZb1-QUm#dSrBhSwuZ7dj)X(3j)p=Ga=qEte{kGlR`T}3H+%HfqUp_o# z*;YPuO_Z*{ejQ!iQwe}62xo0ILD~Mu-_ocBWLUnUsjuaM6hD@+9Z9)aImd1i;-jN; w{-5W!rRQ&iZ;_zZw;iu-@&;S0(bL9^Z9_@s-U#1H$E`NK(F_HKkql*81Ieo@((4+Is5Fh_gZt!IoI)5R=iDdgzg9l2?>Ra z^i5R~5>kE=l6{{K?T7#19Ut4l4-Us$NJk^P`>xg|rj8`HO>9lU+npEYznYwqZH zUzC^kzV$s@M<*L=9wR%O6MQ0P;0O=g?jRk1{-1>8VQf&Sp50@)-T)jAW(4GLx2v8!r;gJ8e4PbVOd?!& 
z+AQVN%^dk+6%zOTSuW?lG5^W8!l=XdxOqW!0Wp3{_VOL=(}X+M)s93M)>W@YKO7ui zk7#!BS);SG)|p}{Ux_UJ^v$;O4F~7zY~*p7^Qx-f-q4m%R5{TY^;cLLG5jHM`Luyq z%Vp1F_b?Vn-k{=sGbYowm&{aOKlC^{71rr~>2TB+uNP96Z@u8>+@3=}Ph@(YaLJt& zef*K8)`u+VAB}kzkKv^o$Cy4`4`)yGl)BP6+{x>6l*OQk@w=R?5$#ur;B=PvTW4QB zCB1xX@hNkfr2SJq1IGPv=5{UDW+LXUMRA=_la_ynP)!Xv)<+XA$u0Eb^?PI5C#;ee zUn~ULyY|vA&d`S4Py8sqQh2L;)`rH{McDc8i}=%;2E!at-+Xks9cM{Mtd7gvymsed z-}E54?j5IX!h;j7=)m`fUZ1$|{rZ+_d%iT{MB@gXbauBj*Ggpf;+*pXLA#j9MfT+U z44c`f+ME}Tyn1Q%Eb3MY2esez{dBp04{sE6EfcDH#T%P~_7yjk#fL2XcrexwC16%D zzOlw{*W3_gDtQ2w3dtj~wm>%GFC>qU$98_AVKu<(I96RVI3+%0Afe!alC$H-rO~`I z0=pk5YZ@%kip^d1xx8}kMI&Q({IcUQSsg0Z@h+QVjKq&TLh7?;v!;A1rX(mGV~W4* z+*gE2*y?mp`#Nxti(Jyv*!o@<=Caq$(F|R=OYISYC5`Q&<&q2Z4j0eio8qYY)STt3 z^@m^B+hz724;9%vXiR<1;PW!>Hycy_0$lZ&_PWvfJ6_+WZX7Up?&Q27;liLM}%e6>57=e|8O&Rb8}NvJoaVhI^i>1MRN78`xnFB zc`jA6v+)-rDRf+m92+Yb2EF?&TRSN1jF;j@+)Az1PAy#V(T|FI^(5@VsK)&L{Y6AX&S>t86{o%lM^3fC>o|2?p;Lizlf>Ex8^0iel27>@ZO?5 zb*8_FN$eq?xOm*JZ{1XJj>os-*edSvM2S84D3812+L0RKkJ}lEp=(y%;EL0o0CrX< zCR)K;g!XAoB@cYB8rA5KbdYyP{R&Ii!a3)?QKQ1vmfhpmS>5~BXXaKmHI2+z{*d3a zU+hpyIK6?r9(88@cGjVgxA{&}8a!)cd!tUBo>TG`wRgUQl^`^InM|G!IU~e6S*0M$ z76R9LwTUu$AMulZxPmu{iD$*dw-!7TW<0*d*@XNpeRjBOsW>Z#H(=8|fbKKvGp;CI z5p}QCANO4S#Ge1W!r_Rvwzk;VShyEH2PtPy3*0tbtG(9F%xRN(YMv>y4psn1MqNGr zxa-;4KZ&Ozu2E7_QY|g5lZ7rAzPZRe+_@at{e=GE?PV|4sI?IvO1QmrR8&dE_F7SD zKWA#@0ylSKb93Z7&%5>gC$UCsW#}M+E4btmdpfZt%!5OFU(i-oR=z*cmNGp(9T<2tZram)-QFLE z&uN%jvwI`;S;|-~_ZrWko$sGTQDJw!br!XAiB}QqKY!VI{}Iymq^9BCsrJ^`aqQ2< zCbAdj40kSvq}u-1FT@#uB~Wd@_sh@<&S%3kL$J_We{yL zHsr*3g?U{kr2X;zoIG`lib&!q!KzbXf_IdYf^iexz2`*4-hK*OyRBH?n8);qVBa** zH8)aEd6J9aigRXuet6&0f)|yywhkQ*tDI;Xubp6D^p<_Z_|bSDL&O1D6#@B+S~o}q zhmc<9mf!thZ&AJQVgr|2J@@$WVIp%zX8j&H{lU25<#Qi%K;)$vXmTDRPmyd3i2` zWp%BJL5mMxz7L+nnw+aU(_e z-8WC`&dY6L4^j9mO;C{;i{(3*BoZdNYHM(Yj)unN$4F(;?U4G_n1lqSqE`g9{foy_ zQ+s~9Ip3b)LaV6ur%G3Ou7AEm=LXlSURK|T0% zTj_AijhE4ew{eeDJC3T!cPU>Bx`2YcdscyZFm<>fIX{80!Ee_WrRzUjuUE%37o5Ko 
zcH6UH^S}~MsL1I?T8|{^EL5cnM3(*{JIRMX?z*{&{_n%Gq7pV%)o|?f4$B^I!s;T_ zaD7o_`JuTV(SH(@y&5)0yTn{zRDka_S75NQ^4Vgt zdNbIGxlO$IwtKv6FM+h~OyJlOpP*G&U?ol0rtQ;{iN2RwXImdSzeFmkEcgcAtB(~g z+t6c0#bk^)b}9PqYeTXd=vaBNb<2z$eA*n!J?(b4C(153tl?fkQgKm{yFLEc(pWG9 zf!!!-GlNIO$GVn-HY5=2Ra(2*oPAEdD?6b&b0ABn2)}4en9kd$ zQ?sp$3W6hafw#! z>htVmFqV~dw3sN;%wY@;@wWS}>}@FWc8Rfk0`-Vrd_BF{mx*U~XiJzZL9Tvu zbv|p~zVcLs4F>eUTPbNLAAYfTNrJ_l4%s@C&p}BJy~jw~*Y`aT$bPXoH+RFTwgd zdwnC}nmrC};af38a~bE$Tan#mv@d|Rk(``r4uc7T{0=JvRaufbo$gcRS%24aW7&m4 z^!*TKu?*eaXR`#&0)vEC$)wp11Yt z2EDN_)k%rxE3+HRTb+wHSZ-wNi;vewG|S0hw#H%_lcY#B*7i?bK*d}*Ba&)=Rxv(M z6~)4OYrC275PQ8qi)F@R{*$jLy_m-;6aM7eObIG8EGw@<6h79iaNs6FxGNB{bNG|3 zKLvz7zO0ZlORc#gg~;)`DZEI98%vna?u~N(@X@eNpn3IEErwkn!6SF92;-@}7UGT5 zks0Jy#ZveD?ALu};~NpKuFUp}-)sBq z3_EzQ)g^Tl-4+d#YF8bKJdS-3xrKqvSvDW~_gT9}6Mf4sl?VIlPHs{1$z>lpI?X+| z>W|Y9yiO<9g0CUs;Lg5#R2ecp=`J&UfA-}J8>KSY4|}fEH%;XjeI@O>g~-XhUuffg zn}Xh`_E>PB?wdgbKJ7MvekAv%4U($UY`PS8#3FrRTr!IvMQIlt_k&Q2X!*ZI(vG;6})+Re0^gW$o8_JgZl3Q9N>l@yx=SAe4s|(D% znVOlos*uiC!-Nm8udG( zdJSI4X7)|)dLYAD5kC%&R%R(NL5ns&P6*wm$$)zh(>X8ECu$b;3UZAgan<6d3F+5f zcU3%d4-(#gX`)qkh+=b0IG(?~bcJw%K9HkAC(E>4?WM79~2Dq&G&b;lVoaQrUW(6MRET?@B5@f#B$Y*lT`;#Rrs@u92irf&~+vragf>Fab z=SQjZa$;fwUaR*?0|Jupgv26SPjSfJ_oUh|NPeTn3GLm98_S24nv(3q;HOIh|U2%haR ze|ARKHC$qN`1jo0Bv^?_j!YZID=BFxEdkw-&% zdpAZE3d_VhxBt2X3%JK}T~jagM9<=lr$;9in!U5bBF|E;O&Jh6GP`mMHTd3vHhHzh9ckyUra zt7B%Ztr<85|12Aa8P*VqNoFhO>7Flh-UfGY<~(6XbG9~CDprQN@X7ql-%>?Q{-Ms@RzI9HYeymUh#XMju1RZrhzl2^XLI4jXY}C8t~Kf?RmW1u877K;%FAh`{=#Dr+f9w9 zv+<-|Zit+SkHubFpRIBfLN(L!L#ty32AVB@P_=O4T;lUFPk2sT&wY;PGH=t7fRr!N zC=G>@R-3^^Bj&(~Qp+g@HuRaRKA%on#XsC2R%U*<#p805=}XR6z1JtygoVd>F@cHc zm3-;}YEA@)UQ4d@F2mwWR#`a)gEspE$K=aAi6uj|z1tylnW>t`raL2}?b@2}1xr5p z6IgmZg!jsziy&RlX8$20*GBs*7EFO-Wt|7M5&+-woxpPJd@M#D;l0>;oB8~CBF}Ru zj|UB4c<0ChDo~9WirA1sxgZyegCYpBviis32CLzvFTPA}uCBeky-u?OYg;SBnt8T3 zmz8d($&NQBSd?C=vx2<*K$)vUB#)kUk?mH8nv#jB>7=nyRBx_{TnMAMc7Y{sa#Hlc z*J^Ae?^Nc(w-8A(Qqto0?>`HHQLD2DeBzC8uZ^Web)GT}o|v8+Dc 
zEu;K!rvy)@rYEik;NTS^hWBH+7ZoYJ2~17>NgR(%pQZDk7cX9fv0i`A&l}QK%)y~2 ziAuQq;A^*SbmaK-bY@S3BsM!7;lV88#*Iw8%t*p7aNm0iY;etf^oXQxfrdx- zJ-`kb8Gp3dB?c_*$kT<81LP?ck9+IKuv=; zD8-Y9G8TxdrEVLWp==Pa%sPDfa*O$G2R16I9d6@MM6Eqrz(o2PWxz_$>gYH>n@>*< zm2a3Oc&6qlqtYesZm(bndZERYOwt)kNJb{6VJQ8Aq}t(*sU%6g!Q>VKTcQ{z+S}Ln zR`Pjjc^nnH;^K5KV1HC-epOXfr@Ip)YgdlJ+|112>s#boOwvXpSDEj4ZH=Wghj6t6 zG15=tOQafN#(Ba{_)ZL~Rhte~v=BJZW_u2edP6M=eIsozhu^*1)g6gA_B%Vj} zC9iTH$BJx{7+t!0m}K!{A5$W=AMT77$tjMjhaX9h?fzZzew6;|i~Zpw-;ENt_F4Y5 zr>?+Zh_5{sk>n={=^FQ?Z9mDaBg*>|L;hMmm`-Zw>htCL=D+?U;>)!6ck4Du0SWMx z&rT)&ah2)745c!8wf!;4y}y&-bo)?D*eOEW{>EsnF_TH|C8yRtiV96#!U2t7xP(CB z8ITVVMSWM*r%JCOa+;mZd%A>{rx%HQ>LcXPALq4h+=?vm8Y=1cYAn>Xn;AHJu;Xph z4!ugGUAaY2<2c-tCr_N_hT@RGYOn^a&6du|R`H)A>X6lVI{fmoqKu5nn6T|Ydc)_F zT08<478d8uol{g)TwY$TtgKvHTa#r|;g}jOak8p>ObXHc*s)^}t2L6a>q}GO;^L4L z6A}#U?Q?U>Ucdf2Q#3r=gN7LOEr7QIiA1Jn^S){w92~5vkuqban0;C~-Z?Dnl$xLK z*p;a@!&w(NaH%{Y*8^7{k)555FKbUz;^pFM?@dIP^A}#TIe6^M_tqre%(EFbJ?|<< z;$wO$`jpp0+Sqd(Yl9h>r2?O8xNWbEEDw3La;t}j z4YU~P=tS>+cm!cl%k^uuzr$FAO1JlNubG3r{h~`-kL_u9eV-F@_G2IZI*`Y;G&k)1 z<>Qmo6*Uxgorbfn;p?XWCn59K502U@>ufB~uzr$xEbhI%iRHyu-)~m1HYj~1{|DZ# zV$Bo)LQ_-IUw{2YFKi3B*VM=yY4lg|toFvN(c0@<0pWvc`bkaeZSPK;IMHU<)}gB0 zHeH~T{3wFB7`#m!urW)ack4=K_RQ2tFEPo}Qk~u_8$g24xRh+uPOj2m{3q>J`Ul2KkoD$m{^r z#ECk|Mn&G{U^>W$PRP~OGuT>vgg%|wewj=-eWvmt(@{!F;93∨Gh(tc1h;Z`bL; z0wXfpA`rFdx_Yv^^sI(G5s_Q1UO_c|o3N&yGi1ykoLeh6iWHIE+thbc(3@k8F`)y- zsaDUQBhHKF_&icK!vBywtBl?Q#XcH zC)(S-v$mFDBt2a^s=2YTQ*n?il3q+FJ~Z_8 z)}Xyb+r}KmyV~!>73XPXjArv`ir*rh;4#9Lxw3^12Ppsc^~G-X6*Pl06w2q8XBstWzUa=>61YEd)%Bs4RxhRGW}?U6Lyn33 zh4f?znr6ct5#!AXIbNiqe0)E$iiY0uqbTVvNv{>=HJE#w^eyT~QJsQ2%5_mm`>LU< z?=gm>v&B*U-LBz>X?XRIQc%pmhOH9HL?iWGva;|iD+am7GJph3O~*dJVD5|v;_lww zE-P`z8y4hLGg4D)=x7?61(MmRng7vJYwrC>`w?rcJyj&Nw5+UU>+4S-k9cKoQgBB} zy5f`VH9b7aLPA2~;^KULeMg-IgQQCw?%!9oLq$e5#|SIo%OI{KpK5n~^ZNC5A>7nd z|6BD80DLtyH5#<(fiuLw*9(C!IoTNW{d&?W6R9ovTz_P?F1@ahgwy1m8S=oGAufl< zUjbB%qUnNC0mU3Xe3)!ypkZc8?5#Fk^1Yx>c!+T75NZ<=672VFI!$%G;iA{c_AFTA 
zP)q$>QO9U$;9oxU?9@9lVmc${zYiacMBaLu$2_=iF{7;<9v8_Fcfa)SVMU@4=Z8Cc zp{p)Bx#TJ0O4PrRnuzhX{a1~o$4bxaXN}@g1DUu$DPAr4o&9fP zDvti+<;$0J-Np+y1E)sHZq)vJKd+%J4HxRryG|9umx$qV&Oas=!#pR`(HzWt>r?x>i=yiR;olsBz=%9|Aj$_n8boR1lwpS!OyExXubO5Oa8qe3IfU3e(->UhbQ%q4}9diz=j0D*~kfd zQ`n*{845$I;Zn;^P8}VcHBE0%8%s+%K}*1ZYFHGv`_g0qFo{$u%d={ZTcbbv+}Z%V zhnx|~)ezFx;T`CCCG6~BVhZva6eAbk3)Dvo2?+r>2$5L-9%EPGI?=}JlM&V9_qnk# zPRK?ZHt5Xs^ce;Q-=nlAnV52ht~h>gvFR^VDctd{e0XhG%}GrO(jc(|ALwE?BY+~Siut<)!ay_=GX;;l zoLm^++~}yWogLGeGqDK?IS35wz{jNIVq#)v&YWRcBWwn2lI9l{|3y|8-mLOamir^UX8Ht~m=XXs#rWYkMU0{F=rCxkmeO~d zNRix{P-0sa5f>lq?~lnPg{5jwqHzN2OF_R3l*6{J>oe6cg>lQe4?q#P&|*uSrclqH zE5kf(tuFx{kdGJB(&G6`^6c5GhBKWRnlLXm_sJ&}WPA=BJl2rif$=6hA*XrE^#>`b ziHV8KeZ5>`e+&jQVfrFSH=RWwfQGlrmD;G(a#{;m3%xscl5H`-d4QP#5)wpt?=@T` zMa6k4rB~(J=H}lY?K{9PAdvEag{(PB;I>|4weT_E-*Ce>e2*}u_2f$udD3b2H39DM5Fw|a+zc3&`4oZ;*EQX)AviWAV zZR_hNxw(~Td~uW%oEI+KcX81iyW%pN>22SSM6BTPqE3_HKm@2NW!E1mE-tPwV=-=@ zP}I;64uzapexjXtDVOMioDtqXDi1t zv~f4=zLw(Z8>VtF+41c{C$Owe{k^@kI*lVz$ut%QtqPcoj?1V8mm_5~uZfiafU)A^ zk}Zw_cN8Q53q$3r%-g=yXO%NyEKsMy#&4Vz%Pr|Vjl6p|5mF64u&}VuGPSdL6Ik@+ zdVXeMg&aHU1O>b5o@_dI6cnRdtKUK<@!x7g4w90JT)E;h(PlC?R8GcZH3m+&P6>I~ zf&c*IGvRX-JcjnofmfaHHU`l|cX`CK9;2hwR#4+b+ydcih>{IX#f*pNH#whMSZ!mhxtPHK5>EB>`| zeYc)#`f?BkZE>OH%8rtgW1E_IxVg8M`|YgmvpV-)mzNI| z0~f;$#_a4o>)u>oN3_xb4bjjaq$sp!hUinmN8c%aLYUt#d29@b+o!8lby$2MQ(d_WcQa_yG zOkY9h>m`GSDk`o1RP3gW(I(Tf(`-WavjZZo;cl$XA_Ju^b6sAae|)JbvhMvj%<)72 z(9gp5(UV%de!1J5giMW3pFSQ%lX#5s$H3tbEw+d+KtuKp59*9 zhYta+WlV+d0G4KeY^|*-p_p90OPB7%%Sp~fgoii8il`NzENw_PTg{-yMsT6A0vS3TJm;%xS_L}5>L4%mU8 zioJOs$)i6%*+YiDWG}06&oH;7Zp^R9eJ%^>h)Sm~VA|9RW4Xo|{YD$Ua^_9RIaVh`~o zO8>R~d9-!;ul2NAj(vO$ZA2Pk>KRT~n}qn5Ccwb$NCq;c5ViexU#1XaPuHJ6-BSO? 
zHoMlUyMV_ay8a8YP&_4{Kk%>r{32F<;+|Pd4^3P+HnnU5{Kh-S{teKnHwlJx^EUP zXk%>+G&VRfTY+m{XiJeJ(-c6Tco7qG1r}Q{!D%5)$#*$G{VL0KjX} z2V{1G?)&@q7QeSbh!igW{{l{=WgGy=OhVETmY<&wNQIr9-Q9g-WMl+LER!`Tl&KNO z5r8)%BkzDP00&QUwqgkvH+P(%l}4#7MDET%fg9wlR7z@HU0Y*iXOB_L`d=WyAsz>W zfwW}8|ww;_O^f+2r6X($p&4OHKU{j!OPi5~_B z^`c-&I4xAUcHsbLFIn4Ha5fwFqiK zbmvjqWZ=Ca{`R@2FBlv?nztEsAf5Eb0Sg#gqOIIW584PAn&9AIS;oAA0(F$tF8^Tk zD`_JNqKEv@dV=FxkUSYrof@5<9stZ)?!H2&+ukrEH{BiFvYV^9boKPuqfS7%hl5H1W5d@n!Nl z4OCXKoz?7lR6v1Mx9qbn?@r*W*cD@63R%v2U>S3V6qD-838has7ohVYQZ*q5~hCeG@iV z@x7jE4Ih?xDQ8i_&O5PwpX`V&VN&=T4U&%8A9pd^C(P9M&)6ozmnk)SOgCaBM*>}j zUgwy+YP`UHK1RHPFhBe&1rk~BOGA()jUY=(#W%Q^ugmg>JfM5OP_eBZFLrRZBv!`O ziIps=g*KSYb1HKA@~?v-6%oSAPV>>9@E}2> zBjm*<0Qn9K4u*w>hR48oczJm>$4qLvy1EvL_~XOmPNYW^NL$TY7k}*Rbo%)BVbH?) zmji(T43#HZ`<)J(VJn9@T57NE5G7?tbp4p#b)3%%phU;?HGlqmj6u``gylZ+HCvI@ zE2QhEiEQ6ll*8&iL)W-kJnUym$`F9~ak#1O9N~wboQ$yhpoG|^X(6{qBVP=aXnD+j zQ^Mv9ngezqP%Os?+imNvZH(q%wo7iL0QgYbaKTdW9Nann9uhO>zvYHQPNF&Hstf5u zy+>InNQYAQSnC!RBVq=N*4;Vu@|NC4iPG~g?xlJOzW+oPkSP&gL8VG$tO7jkkiy*b=xu~67z+?Fg$NI(2ekFR;ZnNK10+MK@)Wh3|+ z24o>;*sD=bR^vgph23n^oBKp!Uxx!xblkoEzmT?dZ1OMPnYW&wVI`;GiM{;bwKHNE z&*b_Xye*b=f(01<8IxkUI-JPW4*qBFj27Ux+RfA%Xf}C6}r<#XoEXg-2 zUi})t8ygn~E|~9BSH)>SZ2cv^qVBKS4I(5~=F^~+;`{ItxLqh|`A3Ijp$SLA6+fDj zV=(1{H7S7onyFpz%yu82)Pdh_{p=U$U|8x2cLA6E7?Vid3DVO^JACdgCy^6U6<82)}P*84?5U_#4-QOSB`rC5pK0G5wej%+kX=YBu=gg=WB?1(*Bpx)95H=BCXZq`7CvP2By_|A6ONLNlEjspdTCg?)Sa5<&!(aU^d$qL3|jvyb8!=f9RIpu)R>WWPQOLGEAm+T6(X4vsT42E)(O&i4R&RT>1byw87yN^8$uNnB3z!*|g zrm0O?m02PH6rUd2kkqMh7rQ`Et}zE~`@5klv$qIn*Ra48l$3}c52(sSv#UoYNO<;r z`jqni{mZto<}?3@{0O^|qrGRxzgTT1LvUsku}{8L(Y7w8v+aO8yZ}d0>D$5PL_iFe z4xK}I%hOO!-DdKx zZ(7YaH#bAW!Xj`*wu9R5?*0WD5vXlITq@*PKSztV4%-WGiWQs$Fp8s&& zE$L$Pd(RrV8(L$@T$BuFeix+$d#JRugzTY-2cciqvok!XXL&d|bDwHzbD=$2U*D?N z=ipUv_jDDtke{U4fqjtQyET-RM`maBO`Pi+8`Z~HE#K0<0c^6KCLffg%Q_=|vCzav zxV)?k*rk*PY*L3nMrR+KX%LjRXUkG=tuOgBziu^$SVD?v)Aq;Z()aNOL5&J&f&>;d z(-mg~A0IeqRo0U8v$AX&`6F1GiK4Dg)U}%BWNn=;eJ#^bQ;OxAvx4O>CWW-q6yu2T 
z*y>?Q!AY!>esq&>51c&RV22F2D}o;A?{|lu8IUlGJvRzBGZ=z8sLFr>RyTdiqgV3M zJGGlQKlibqdyo}be?4Pg@bPWQiKy0LV%@fk03icFQIcSps!dIaa)h4ThP5kPNE7RD zO)!Jp)_xN7rsgj90-M3IW%$4O$1WTy)?Qgx~{?T0j)7GFo1>3^7x6M8UIRd-=|MSFJJyXr!FH? z2RR{PDhwOAuJwKLsXA_b6dwrsPG`mE;tAqxK`}<$(Q|t?F#$ zK}>g+ahBhZoi=NV1^WT$S%s_h|5P^+ABsGP2AZ8frm-fKe9ViJsOo@F0teGh@RX;! z4~m&a+hJBzZ3%_=T3;VDPIO%UFnnci(_h$UD3TK{p)p+Hy}+wd=+*Y|@IJMs+q1$C z@v*zqlP?6VeX@ycD#`{)eN@;0oYRUdt?5(;4fEJdp_MB=E0hNl)`wpf`YbBNx9?Kn zzZ^lYg2P<%YE9M~=2DoOD;WE2{g&$-`k3w_&I9SxlFl(V7WMKa!t|NNmG61%`hmGi z0Z#PMF5fRFo!M0rm3{Fcu>%k0+AR?wma{aT1|M7mwsTlk_=64*DH@=d)Yw9DJ@E|6g^l3W(o@yok8}E1U*3J%vaT4h;cHGJ9b_A$T5ba)bG&>m*T)USC1>X)JQ=p~*HM5OQ z-$HM$0UWADHvKion?wnt%;1lm3mnpNO5l3f4JY`8W!Cx7#1HTX(T2N3;Yk5&Qz zKD5-rhtg6~p8WmyWf76y{(fKp#>dC$kD5*@L}_8X%G1))eyV&LsQUoa~qYb5Yk1Sp{SF zt;oC9|2qfNoHukWc<}3v!{izSi4K-H$w^CJ;Nhw8+FB2!6<||}*EMlIa^wi~$a5kP zy1PnUuE(yE>7)ekY>;xS+qvkCt(5|8dPinuG8r!4zw4X=K1*KR_wUHu&vRj=c;ZFC7>gSTsysVVTMqvf z9ML*-Qm`|yqu9Z0OD(lVBRh0C{&atCWeh045TjMca4M5got88HnVEYeow`tTrCF=_ zr9Sk)V*`ti6&2-72rn!s$kIv;9o14*H8kZD z_}mkJ#|qoWg$*3Ytc?F$o+_+5ChT+FFv^88h5tutD2_ zOi>@f1(X`fyCCU>+hgF=it6P%Q{-qCniuI{l8)N;0&O;eQi#2gt$|b*J}M?dndIud1wcY4&PJ3;%>+T0xChAo z9lY>Jfq#l4bL*KLskqhEhak9JxpD>S;U7QlJGt(fO<-X!F6x4e8{zt2HcZgZZzn(9 zxN(Ew%$dcNw|fqkr8;o9ARL{l-r9TGyViibZD}w%ut*dqLGXtioSKrNwaL{PFWmLy z?X-3-y;`dL3%yW=_KK4m@cavK4m}G9poIXE81PzzZQIX}gfQlc8%NN=M_x%!uK8hcO~z+p7$03k=Z}ZC{e2H|hen>l`dA z-|iIhzu>bnJ1gdCF-aRyf#Fb;EQ9s%)29GqvH~JE87THRpCLog|ZDVRN454dXthvyP{KbUHXV ztavKq?HN;?cueM>&Twx`o8y`%9vBw4@_cOF|IessAh%o^U%Xs!w-X)vyLgjMeyi6N zy4LSCO*|m_U-y#7E|mglVefBq?+?~<6&tpXZasV-wMEWb_U=&UBG~?+x&K$0fC~m0vc{e^Q3}#HWMt+S+>(z}2u|T&Q)9kV!5{-~RztF(9N_qS#qk1v`;VCPM#McnpgraN5Hj ze&rWy@t`e$B@|dsFcZkjTfareGNt@eDP^8Rab0u=jhj;J$D^R2AVje}u0C4OiqUKJ zi~DSG8em@VpN6vbab-;Mge8EyA&gn0fCdw^2gHe!SWw9GL2HNMp8S0lmyr@)o2&3; z4mph+eQr6>i<#y0-Dd=&D9On)n)9G3er0J1yb!oYzkd&EO-ax7{F0mg)!=-OYcMl0 zsRPfNQu!x)ei3x+tK&P1m{>c$D?w(LC#z>@h%#vu<>h50AQjozQG{n$>wi?fE?O+O 
zdrj(K=T5GL+8OUXc;PrUhsOXK*#qeWC!xh_6YQHTec)lmWE+V@#qPSNtMT6b@9y0I zd3ZMC(*2PSMDwTxf<0{aIS?OYja+@0_x^y#6u_B<`E^bI*<$8x=rX(E3OgY;IBBuU z<^|GUbWLlrk*nG@_=do#{l7MF)oY^sAKJ(g%WHe&^JumG)9a5`xOQ^3xi|8zP*}B` z+Rk$!er*PDxS=$eB76V2lf?h*FeuTMh{!?nHtyb$4TC|3y?>(1^o?xdByMF{Z`ul7 z-{qE|_+Ij!?cOxgEwGHp%F5d8KP#fURZ#d3UmKMCX%`4xMdU0X^+Do?xrC?+1ZR}p z^IZY*8vhKm!T1B${ja${j{Uz)Itt@A{yzd5!C;_(?gKSbx0P82Fkta?51T^uc}3&5 z>N6Tc*~criBN1wu75i)Odw)~Uw*Jvc>mmJLIT8KtUCVwW5lPv9wmI?!V($NXqenT7 zTz5)M)Xhp(In?UL1{W51TDs%M?=MgHf@K_ttNz7$(WkoxqY@cgKXMIf@K~t z{f0MMHa*k%Q8DJM3&Q$?;uq(i1C6%t0GY9=sVr6=$g!qa5y=0))Mo{Lloi(=%0P|( zZcqkt-1^62PGv-aiLo)nh4Aq3&t=do6e^@(*FK`a%bNqygJS`I0jlYRm1~32(3R99 z%cd=#7SS;Lus}m->$rizuz*?9g#~b!E-o&z{FVvKL(!YR)V+GMG#eWmkSfU{tuE2A zYR6E$Dkxv>vufP#2Lq74kBwyxLcqR#(^EO2?-zyLrd3>$btfxT0Q0FMCbYRgClemx z^PejNI3nj&3Sk8v(}OdZ3D*H9RRdh4Mih6cK_|VXzVIQDT>(kh5cB)KpYz+S<`C zUYt02@^v;a`ny=okPUYuN&Dn3s#j#eg5~1r3)KxMH?`R8bn$1foCeh6)r7 z*6H)d|G*BxRZzh<*48X8(J3i41JYEF;>KdJ&`mWCD%|()r>Ut;O^<^qt$V24-HEzp z*Zk5W^b>=m?O~9J9>5@1-2pZlW!;%V5ACje!vZL-GN++y8MeSPGKl z7+{9L@Bh=Ukg*T6mH)PVF#YT|Od_0w?cbhfrW7>+_Dir)J2&XV*fTORrbibi+OhTZ zZ*{YwF@0+f&|&Sp8_ge;KEoM8_X>ca z>;|!=4mId@lC_6Ne-tCSho!Gw`wOr}B@$Zv%$AJbJKt?0GGGDEo`DN8t6^h(Jzm5? zDhl&({xuYgCEFlHAD#7RXLjhzF zU;my2u1jK9kNduDhso4E}H z=z=qViCvi;>~sfvA1hW5;Lh?)KR$=f#Hqq|5XvJTGc3hWcuzt^L<)wzhNmVqma*GQ z8r&Ujc-@W6_YJ4Tbh7g{c~*RpAjAB=EM3u?(0$|N?7aOwDa2SmR0ldL!b1wU!1n>j z?;qblGUuDi5Nz~7L@|LboZR3h5ZzJ-pegm}naj$I`e2HvqLhT1x5n{ku%DkYuR}aBmc=Do1_+-YUEy@u5iYOe=UUl8FQa)v*HaY%vLm zq1xKp)NVTlIx7RV#>&cTbCXT+^GSBm%Qv{bEK|gQfvU#bddF|yvioGMmJ5a=nbeUU2Y!Wy z*5?vnr~E{i|47a&Elm!J@{uES6jMZ1%Ihq}uNJr4NzY%tJPq0s#N{A`mcDKp+nWk? z*CKE5GQjD6&1F(0*lhM153l#7rkYOt7#DseHF_WWKR%kAmc|5l{i_x<94@DteyM7< zz8&=W3(AMbS6_73nX%h{a@(h_uI|NM6eRMOFCV^p{QNcBxj4Z)kZzDeNs{ok(HWV zGKT8KG_I$m9R7b5cHMDJZC&;uPkc5IQIKw-2qg60^-1r&3Q|H-S`;wSR1j1I=^cW! 
zNH0=elnx>!6a_*@5D6WT&t4u$-3h9|MoJG5b-BAf0xoR?W`9t=# z4tNXs>d`ot!t>uSj+vt5v?DLhq|=@PGSP%|akaYOis8{VZ|W}cCdkdTp4GPndjXjbVLp@n~3 z33qu(n?#GrFVy{%=|E@I*BHeNh2OoKfb|B!y_%X{a$1e+{X+X;=Az`xY zZDhpQ9Q5p-eX#NX}NOkr>f5w!cZhoAMp{!?pc)CHI9Gy zfO2S$0H$pRt8e;;N`+1W*klE8dP_=x&g^Eb_1as%6|Va;Gyws-0B8#)ZI956Suz7# zuTj9>Qjcbv0{UY>YX(thW~($%wc!9R518z8%7I&pj9gshwj~BuOlQx|uC3h!!gln1 zaEt*xF(B$n-X6XK3IP}uwc^h}nGa44j8hyEz@i@+zV8M6W!Sq`@S{9KDgf}Fj6=W9 z&gP%MFDyv>`87!HR6?g+4!F*7$$PEH;!zo|eLMFECh}N zv}PPo4uZVSwow-ND-~BYP_s}0f$B&4D0ZL;0KrPD^x5~oH6SV~s=^js9ZDg$w5aCw z2V&oY2t5<%{m?%^goi3D1cx|MB{T$+C_p7xJ}80O)M--X`aWMCA0)!$tku`g!{Y^r$fZW*@%d#g z7L34jk^2pAxDXaD6c!afP|`^NJ2>Su2svzOJz$l3ptJ)V6<@3dCs|U-B6%+hYHCag zb)Rgumj*R<>{SW?GBGmCdBpVYbCI9xBHFSqpAPn%^`5H}I-JUSEMWTmJ1&1PjVAr7{$ZlKu)`zs`m*-$}lgcndp%%iAGvx~bVWblu(D2_d4BG6K0p%Ki zabi|*?2>M`w#>m;B$7r#A=RCW|L$kEHs2Ef9f!Sv$!7Sq(XcK=CcUb+1Ku7i?vDe1 zXMtD(9*-Ll2Xwl_2R}9ZKmpVQ*rj-EiXKUQvnsQVrWk?qUzzqiXD_=xb_#3{j;Qv$ zZ>yqJnOeQv;+RCKK+FA`wfmNK08aebm< zIMA4wG?(~(?F11ArdmDEs6y%=Gf;$ebaL9}jv_~m3UE~Bd3;-037+e6*`A&6MRr|C zMlPoV#|@wb_FThmWxbxqNIw5QK#Pq|%cY%38n;7up`@_o0m*w{T z)wt}@K!}W#FL-@xKPRgq%6%Pee+pj2X#xlnD_=z&WQsXW5;p@&(}L9*Vq=vyHr#F&mcmcG8ZJ*~Ho1qf;xjvq&Z;IlRC825w4OWW3AHXuGx+p*_*vS+!a zLWpjoQ7E2I;8)r0<_|Rz!MH!lbk*owX0ND%z+Zg0@n0jc6ZbE5k<->>B34BT#kbvS7MW0_K zelS7Rsom&FxkH>wXoP8=!S?EA>Sv9k3d)A;)6(D0zKWE8{c?9!t!z@8yp^fQDikcF zVvm5`-~EAgRGY2~kl)QgZF$;v%4NNV*B z>2X(%@$f+6o-pw{sz*O)YtKUfEd>nb;85I3*~fWYV1Kt5r@`ehq&aW2k|aba9LAhx zpU0r1N}Kn+UiyB`78Kl@9y|hPD#F5(UK(UDD5siQAyga9e7>`P{=7R>VLyGVA7mNg zjvWs@l3X0+zBVs^s-G6P2vrD7XbzRZ*Qh7!o!$lvYyW}`6xy;Js81Po? 
zhmn>B?Lh=BdDZPH90$lo_&z-9;4->O0 zLIZb4)V%j)R`{1TR+WRwl3YhhQ}D_W+GEy14+DVXTbe9f7q7}8f9_RjclU8huXhlv zU>q9n@4s&6=@ZG;tAf8|nHlVo4NYl~^qqCv9)n9%O`6J3!w`DlC}sLYEHtV0dUU`G zOZpj>r@UducC~eF1y9=4lSyf=To&YwiUy&jnO0F!XlwD>0Z|Dohzn)&je#m0tmu|S z0ZnuBQOfevt^znn$|2ZinlAHnq=XCGdWLYJ?X9jHs!R-Vyo#figR-M=bgo0^?BmRI zY<16*I?weuDKq;P2-8hK>#&Z&wiC^G2h=71o<0gVV9&+_| zK7N6pj*fnd;6a?OU^qznsD{<%(DeAB`X*_kvxyZrD)lp+g+rEgoj^e zXi4pSck?3iiPPh&bW=q=dea&1y{V<5XH2gF2YyUVrTN$)$w*Uhp?MSPB6scJW`n$$Ap8@5syZ{YdEtTRF+lj3T4f9@++`s>$?cOpIco1NjSfp`_jA zyAjOx9pz0)^_|FUplpg^hNiPMnp4cS3g5?jRf&nCb-owkJvsmQy+GIvFn zG)T!G#lFu?w|m<*b|%u{Cf&B(N6NRG$>*2wIAfy9BCg~Zeopm78d)Pk(rXo1HQK=t zBjsiua(8{*FHWo=+m}M2ky1E-rh5U`OQ3)R?8WY&Ircl^7#2g2IhLz05whpmf6uU8eK#{eeV}hhc9?oeD|7XReE8Sw$czftu6n zrHX3b9Bd3k&CJLM+YNEaRvJ+z9D{D}Z<$g~4wOzhm}HodhIorw=Do$8HIcyV zgpc8x7hwtIy}LR#HcMNBfK-q=;y&;)d{457zU24ixYwQiUI|O?kAab0|LDBo4Pfu&CM-ea%I`xWY<1w+weoLHd9RvnkW%ZcRHk? zxzMKG9-F*2Fl7IO0FyG(LeVD7~@#%<8g{OA!a*Z_VmtPI`E&4t~bOk%F0 zd_fA&=b+uG7`C9JHQK~%ep0|Io$y#le$dJE7s=p=w;qfJX=&mWVC-q;&|?oPkpQBQ z(`L!Gp!Gm-ikR2(HYQA6p`LJEeVIw5TY4ia#Qw+BE>ItzKYzZ*$FSEuvq_Zf4${D< zVW)?XYY#*A*Xlr3V{M8Z1A>1{MA=)4t^YWy>4GC-XY0(;KSK|F3&qjtJxrmb)m|ut z4Kh_cxb)XTPWqs1u*1P%D#)rG5z#h|vjhHX)-2}>#EPz^fdkMF((F%sP9SX0u9h*s z`{yE`Y2Awk@2Bc{0Lkd??+3c1%V}XAeMMX-5o$tQmC1M=;%Xcwt_G@>BeB8`6 z$c(~`!gC=Go-FkbE-Nb9Iub!no|~5kBoZMZ%5OOP40{d2A&FM15}>9>o$xf^@u&W@ z{a?@z(}kw`APToiY>W=K(x)K4W;=wBM#X}+|He8>ft)xc)88u3Kx;S&*^NU z-(2_t*4f2Ei=5$h9rUh+I1iBa1G30)&WR=FC61-bm?Hp(Y5D8XZ(Qf7M%K0M<@rC7 zoSV?T%^7181r}h8#XEK5u9gRf4CqsGkam$D;kP<8bS`_vk?~iiXKDa80Org2JNAa< zNu_~H1JD7v0ky=Vhv-sUtk1(o%X5cl$z$67oCkjXFPJ%$vlW9LIOi!xc%SmweR=HXj26LyBr2OMa_6;QY9Ns@=0^04@mUS~~OJp4CB9wnf#Ca0vdiK5{jjipGPl za{%@(KHx?(aidT;k0;i9Szdm%;%@thw<#_9hA(e|kUs3q7-Hwzoeoi;Zmc#^#@h{jq|uRT7z>m^tLaK%vv7*dGvmS&`8z2<@@ixKlrXyETQ8oJ^HZbLpP z*a>Zns3gm(@86=cj%Ixfq1!O3B3|d|3`2GY*=6F}-Wk>KvOKUiSv%nrUBrO7-@nyJ3D`;W2fE) z5bz@U3SpdCZw4H7PN!b2vJJt&$LA1xWy_s#EtoDq&<1@m5C{a_n1Uxk5%hpCNoMj2==l@P 
zgcd^ot$gMXOZ}fcAM}4#(f^YoI0;dH#!0>Jzx>x<#W=tM`S}pPXodf?$HV{IU?Qso zhS$oa8IDLgvs;uDm!*$kkl_KY6n@9LR2lX{vt4JgY)zovy`sC#1KF<3+ zful>7(SO3bzo6Rcv1-qz!I&NUeA8dz^}Bs<$~{lutN^+=Xo(;4C(mF#mm+CD z{7J?~mf&;&JY#8Sc!ttrYQ?>@5zZ%Q=ZG%l`7IBSzAHx2y|eNvz7cHLX2YNb11Ooj zbM@}=rd%9RpQiMXO`}Tz4d%nypCelV-Cp@PK&OZH-5!5gpDk!1K?L<8VE4ECO*d+X zRe0hK==o#!%|csz;W$+SPhC0YJdr3m-Nc5*GM(7DO>W9Q!A~k7>T5m6*c%bd-O4*@ z2R--D+R`%xtA`hW?l7yAugBtRoNO2xO?`GW8tW5F(rmS6P~LKJ4GM(v5E-NeQqf>| zlkfL0%*XUu39u>@Yf<7e6I4Ev#k}_pI~K6gG`Nr1S~FT-9Z zU~g2eGkQC=I|+r^mO#9RV(5-3@1Y2=`N+1jn)AONr8Y|sYuuE3Fpc!A;!6;!HCUdd@S=$b@4sG`-@k3FL% r-{RMY_jBP>m4MVc{36B7%#mqN`OlNRIKDem|E#5^f1~{Rt;hcXGKX(n diff --git a/doc/source/images/sequence_launch_action_plan_in_applier.png b/doc/source/images/sequence_launch_action_plan_in_applier.png deleted file mode 100644 index 0ae17d882d3a64320869f97043e46a52c1d755ff..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 42190 zcmbSyby$>N+wBMf3MdK)A}Jvt4FV#iICMF5BS?26jUphRbft6R-YX`iL6Tg9d4;uo3+>n%jDnKAtNg)t)nrmp_ z9rL1)Y48WPgQ%LrD{C7U3qvCZh`6DZp`D(Cp#hb?3zdn3gUu5*HX92)D+fnQ3s!q; zN4r;sM%H%5hOAbG4o{4g!PU^XT$R)u{`w3A@;Wh7N!l9z^j4$hyzqyWHVF<+Q;B;@ zdAFXrM5;=g;92?-xxKIEtQ&P5l|1)wCMVFUyai>A-bmWwDCe>d!&l5uwQb9F^3#1_ zlRNbAb;_rcB-pLgw{qcM$&~MC+~Q(+Ytn}{uh0__d%b#3XDr^IyLA_@OlIuWS0ZN7 zZdFWD%6A*vD`xIopHJ92L!5)u7T8)VIpApb1K3iqAC@SEDn2813bV9z2&s@``^mTw zGb7$V8KtXXtItTFCWX=Q)?`DO^$oR_7t5>T7q)1=qi*yU)on++sj15?1`pp`-=ZRJ z;-Gr{gYZqQTEtBTnM{={Xn0=OFR35ZKnZANO7Gz3?Qti4XcKruy?f*9w8nel;iKK= z>Y7=MMg}@;bViY6;!_)AIx+XJPYEd^ z+2LN3k-6IIhu+acrQU&cfe7F?yjqR9n)1BW`q;SeCtuIzXPb3iIrCsQZjz@Q?OC@a zwtcJYxR;|1dm2bbU&8&1;TAr3M05?bBuhn}9rI~P$3F<)sTm^J3MY_}C5T&Wypbe^ zDA?=|Ntj6+bimYFX&0tz-*XUk!cT!fydjd%XG*W(>(h8zTKh>`7X=HCMY|L}J4pSS zs#PqIEmx;t?$!A)D4K7o&6AKRQ5%Dm;CQ^WV-kbaUEE+*Vl69>{`}K(&}wR^f)FTh$bzH8Dg#o z|FV}LEcI$mMx1-RdHQ32Ojp=tXEBb;SWH~pG;>dA%0zcq(n`t5`#avs80FZoSp__10FA&*fE?misaLU%#n~k^maM%`kWWM%D6un9qVR? 
z`GaRK6sQU#XZ%Swf}V7K?7|)Jdf=Y&Jnj7iPSskSv%t|0pPhKgTew%zvgMM}qQ0Nn zr2jP!{qMHGAUWKc4`%FQ>R;js?!9nxtIPNjK`;MZ>X%2&;9s|E+;YfAi#$(AMo4MT`N6(K*N27E- z_EW@IwbYIbF~bOnkwg$~BG6_zNx-0R|Ze$oiGA$n5I7Q?2d`tug3#if#!6?bRlQ#@5RyJq3=owKukQi^9T zids#LC1pFs>5Z0&e7lMDxcREfI}hHBj~-jQr_XQJ@#fYkXz%U_(R+;~o^DQ3cQa3f zh3YA4?_HdKZ~Qrq*KS=oT>P82$wgA(h29+TXi>v_i?6B#A%y4wOi77QlD_tfw*N%O zgvCGtVZi*du-7`={6dERiSVg<-Fb}wn1r^ru~p*eI=92~@+Yj-Q#<=NNY6PfeC>{` z>I%R8zIJ{x=C7!r#WTyJRV3X*p=`g1p;^7zZJyoVcIdPxB6Ld)E z^{1X3TgMhVjejLf)->|HWQ~>ac>b_awNQ4Fxnxd%^v?J?Bat<>5x;~X=KK?w#BPkv zylRu{V2OFS5H0=k(D(Ztvy|t*jVzht!?yjK5F7HtoSaF?=P-32^>+P>jd9d`=?`T! z4V`H54RDe>Y1!Ee=gYNszndDNB(6P$n}_%n{{F~&AXO|F1HE6oQr(289@9RYO?wb{ zhe?RWQ8b@O0lzaBH9gYGVfT(^+=QckInUmZz@sKGHO*~0?v;k4aSjhqXxHwP#prsz z$Xa4_bc`~(FnYb3>t^uvfwbB3D32AhN>FEwEPoq4(*;<^k#bUQD-*_=D#d*x_xdKS zou#B^%a+Vn)j97Cd07n@DMs}vtqm_0%jDzrs0kDDaIC!@{Ig6RxranXN$^#gCSX*l z^%S5b2)3=uHtS>M_B_?1$KK$Q7#T4P(;i9>8O;}h#y7pg-iV0%jxMm%!Bbh^6RFde z_9||$gHMvcmyDYPVZNojgSoKnu8^R@&o=F>tQ^22KI2K@w$6&M4Z}6Ak!z)Sy|Wmy zMEB@)STS~eUA}%lTG%Yjrk0h2!t)KmM2YV5STS2mJLZszPvC2jgf$aP_4jZK7F7lN=3 z*%|7Qi<*(1k$n4okB-#m{1rj08QBeUeoQ0oanXE4PsJAU|=9M;$oyHdba-9 z%PhuL8h4~<>E&1SwI^Qvf{t+U@0mPiV@jR68(eG7vkkwngpZ1Ty_kK>qQ3A%TbOLAg~Pha}t^?>-fF~eqb3zDbwDR+8kz8h$MauDva6j{u<7b zpqNPZ)Z(-V;?)P<>#meRgOg*8PU*O=74rG!)!)Cb@mikn=XRO}J3NBNh;&fN>n@d* z3vX{-Gtl}W{!VuTT)4{Xpxo(~ zd}~n;kDJLRM!H8EJM61=C9d1bv@^OwT~Wh9zg5kglq1g0FOU9o) zE^vo3w97E9C2jx^3YhH+tC%-2q%Kf{#tpSjP`S0us zU#z6>pIB#y@v_IIgwKaN>y~$8xkP+A9Go?HL9Hyo&zm=$AulO^k#pX{OtLA#Mx!h( zW+Z&n?5rF$S$CL{S+&-br8pWLbI4~Yi5PRxsJKVctK~fHH6-+%gXZpR*gMfoehiKXusQs<;HAQ_9jFV;c@SiE+mqzuxU{jUM1wRK<#7@VEuGCA^)u zsSx+(og^XLSe&LwP~~UGY)MLC%|ftOsL+A#`BDOmKYBbkk>HqkQ{U0;bVKes`pJa= z-CRf_4Xc=SfJ1^PvDj&voLxF6`d*6U$TD+KvFiLlDtS2CeiK(q!qpWI2|_B^@6DUu z=Mm6m98A{~I#;e!5wtb`ziAVFW^=V?D=(e{M?P*n&%M!;KQT;#uz6tN2z zbur|{QOr%r2*qa8ek7q5n3(my7%u8U!!Ca_OIjFnDs=nj;a0dE{j%&q(tjts7`!vI z;f05D|J>RhU#TQv6TB z2qF6D01>uZ*949iBis(wUD(w-8@q3ZF4(LNGisP}enOG#PpEmDS^oMMR)2o77VdeI 
z>@n4nC&O`vPWR=ffA)}Q7;#GMJdp?U{1(Z2zau}LDB!Zw5k{k+AMk9WQ~W^VKs(r* z%pb|5|I3FZHp9xKAlO&Y{E4Q}buY4?2TA?)6N7^=@DplNPbjlld;zDIIa-xMvicag z{wj!v+y_AZ71BZo4~KIV>4~)>c5qt+|e>&UL~-6Uq!`egXPr>$_;7OnuEXXm@( zA|%nb|Fvg>h?VGOSqu9z@>`iuq< zBn{9H4xZVeS!H-CeP!ehWMu*nL-6b0uTRsE?>tQ_`HJBVVZl&Rg?uumf%Q=6a$fs$ zc^-TWHau{FoFdc(>IJ~Zke3g58-w+U1|7Wpv_G0Tpk8*LTrai(UC)=zT*D@hlgEzzK)B7BP=d%Vr;A(hd1bB z-f=@igH^TW%?H!05C|14lmXwYgLI@wyRNIdJ3>!>xsE+EGqcWlD=PxDY_!<;qIPuH z0R*RqTlBD=2KJ4Ojq*Mta7(m-E+m;ZaB(A}qq8zH9w+ssrKQ;~_mKoMj%de?*V<=~ ztwk;E?(Wvr)s1jEIy(OP^~==Mw70hxMjULQ65hehHguh7=IH1(_`lUhaiMkP3<1pb z9;Z&#lN)xm51E+unF3q~aI2~9F|n|~xLEWh7g=L_Q(|L7TO`6VUf#HI<7CO-&(9#; z0SGYO&{CT;owx?OrLLz`?0kGA*uDmjnP_TrS{ngT2n)OI5{N5?gDq6g?$sU5!Z{y# z%t;oxuf}H4Q9x^C&ci&DC7Tc!6eOM<6&cwvtsV&_ieAo(f?5sFe^}P$Y~YYPq9$f`50ARUQSaiz@z#2 zp@(G8sKskuCMIbXW&*2fHzK+)-_z665h;VD1HAY-6=t58mu~kWp!eIK>E`)L61tPq z<9~XE1##uUZ=0tLd`k5~O)WM&Jk6ux+ni00?PC*T8Pg6UNh2j(?(a!3$+RMaEUez; z-Cx4{kzbJ4(JQs%`>?y0OYdj$!-E(#$Z{D%ijeH|bQ(hJ&WbbTUBmUqe0;gGSU$}; z=?x?Zujx>ANu!e#H$+qQ{zYxd;N!^?;X@-)?nx;Z7Z*1-H_r9kWc53N8kNj_u!_pc z-l3=o+`%uWtf(Df6vx2H$@zqz-*A4a)}EBd;>E6Np~lGYFfEO6oGuJ@+mRtx8=VSg zvKI3T_#WJ`#Q69pj~)@$8tvb9C&?osCSEIMjU9Zfse+$q^xE-_I;^*<%($OIT^@X` zC%gb#v`_(DXOl2Jefylv>xP-=Xd>>Wx;lPg zVci6;t%gH5iB#7o6~89E*?j{q2XhttR&HJ%p3hpWt=3QT3JV=&^K)}yO@2H>wocEV z``~67(kUw|hcR(55#G8mYe?B#%(ihT~{dvh1x#%=z&Iu}-EHIs}3*N^L`;u0W?=uv3z~yEt5AOsmwf;eG1&%2~)k?}$7e z2q#o9NbH;DY~7hPt=;C%z!0tJO|4yPuVYy`b+9AxeOa`=V7>Fj-Z&V~^gU4|svsP= z#DbRCd&0I0-&ocUj@rk}2oR??w;dDT7Y8T#K#=p;+>Op$p=;sxX#=k)l&JJ@o++jG zG~wo~YVzmxZ@N>*pPYiZ|Kkff)L{Z533Q`)1hLwlw05P$&5?`F7&_$s$K`?Z3Cxw453VE-;zBjROqi$A^ zyCW5eE=Y0pKVBsQq0Rq)5UMFcKFZlB^L7;*vS&dXcnpyEi+*LKFUeX20}5Tm(jcJ> z9_{+Lo6FYeJY!_7Xk>z}4%43<8HWIPEEah(?S!^v0jx1IHy5M`8Tut7D;wpDM%Y3X`h93<===BhgoMh}Y&8ZaBBw zAi{zIdJ}k!4GcQq4<9~!+x)5bISW77tQjdZ&W2fV9QBc|C?;k6fvq|+WxmJdI=fT6 zS0Io8G;aR87-(;c+<0DXF+rWa8Wt875O6(?lIH}suFiBQYkq!y(3Fmj z4mr42R_Xx-x?=el7#O1gBFXtG__eU|H28#+k&%(Hakh+vTuxyj_Y`^1{u=S*XUF{Y 
z0t-2!{JgyH#l4}fY41XJm;2XL>D(U1$u`|#^lm&`{iSeTvb;K>zDowi^pu(&oAn2} z{)mDBdlINZvc|^7hx_}YH&1SC85tVp`Wm)uIOSsu=VGeY4*jZ-BSat&W^`q8tsljR zM*39hb*$~}cYpl&)V8TspnDP8z5rVyBPCTtKU@(k9{j0fI+7l~L|gH$7k84eJVJET zJh}V9$~XWXmM{VgP`-OIGq0k(X=!PRj3l%?TCsV|jG1c`KyBf-+i1{;)o*b5vYheBb_U^UKA!tJk@}1c?We4fcN!z)AHQ z{u#eqcKy$GgE$y`V`*F1c5rZbCvrb7y2$oJt3DA;pj3$PvDdELI+33 zxFkeLt*5$-S7m_u_xER%O}+W#Sy{yuecj!p;J&zct)|8Y23{W6jW-4Bk0$)7Fc?q8 zjiwX;eh*_{0_Yy_Vi&7>qV7d5YG$@NFz{m0Mv-WM_02U*pSHHPlQjmTNCpGCW>dd0 zSnQn4|wxV}ct?_>8e109{@f{F8doAq&yPE+_BjXI~oxnePfaLqj~ zy4DUo+q&s8Z5No7v@{)guSu`{)KzkRQ1Iw1(te-`QE_NsH+@XP{U(>P(XbV;T`dk4 z$8Rwi!1wSmj9v{;nrN#0Rx_Ie_`YO<+&>Vb9z$qCf3qZhQ~gCd{=>zCSN)#>l{UP9CSZiVtl@Oa4T;D}2Tam-S=h6t2>|;5_1B0CLqv#h zL(u}4?r~cZp3YrQ|5a>nUj07=G_5`0K)T4))n($9sjmA!unstD{p|>b*DeqZ>|1Ty zC;kf2eqQHA75o&i$j+k$heE?Y7sHdl3xbS7JW@RR}}{ z4IaF*is_Dmq@Nv6M)z{Fed(k4PvBqM2^9_V`@aJ}B!F6vpe-$=O_I8>q4Q8g{JN)f#x`dlfle z-zd<+Y_6@XEi+2#r$;w$ZD7~(TGZFy{n<-GLQ+-*3SUX9*LF$sqoq*Ygst?f$*lrVT(JYr#yP3Y_EQz8901s&H8!oRl(8<#o( zJl@_=*>5tCIW;M&o0XTB7w_gxjdkC)lYKlF0Se{5(ozoYrzz6j9};s9LOq@TP#`0A(H+Bfc#80ohU+#ZYgmkC zD5a;TpYS!mWZ#i_`!4bA+qX30!7IjM$-V=^X7;`I6JO`62&sbq6dUFMXjoKK<^|jN zhd$W*v$Hcz>xPl#-Oeu@cIp3aANrGi@dc7Zv0G&A^I@0c!_{lQ19jW!p!Mg8AqPVA zfl=`%1dt%>;d*8#Qfu|~^~gD}U-i1A(4cSCp;+PjY$!}sHCx;C7A|H` zP|%1nc5ZI&^XJcD1a0StQ^P(VJRy^s!4DsB9W$H#ZjkP!yc=Xy9Mu)qs}cp@Ekn37 z_2W_N8bgniiSZKWhf%vQT5R=@Ii1)-VT;m}lau|mF+5z{4hMPpkb9r{<8kNdsw%6h zq&|d|(ErbJsBql@Scz+S|>|&9#Tj%*=)> zZ+-muk(1ue`)(D|^)sP#=UhBIwYRe)yXil+W@&0VD6IPS&4)ZtxYR8M3*PY%h=Pve zfLsdMaP)ICfu&PjUESTt|972gVE@lLbuL|Dv@u@#3V*Uh?bcSs8#fdw`mxkStndU%UJRBO%3ffUp}070-aB#?3Lyby zErBIFS&u~;$|=?chrIMxZ+JkvUazwy?{zYi>F%BpI_wFySSUkFZUjF5eb&z_KD+po zh!rIbcuO1Lxu8?OU3_+qCp`W2BPnkXNIF0QTE+)8Js*LY?X{N6%=UWyR>Ues`=D97 z4+c4jZg~dzgXfrgx^4SCT#Bk%+j>Pui}cs;c-x|66I5&A>N3n2gmGR(?;VVbl252p zb&`#f<4(TP6YU)`K%Q_SD+z5N)Xl}uKRZ4lTP`K*fzRbvCDs}l({sqx19yCwxrEJ$Ns`MY5KSOUTjaakpn!SK z*{5EmEc;o-5(IjS28)vJPWHv#b3aO!T( z08#*cn?gHiTSTfNWASvB9UocPFX;tsz$^3X>jkc09tI^}mzI`x-kOnZzX?&)in{@k 
zjAEdbTc2%8fpQ$SR8&{@KS{WP6sG1|zkY=p8fJDc+0{)WHI%s@KYJ6nI5`(n0p!1A z&$&T#C&=;U|I&5-vlF?Yn)3jx|NQ)X#q7>^Q_48&Q?)~D`+qee8y~DjKzGyM+Z&a% zbLoBHf%Zfm>U(ILS=2>2c7-?_ixeC54Y?3}In8?Q;zzm8z;I#?S*SX2|UJHO`0=_ps zuIB3c839eDe?DGO@yHTrApARa#^qP?31v`Pk0#5fb6z$kJW_7dEw7 znwmmgx(*@ej)vw%2Z=#9VwA+N_;^KH6iFXN&*spCUb3>Z>DT8h+dN*U|62mmU#{iB zDl@xfQB~gy9h{!lR#ACy#q@Y*$+KzJhE6C&o{}&6$7XEdS0mW6!cNCA5mQX-<2rf4 z!BTXIWI*)V!oS4eqS>1=g609-0C+vY!L+#PGGfpFy+Cp@`eQ_~^ha4AkgwtquBQf2?+&p1AY67(S z;maRmk)py^59!?ed?pr_8*)rcOp2||muPIRmkyYb2emLax1)(khpQ$a9IG0>H%NJM z9LB=~N0mjGpGV(PC-fMC& z7?!HEtUrR&+_Pj&q~PQ^j?#;QI;-2im!cz8&ety9HKvRU3k#&YRt`^+IPY4Tm<)Ke z=!p3CFWWyR12WgD*8xCr)|hVBA8RP^UJL+FEHhKIspLb3VNYQr8Ka%=37h!e(vff^ zPdoHJf0=$8_C)WW{(=*9Y1Na{laqBT}FIuL@f0Z{J3UZtuTmwX4=Dj3gy}(nnDnb%8sdqorOij(MpNF=DpG*hj zEJCEdyiXKf!IQ9bUr!%P^q5lywHry#qpyNq_~*C+k0qQC7qdS_xcK7H&1n7E2Ign3 zTSzTtMt$jE*w|WNrsTBcH$U<*9fQYog=(|1qM~Mc6EH|bNY=z0XBk6XiGvhpab1MO z?AMbZOS}q1^PLc{_0P-a1v{m~El-axs*p_y*rbq`WDnudch(z@x_pkkCg%|5<%&ee z{>Qs5+oR(hX?3Q%+u4G7Wx%AQw!vrs zn{Ym&=>I&g-1=$9$VO4V?%9qou4Chl=ZYZE-jUN1Sh`A5x10y!b5E`PMN0R<)T@(Ft znf)u{ob2o`G&JJF!{;5I1O*26CGlrd1o>UKws@E_8q{UNv4Ia5;XGe-&wTEfQjnd~p_Vd;olphiP2nH#UOp1IB_8nz^jSfF| z>DG6Vc_31P{rX;ifO3EuIw_hjoEY1PSln6*0rpkgz`1kFqakU%d&s3Kr%J7FSzwCV>INA`YcKU35UGzfJ_@Y9Pn0g@yoh~)Howf z@D^-UfJZa+Yhmbdy7Pw&$o&8WSZqkSH6yX#EZ@YGkkbisJN6%O81j#a zR=*84)E_FwG5c@I^}l3cBnlz#4chvTIII~MkcPDSzxfF3MDrX*EiNr-qtjLtvz|V} zx`J}x%8Nk{S`;AiZK50bz!~x!ddyGM2HEC70 zy{@I2FL-Y6CDYVu?Uo|sgR&+(g1TIrLu%fDh1=^#{F;5v1pZ$BQowwybrsZUsRO*Dou@= zruJBlFi7p3E}S9y^^5ypPQAw;1onHK@8Ss^eR>y2pW@DW>ssOyN3+}bZ5PMF7wrq% zKvP>^w}=~MGd&2c@{5j+?)N+>XN{4jXjlm5|H&p-zx|CAPGp?K(9-xy`sara9{{q{ zWJN_qb*fOu;A*BaE2^u<#m7ICubuNF-Mh#hi;h8$N=dnx4`MUp@@akvga-hM>XJbI zcHV#+!j5v7K1{J8s@B)j)k$zuNNa0L(8Qx@?U;!CZGVxynJ66Ha6+ou&{wb0nVG!| zHAYk7;v|u`zZ=P*Ql!NgFrVsAVP89`4Ba%*y-zpOphG&vDTTkhgn%628M zB{ZJ{micQayB`Et7e4ZKO}k*<5xzL?Z_p_&&@GcyTP#aqpx)|w|6ML+qj+MsC;@s+nF5`LCl+s-o1bkkS?zbvx~>(5Wrp9%ErfpiL{#_E 
z%h%7j@Nn=m?r49d*B6><{C#%TFdFutk!Yj=84wTv_(WW26W}YHFbxeeVVF%W)BLr| z=3*Wl?%(Eh+^yTS2O4y`Ax`?$=kR;Ml)^>Rg0}PTrW)4Ey1@hJh_w^51zgjnR?E)W zE?%Z%ItN{7gE6pmZs6xB!jpvmNm5_Qu^B;F=69obOLHQoNA>0nA)|(|ZQC5ol8Y)m zHy0H6(aO~}dU;A9|UanJsQ@RL`<{88#xI&11II!G9zz=%3%zc%BmI9?P zf7@L0H|hmW7v~5&RqWydV2ulWQyy~DS}6MB;8}t2uN6ZyTCIs^z`z&*qpB0mk&9)O%A@E8vyOe` z^wdPfPo0c1C>**ex~QjRCjO-_JGn(=a%?P{MMYPa&6yO)$aQ5j|N8FpZORK+R?}|M zR>xj-<3018k8lymG3zkkavp@96lM2&Y}x}zi4M?9jNQ*1*VB6P z2#@(m!kutw?c3s@5Uosv^TK+j&k$*CTC8oDV1tBX1yCQv=mQrLG|1`?uvwIt@3vQr z7b%`9TOdmZq~vh`HAl}j95nFJ?@Jq-2|j|PKj|9fyfQi{H~?QwFUFb-P4gR#8j{-H z6G+%;tzzeyw}f)I0+;jg=Ohc)NYxt>-(d} zU3ij&IXF2r9>C4f(b4PQBl8u2kvSMZMxvm4ys)!wRh}8!c5;9xbxs@MFKp+(Ue|#l zW#nigK$%S#w?#jzbcs18bzcdogW8T=9Gyh7Xj(Pwt6aas$?CFEIktj@RJ73`>62F} z_4>`jdQ=)i+w~YD-}prf1kCF;orhFUJ0-je#e$C7K(jr&(8-(i1AEsab2IFr!fU3LL1<1#9Xii)z|eKb|MoZ{u# z8_&&I>bk#La$5%I>aMP?8X6k*_T`RY|DvCjLRzjw{nMb4pWKMvQB_e{T^65i&Y>I2 z&Ru8^F(0zDw1mIw8Qh7gs-Fwi+#36!wJloqHUokfLA z>~c@UiFTb+=T`s?V}+X8KdmRqL@^t897rZRRSJSCOotv!jILCKhvV0pH<*JPjjA*A zG$8458j$OZvUXn}g|oZb*u3yN-64#NuJ}~Q!pT1|%XV6PF3UTe;WxGux?V69)ewDE zzoA?~o5c^VhrB^Q<*|9>4W3w%G+uRjn4yvCM5c7*CnIdiUS3=58)et5I`kSo#+OjX zKHG9SfwM|pzGK=z@x;0fiHdd* zaa)1*O10q;{&;SvnQs{;B41cOAnnr+`9CRr9MFP6uh4!Aje=fz86D7K1Ji4TLol|3tqN9m^_&%QCHuRO}`EpWUP{T~Y>DFvs+x3PLZ`)Z9 zO*=awmhNs;cHQHe%VL;ixY}ZBB%!MML`+|@AjpA95Yl1;3DfY?(8$Q0<5`D~9~6Of zQjwZ>_VKe^`7zHJO;9f0TEdO2%$!x>X2qdwX3!Ejb?IjzCn{vbeje3$*~-Jbeo<7hpND;Ygk`=l7@= z_uXJHm|hjJAKSBGY!dbKiUlf+_bzF#w;ED@E_ukEo4xV~S&o2LOkv|R{hqQvM7j_A zt5d`}HS)c}4kqHynp1KN<&s=A1RdsDm{`Z?P|vqL;4M@6o7#%VoOG{WP?Z9;Ifp0q z_Vy#<;s!5Q?P|ek1`uliF3j3~RYe6<+jx8^y@=kz#S(XU_t>Blh_Z}&<8p=S@ho~Gtnyo#b?y7sJv{i5ael%F5wM*bTh7vYYX zR{6lBi=2Tk?K0PwBFr|lqdNM)-lCC+=#;xoRX6zCw~wTm0%Z$jrzcF0sF8cK%~Q1R zkpL2B)0$%6!=L=%*_0o<;igje9r5kMK z?r<-IgB$@pVaM?7@KQS;f+g2u=}2Ot%J06VG-*PQAgkT*QSfpq7IAS;N%O~ z%7|~I$TbB{wQ&0XxhjO#B8`FJD8lQs`V^Uo-ftxg5pEP@+p-4-VOA$!Emb*h2}|dc z937~|(Rdlcsf9tY#TRv_$1du@jFym&o951fkh>M&x8c`^F?c`Tm;j%I1Pv(|?VXwf 
zl_vo0Z~hTDlz9VEx4-GyVcgStmdKnD@|w}zq3d8 zX3MN=SX!0U*HytX1-16z`7NR#pzN8@#We0`#^{(?TJ|C-H56f#I9~5keo*Il=gytx zmjeY~!h%GJzM+^y7blRAd4QC1c^5r2m+_j6g=<&QA|fJEUx3tx9OjZ)3Dc`tWTsNW zZymV32LWZkmF!%36!$)!0auk1fuq%=36yPWn2h>)_pF67(W&E*EN?DHL!186(h_j? zc0zIeXiHbNiqy-+G%eOf3qiJ|%x7m0tbI6KqZLh)Ig3pObPPPJ;V;vpQ$Hg~AK+_{ zuQy0ovokYun~P4ontGg{B*6jkb3)d?>9H|lZrI%-=+8CPG|weVCs1<0_tMe0@z)h& zo^T4U7r}3bw5L}Ip$esy+RLD}J>PBD1`tREX1bCncoyLI*N^{%Ev3Xyk3J%rJ%*ur z#*IRzJ=EaxT|Yg09S&)5vW&MK_=6l#nw8{?8Y~8EY;E^8t(GwZu2>!`j)+VNx)ar6gBTDLa92|f7w(al`D*~MzSH8*{a6FU@vk3h3 zoc>Mg@sj(ip&E6;)Q<~+N(W+8g;1~U);#5WgT?!)BG`&BSZcJqtStGxdz{jQe)(m8 zt_$4kwXtH#yLUr>0bbxpBBln;5a4JQ_Qbr%pu=Xo8$}QnFyFg1OSiC3=NTaq4i-6v zsuBA27CGtdQv@F$-{0Uyt{^WXqol0Nmn28B13xaJFF)SV+NcXLuq>0aM3!|=&qkG4 zx2*|H22Z8)0V{3STKdT2aAMe+q@mo{Of5cGc6m469T;R-U@?9@C+Ae*>bNU2| z5i4Pa{PUP%i~2P$cLb7sbK2;pRbx4U1=1)OjSNlGz`R#K-va8|1j2(+H3f*|{N1x& z7YOGm+jb{I_E1~d4k<=V^a|Irp9QzI?C{JE4?CP+RKA4mYFtUwDz0tTqD*-<}2~EcO8LLo|Ke?Y)QgS$ic{@nJtbN z*rqx0GF~f4Fy;{+eGMQ`vinz`^6B#fo16iwjXrL_Y-Sy|4Q`V3?gY>s&CU|yRD0Ba zGNq8%&InadVk|k(Hb$C|Xiv`&=I?;tVe8?x%GCykUNk<>GwohE16Ssh0d0znG4mt7 zrUAaczVfw$L!=N1Qd09i-v5@BRb{9uqbs|eN!+N0*jvh!_c%bsLH7;s*tcJ&MnXOW zpH@!b7h-L>`1lm@Ypm&PWb5XoVjql)($3%CqmLIpiyeBK7>P6nmW@$ABf9_@UD9v4pTW^#!WA^^?NaoQ?8>h042g>QGT)wV^}#wrG{X#>6~(86_fQ=EWRk2Gdw{yGrg;tiE1+^v2( z6);C}>B(vdXIE4>9Bxi4sWQ{b-MWI3J!u19%M#+#A8gGQr_cz#R{Qqp^IIRhff@t@!Nq0o;GiVj4z8Z~cd#dW2oNfV&8gaw!SeXn*w~zc0*6|yov(l|aL?M@ z=yn(uW!2Z!W%rnD6j>8qH|WH% zX)jB&H4MYy@evU;&Y;wUknM`~rT5gXQCnh!FtuK%l0_-z>lh)}RFGLnF-s+s37 zw@H=Nw1$?J04FDP5C}eujE-v1Vi#pP)VD_HjA+3K%33PvNZ{-3bIU<6Ko&8qT2iyV>nC-zA{iiq>AKj*E zd10cdscEMJQUFNJpB`A7{LY*8KxG|H1pWR#;{~uq)PN5CB=ASY{JcIO^EWON zH)hW-%f^_H*~k!Jw3h7Bgl3u`^_dp!Bo+jy&*gm-_KbNti-7K2CZQH-# z!TxT0JAcN^-X2YD+p=WWIiS+}pS47dHV`PAd3g`BhZtM+L5@DIFtxN4R+UMq(|v0S z1dYP+En3c>Q^qkE5WvS6I>kohn?zYQJ`k1Hn5!uzxDi%q#5Dm|h0h8E*H#tfQ(ts! 
zI2|4wm^nv)lU~3`K_UbZ;aKPu$?po`b6B->A9F(ZI^Ufq)Z`S!2K^JozlnidI6ppO zfjs_d;KbcMti>;z(#N~yZ1rZPcj@0F^Duz+S~IfJD7uF{9JynEFq+=qSsFF@riYDk z+I1G#LXVG7L61gM9wdXa5UZ6KI<1RbT#xb>DDwpSdYX2P-UDj-(`Ukv~CM3 zJytPaW;&=X8cit!PiN0U(cO(1@*F46f~Keg zp3R!ZQsg&AlLtrynaJR{zsbtSdysyg+f&_ckgeaX`i-$VD>j| z;rQ&9qij$xUgV6SOeR$Mc>V29eL#idQM#gU^RYTo^1D1mWfWrG@lVzhJPEZlmZg=! zbU+5B<5WNc>FhNY#eAx5YzJC^_4W0vtOqx_SH>6a{q=Cvsv-&KPU29u-zr zj)0}GxA%@N5LhG~`roH*5}&pAHDf&mk{J*Kn}FLu;sk%T*(KwVwz9gq^ID)^_(A}S z#9^Jx9jKl)GK7gK7(a_M0qsf@_RqF(+ zV6p3qd1bx>hV4i?U`ktu_Mecu7b=nlxy1#JI{iuAz3>Xw?@ty4$(}b)y>shypY;7}A0bqTLs<>cZzT%TyY zjgCxDc?G=PEq^NWk2UGnE9|tc27`r}!{L?Yy`CZSN81bdJoiXR`G!Hr+xztvsz>26 zXKeMKeNDZ_X7J{jaLjY>Kj)Vq>y?o>ty^RbOO5nG$IqfeC)p@helP$#F_8)lb)+q= zI&!Yny8b8=-q6SGMxK9|ty{hIy@iY1pAQPY;IbG))G7%b6-3J3Iq8UGQJ%K4;nM@d z4_PjxR-xe0e+m9FYcZ$*9p9k$(q;Q7@t`8>E7Ej48Q}Knw^b`J{agKdDaJ2uOSRkW z#?}~aPYA)i+z(La17B6Jr+)fqZDXmfUyM7x!-ilRpMz!*GM*`?{##ofWlq05=xh9) zcR*ftMgi7iEOO_SoBvP0F?3Oobe00=pXK=HP@jG?pbE$a4uT4KT^756LsT%Vxf#nu zi#G7@WeoNol4BvWZm9kGRA2!G;YgYRB0QIC%3|66rC>x%cxl3knIyOP>e42J%}ba5a~#&8zq>r_S;_}_L%P9achH^)m`OC&f) z4_zAo+|$s|*g5O#w`?8ezrN+zYd?FHoaCva?A<>PpX;tz_btV5@Bi<)WRun9?lKaNdG0)e1?Qu4cZ89+clQZno#(VaU<{LXTMN~)?c$;rvl(W6sS7NsD$sj(iG zjQbXJtJFqLsAI{GF~5Ssv@~i9dE;kdyZ??LOH`^zKJ}7Wd~7k> z=z)qLqfTaGQGzHo34!kiA8@$l0X@B8hT4nYb5bA-qXY=YlfsM)=%(LCW*YxgI1~&7 zcXR{|92o!_v>s`cD&4JX9YoJlL!sBWbEZLl?89^>J7~H&XI+Dc=pIecH2f76yG$P6 zFBkD+K_-tG{VtEID463gmEUhwg|>7a?=J7#8(`5H{;Foh7}|=_h+0=2w>SR$lh5ic zGHYn}8?4v)$FQEwvx4>bldZ-N$w_kAJV{Ye;N(QMZ1=m#jjA{XaBAbbmAQFDz|4#e zPy)8Uq(RCC|K!UwEg#tt=sB}Unbe58D&+nA`4d=-K{w*=a9VJLv=7bB7R!cw{Fr;? 
zq=1Cz`G&8n()XBmaV2qj{N_%s?XDp43)T7&0Y>ttr`GS36JaNF)?oJf{i7M2l#r42 z>JdBrtq}WnBYZ=Q{HA|fD6b0L@Y?1kD-%=1;Ke0@nG5H^L1HQkkGVn~IFXA;e93;- z<-s!%xo6Ls!)V0!xM=!CRwyfLLChH$@PXi8573tj04aHC#F__CM4_IZ=fKZf1)7=C zy;>L;{@~zkaB@@QJa4&gkF4#3lt~VVcZ|NO$xy#fLp-F5rSBBF;kDZNbL6JtkgCS= z(h^8)<;H>1`QhwCoa^wpBVqZ>RR}FE3TJz`VZ$RM;6&$#;9yLjW`#K;uY4A`d?`)>50l40rK6;+;>Fw_84_`*)eA3u z)lRE!TKAu^;oQ7DkN{Pn;QIiM`-aCGRe&W5zzh_a;((U{IPk!U}`3~8C ziwy_P{S_NlyoZb2!{%owz5$XWB1w^<*K7wxDHCIkR>ZTveymwa#;$>%6Wo=hmKC>pcVF6?geQCXi?W!wrR>Uw^21d|Fgpz-^)m&NPX*mJq1F2l?UkLWpjVz1=ZpJ>`VBf0 zMBVTojM_p6xr5ajNkSxk)338iA4~LCfbxF9A-3m0?J*Eji>oXkEm{7XoK1BAEvFm{ zubP1EeC%ZRV`#ez4-? z;tE^|q-b#vvhHNvpM1CGdXA#(sjL|gvQ~t6dy~A9oj3JUIAl12Y3UBN!SM6L47Vyf zqQ`T*qH=Z6@I}j&h#2FMUZS8B)h>rItZV#IOE_$jlJ5IEi`tzIG`*oVRYZNgN7|F+8?w+9c@rYPeCHE!W0PZoQ(f4Iq+L@dLU}Kwbn?L=MQlE0a2b}x74uZ7V zYmA(noOg3s+S_lupoS=ce(C-DyFy`>6((6*hHtguWZJ(OT#ZTde|{I4-R8ADOFw$6 z_Lk}Mp;Hf}i1GD#$B{`}8GPyxqbwT2YiFIP>tdDMeC^z1ri>X>7N9dbJ0R8g?OU|_ zV^wwaSF5JERa?L0k*T~xk52LODs3sb(^b~`iA@wn!k0%JvZ(w1ZmNbP%L1_DXTS9{ z4(M27lKIxt66maD@I{CQR~h!pYKtlc4S@|z4mz)U^c~t~@rOWsC`qJe7aw~+U^0|- zuajkGZM9Q)?!0H?{s&XC0QdPf0RL(lfqh*TMX*4ViVQm1Scw6&rl(>gY)(e4)Xe&J zgL17egovOh?GDnN4SrqL7VG^?^xc8W8^85O-W1VZpKUQO&f*-EkA_~*0d|X6p0403ha?Qx*#B{w# z#{wY2etIyFXhAB~_E|u&79(u)9BgGfRjb#o$t6r@#H}(#xrco__3K_C$JGhA)#=FN zgUS#*kT?ewEP98$BM$9^h^8?Up&-!*WBUeyEoIz+XMYiMWbTZA0Z6T5u(U)7xx?ub zIXUFwD)_g-5%I%NT71tKL3Vfm&s}yHrK_#4x3RSB_k#gog5P@t zTHM&wsd;>a1LQ#W7#9%RY%!QBx_P(oSJFRN5ZqE&uc3ijXt&}VkF6MBG+1-~ zve5bNF;EmlK)9^{ZPcxKlAAv3sGJW(xK|#U9%xn<`6P>_=quVC8 zZmWRd(GuT`kqWL0SlXc46%u9ttLy?t(FSkAp-CIGC?G zd7Hv90t&|qTI8v^xsV0|o;-Od5wqRF3x)m>r}b8)*k+~z;{`j=e19`C%ru23g}@Rj z1_PdrWh^+UdLi9}=CLO{sj-LjYfRD*pBv!hKq0M%2)_TCuMuE=B zWk_bF9Ui{)AcBsqvUD}Orq{P~h2g?_~ z^iP7kbXr+mJ?B>2nVeFXCgFCdJNn(KGh3G64O1lrg@tVRszCj8&F|6FSO65YLrqcd z&gz-}TV^EIyr|MAZ=bD(1Svr6WvAfPL3hdh!%{KFmmY@41*{OuAxogV?qBhYw${F~ z#&&CS;8B3XdE!q?pA;>OI#6?6uA)gx)-TAM>MqU|W`~D_@t;17;6dmFwKIC$_FSbE 
z^E>LP{B1Tg-L8hnF+%DjCI$XOG)j54-|97Yeh%&;?be)-3kS@c*Jcy&*~sV!q{*`; zY&)C`w|n7+HErta2kTmvhUTBP=F}9cRilb#X`L=RgG~xhHb_f=_~wlR6-T1h{p+~} zr<6*jhSEKpAdxOfPT!u{@ z{u;UN0draW#5q4ZY)r|)Qx!k= z%QGADn8(!w40cLdIGq0=!gng{^|bA%D2bk~_1&c!pqt{#oTxhVdSiv~DLkuDq%b1c9gWcuJ%NOt@ zs2Oyr9u7U5hS7M)gvL7GGN|Wa#2%K%vv6>Lanf5D$=_XGT3QMd_R46KR|0DIcnQ8= zw`cYB#H;7eXZk2TotxvYm8`a=AK)sRc@`X}iZ?XRC3JbkFJgqD+spp^y#Pq=_4mG?~4~;kfCi?(~mXF1xoF`Q>BSl4$gxnJ0 zq9~706zaay{F|HN#Sv(nKYFd5#CYj}GWv29FL4OU;@1&s>g)wDIXeoLFekYva*CMv zvl4rxxhu`uucX3QRj@lCJ7}8nXs6GeYSN7PdP{8OM9q@3$g1OU`wg+fc$}UT&%zf~Cyms^(U_kjKD7E;&=ou=^dou-cTWAVd=lnm zI3VylA&$CnkD;rUbwNbLsw$&A88gAJh5)JikU~z|)>l_+PaF=dCy&9T6ns|0>E+wA zy&Tp)d6(*%eIjP!1oAme;@h8+#tc-dD1nmuxN2uDrpIG#@-ssg z^ECxU#n+$t&Fd$+3pKZ1jfwK>T()$UmI%{y5`~el(!$7aZi<7GUl^KoVj#_Ibqx4fjhJ|S^*jU_BF-CTqR!uhSgnGNr}>Jud}6P63B#VjaqBKkZV$CvsB@F zXMAMcYr3)UUbIia3G;>VR+g@^G>o*gw7r8vDuauDhVB8;=mixyIsDfWp$pZU*8)0~ zgVlplhOae52km)_fhP_`=-GHeLc++0E}Ni1Ug(sA(}o7#2y!-T236;)T(B;u@4jy) zL`HlY_@3|4?kq`{u0iQT*7YZ(GY zrV6KFlI^XepA`mg>H{;f+W|>KPVwg3BT>s09M6xS8fiL|(R$81fr7h^*JE~ndRub! 
zTKJPD*EstdlD4( z@F~1E`7-gXjGus9@U(&F&5o7zB;9b^C*df+pYR&EV?R@k8R|OJVR60BbZDS@P1RcK_SB4~zSFyVQH(8q|QkMKsH(k|Q`R|9QvxD(3>JWKe@0lJ+REZT*X zO?A9C2k~cPocHTA!%rvuf{MzTurX*tPwvN$Ztz@738Kkp8Bii*D2JyN@^*vkPsY!# zp@kb;TYU(A+*sxg>ruWyWFg8(k*)A_XLw|U z^58)_z7jxA&??UD0&k}e#+8S1^v!g1bz!ap9&VG{ckX0={P<&FRz?`KW3;^bEgx`+ zd#pOLz zPzWO1af zTfGk`ljt>7j+XTB`T%^ZIuj^$yr!}JgSD=O#Tb}^S!n=m22@#U@b0$rjIZdbV@+K- z@4;Tb)mH4O2N4DA=L6{)_a#wDUKhN^ciI6urYs^RMXaB)irgPeHs4HbW*>PGN=D<; zx~P$JN!Ts<=oykkVweY+t2l(=~-&i}ZgI95t4c#B#tI?&~THIdVW^3#+VgI@{v-Zho-8XLgCYr`X zj5ClI$UaUE=h;)=o(|?EAPZ**xLU|4T}V@N$4S)kJ$NT17e*@FVx*@}0fEZ_R|p^w z>nqqhSZg#6ewfx>Q*`uDNwIXYU-nOxDbeSA`i(nlj6!&z(Ya04`(IpB@hjeC`GjR2Z&hFfU8u%o6G5mSP^f+h-0l~uI&r^&0R(DIbq*%p zw1$R2<38@Y25uKgwh+LP$!tLw>IHLyM%Eh0maXp(!f-2?gw@mdOeC6a1&+zJ#^2Qz zs7_Lt6sG9V+M@SSe=xbGA?n5gUYp~$&R?rc{g9i~UERk!K9QVoGJ8+r2u32kDxRA# zKbeU^CM3miJc>w4F@=f*Ci19jPF`}{efee#bOo~BzlY}syw7xX!&W~Kb+x~QUh@b; zPv%2iZLR76H7p1ib;4l;kcdnwVdT|90eH276O~9rwsH~YvycrwIPJn`o#EbtXMYDi zUJ>!%mzI`jj~?AKbu*_(eNyp|C(hiU-X7eGTqB{iu~6v{qiJ&YX5$*A69(_+`)A)B zeg>ns-Dhxpffp;+MbCm@5x)N2kMdX6en{PrQgl5NRb)3l%cytHm1P`)QvY%1YNnl_ z#0TBbkT-7%?u-R)dK~jQW?VNCoS4(&(BN~*Rtop$!F0glc=q*e(B-72o`#}iU>1ao z@0QRO_UPC$`{EP|4XeuT2< z!`7EF^Ypd(_xqeK|0KXW@3L#(^0T7pNCy7`h{bt(BAH1cc)=Q>P%jQ* zKlzYD8Ca^PxryW)6rKc1c4EGyxX6ekF{Z^`+gTBk4#fMc zMD-Q#y^95fCL=x>@J?PxLKDZ??c0O;x0j-jl-t`!$G5XsQmhJ<&qQZRNh9xpA*<=^ zgF53@^88A$qLY^X0xq>@@BWo)1-_%P-^AV?qNGenNPsltY8DA?53pf{BQlPoktuO2 z_KtmyhmiD?K<*{J$1DF!G+}SS9E<`TC_j{i;a@#Rb9ryJ%?Ue(-tF{Y_Be}jNJhuqtrQb^u=)4XN& zt?g#4k^IQPCHlNEePPasw)9JF4(PSr;y5T&_L?A1Nm<%lYjnl^3fD_wcM=58GZ&Bu z?;;j6NO1%UUKU|{abte^O@QcpiHY=yiW`ylhG_-TPZKaKv<$sFd-Wj>cheY6&M$w6 z^=IL90m&B(%7_oDFIZ}2w0s<r5=>?|Pans0^ zkf`9QqqxaNvpKRo4PvI0R%1lo{&XhJjr&ch-a%) z4xTbs8^N~G*<#xW_%F0T5@oLfsd#K|-d=TX1l(jAy*ExW_ z1}bn3je?PPiHUg-b&Ktu4h{|i``m8+6Dk&<D!^HXv$^O4AFAh112lM`41>WOHRCDtM)=Od?D!m8;2? 
zwdt0ZO>eesFF!neBVT_@P6yB(dm9 z7C;0*WzR#AhJNmg-K&GuL3}~4Uz??4Sm$1^JKNcPTQP(o>(OLEb@bxrl;b@ji zNhx<9O4bx;S+p zeRI-Vl33jD0}GH<3@$mtL#qE=%3}930umz?dPSXx5Ap3pjEbtN>1}7{_UF$YY2w;P zqE{DyL4g`Y=XF$2(2+A(L!Fe?%uAYR%{0B+Ar8MrVB(%_4txZEuRvqu5`c0Vx{L3D zqc}08Ov?X!3yg&oWw2H$sDI5y?dM6F!jz1aD&!1D{}Iu&L) zF4Ke)zJ0Tizz8*_wC%;Mpj5RvB$` z^3En61l+31%62dqAZ_7YEW*AUgHKdLibA=N3vqBj&&UT8j>w8C(6!+vie>I}lZXdHljj&7ZRpj)Ox~vD36Z z`ZGOHH5JU;l>^$Ryc4Sk=Cm<@;p1g&Qfb2*Pi*H!44y_A4I_h#E=6a8xy8ghL1wP& zIaKoswBHr?Or($IENl}xVC!INnvS>8HgDzj_>(}gYpdo+96`zn`iqojk z1W#GzF2F6XgM(c~7%VKo#o@C*ZK@?0g2@A~@KRY@TUbCKF7>4`nEdM2lM2zNx8ClU zX#f5_K1W%lw7B^4JE2RLbZ^ak`}X`=?y*(j8JUQsF!ie)Ew>7a{@_Cp%v&0ad5ap~ z{8%Rw3J4VmdhF(^-5~sm`+APb{GTZMX|vi&HkF;79gL3IN(>4Y-6x^l4K_a)g_U3q zmdElJ3|3N}yvWKQc;$O*YpCU(5xj()hPq6_yWff%E;&W9SHK~(Qy9=mmyDy0PVsz3 zw%#)tL2&HQc=~d;Qz+$+jHk#AgCkY06utm#lGB2t;+Azy0yjB$7 zW!qh#{_s%>t!Z3+&b7Gs_G7JJsFEgi`bpPZ%qehvamWG)?lBDcQKrxC| z@bCxLTh77kaZZlF4uB{%Qa6H-iWJrRX>ea)cQ6!kt~T+ z2uS|9!bOpM$Dl-i$OmS!pgk3cPt`B#9GTOmG0Hn-ic{CJo?cqpzkffNaBJi_dir0R z^!*B5-vdsfJ{zTzuInhAw^lvc|^IJ4N>&*&Zu$;k4{RGtKH9*sR*u~8n9U+#XNr|;RSAe2Z@e6 zpNQ-*G?5NA;1s?dS*G{-pr54CC4YBZ))&E&3%;FUVPUYf;lMi6O-OR%o0}k2=5!RM z{EeeVvS&G1*paJ`Qpz2z`g6-4ktkBkgDfoh*P=MX9 zpT6YUvle-I0r5Cr?2i~9w^aA3t0Jf|8;tclW^FY!H@`ImM+UrGxSu26AM)xIo9^(? 
z(7W4@K@<&zx@v;r>8|@H&FD+KO+^l5raB z=IST6>>d$NOjuHcu*Y?*)NXn14ZOC;$-Fi&%VWc(q({k5>q-Te)WBF90(n{n#PWVG z?ndy;&V9l|`swWOod3YY(hrBesuLLeP4$1GG@jsRjXivV_g4#}BGUyAwYgvB`bT?S zyt}mUzVd^p$KCaaDyWbVl#RyHb5eL!jR1;U?9p1DxEtjm*jI0U-HBmEAr--ZB=AQ@ zsyJZmsFD(8FyFp#L%H*TE^E79x8kBWeZ;5Pg$Eu`|B3C#1MuGU+esM4j5}OmkWbSEkOLKmYQ@i%jT=Dx5?>rOcnt`;6xhEiW&Ja1Hq;GBly6 zNc8mSNqA=VU>kdSc*A5Jy(17nhvZ)jo-#2hiSNLrDnd#NHy%t6fu)0Kd7r-_+13+q zYqK9~V%vBFK@a@Z;xJ&9LF|(=jnHl<@^i@;Z|SYrhcF5A&K5Wqx66ziJHNtE9D!$w zgM(w?R(yQ?yfGRGAYk@9N}}fWdlCFDID75fYG(h1d(Zk|d{+*n)Jjk2k=v^B{uezR z-q+U$&PsrJ;c4pV&<=g^xfB@xjZJ9kLok>?UO=HpiMoFPc-br1%-)7X&B`9Sd#l)$ z1M8MIZzksDwTm`@MQ-XQacY1?5bQqw6Jh&Y8wp@^pIZsocxrVP!H7guPXyBhO!(bR zpiF#qBgd+_UZtC?wjL1;!JzO)&tR+eQxUi7XQQCVe@BPfJ@+;W97L~XDtWadSBN}J zpH(<11TeRmW`EXGcE3A#%`Z({5mf=(llaTP-mL&#fqjM{9;;4YD;ZV|_}*^R*T5t@ zujOTuNBTGfXk+kk5+~kX2!V0E*`MxhG-suEQ3dT}7Ww-B0EH;*r=Skh?4fN={6fof zvS}9*zc?tz^7iQS?cE`%_ne`>_h9QS$}jZa3Qp?qb@Ro?-|K75d2b|c14m?qu{hYN z$n&(lQw5}HeB3T)Pq=xso~GO-PJbb|$gH>8inyttlEG%nXdzymQ4rXXbT3`Kyi%=H zgB9iAKw2Dy?qf(hz=qr_4@4Cd$kwD9Sl-x#rXAutmCrM=k(;ALA|B3sSi1j-RM<`% z4`uWew0il^VJx?nZHp7-Wp^qQ%krdx?y(?H$V7^!;<)|3>FMdsZ_O#;Y z@h#JHGnxh6uQWM*t-v&ZsNC& zypHT5BWgqfN3}*QWAE3KmqN_>Q|3ZcL68i5xyUm`+zWnYP#$l`6{H1+{0e@VDRTDm z{*2g2WLk)v!MT)Q>GthR9uF!x4RUp;8l}?#)27G+pm-K^s$fbmnx2s z5{Wf&CVqmaiUeq&TXG?ZUXBWwA|tsJK?$1}qk(?XK*AG|z{beF23~HLXDx==Q zNKn;+VfEFk} zOBX$|-ejVtKsH^KKXI|!X}Y9us{X`Ohv$Ikh|9AktoRM`?QgoYUbQ9qp^TqOU>DNCo!XlEkPoDy^+RE`On@#$W@XEN-u{PjGsF( zF^Z3n_UiqWIH7R(Ln`j%Rw;f+xCra|nsVRaJ4iAn^0tRLnCUgpX^YSow)y$7^ZuQIV zOj_hk`$>;Qk!cAkJyyr!^z`%$48#Nl*%6mD;C{H~JGk*s$UQ!U^w}oZ{DE5lh2?$^ z)7A;HwubupLkAB+=NP)q5yB4aFN^k0poj!AIrJFCF_C;1FY4&)Pt7{(oe5_B4!t}z z0|9t*NRMo6ApNetzNVYUYz{|z{b|8jlFGiz>Dh#-WpZ+I(i6RND^Q$v+c<%74IGec zfsKE9;DEtxGej`e{KZHD13~tXBp9Wq(W3>WMfi zPETTw?oBqcfWX4H7)icU7h}@W>a$srrH>1O6fNw$WKz&quytLC3ne)6aB+c4N|459 z0@??2M%bkJSz z1HE6%9?Ops6i)ei_g}db{RH3F$!BlL9BxUTIv8eZW|mawa>k&TN4TiGKL5@b#12(x z<5Uj3kN{&S)2BzUhAvY3J!mQ%(1JtcxwQ9Bj=G`3_hYY2knRmgfJ6gKTy<>ggO7@Q 
z7UcxPqRSoEl{pR^Is~JcGK(cH3zkt0<^^%((#NT9<#Z6Juxc=JtfpqtY^MHO?kNtI z3R8ssp}*TPdTy898T4mz2+vgb1Vqg^KaaFMKe{EIzmsWu1Q(VWv2$-lJ=-gco zx9{5NK!`ib$jagZ&RM~Lc3e!S13F7(iymuaMCdDu>wIFRtf3(Yy-NWBbse1~&u3*^ zm91hwQe|)o?I}RnA=3nR)P}*cb4Vk<)YNyIZ!miQd=Z*lNqqY$ zDD*uy?pogl<4rgvDoaJTMSpa@-8jI)as&#~rjKML6hmMPYYt4tJoTBpqMZWSYX)}e zb_0%e!}`{(=7fYmE%};}zRDi~tKHV0!Q;v}ndddt)xgOjJEw%TDNV7pp}9x;Yvpy*=QNGTQ1w0ptz8hvN>!N( zE)Oe+vJQ zC4ql`6)eQShMoCGJn`>+HHLpu*#GOohwzGsKNQ9=a3CbIi>OXSV_uW8F;l=6bYEy4 zN;wQ)$`~Lz$CbY2mLdV9@9cM^)|j z$l*e9Ao3}29?;jRsAzXXl@W zl+2rlY9iP##W9XN{@o>DbRx~E;=^j6;9CQg=T;b8O+W(uQlB%f3fGj8iz&h z9FIY23UN$`zq@fb87K$>81>gsnx-etpiy-+D5gV5$#^#j!%7(mVSvYRTaRHN1CnTv zZ48P)`#$UDo8x=fS7GoZFeUjQa3^5jWtgZ|kMEfdKE9nv>g+SWXI@Q4>wPDI~B0 z!t+4vF5fIoHD?MGJYiIiAZhfe;Svp&*nIBh>vg(8eOp!KC&-I5^$*c}{;{sw39MAu z+Q3)A&ImybnK-WZh-BlH4%=JH#(1CjtFc#gTERTBTRZ#4A?us5hVSd@6z(q757h>< zIZ@7v=oj393zk(2g;Y`ohuLkXlGwjf2?937p-M+e_oMJ%ZLjak+n**4T*l%+WRt%V zW}O`td|)TXgtU?tD)J3e=_Z=n*=0bBA!`e~($?HAH=jN!*z{aH`D>XsCa$xy6SU)D z#yEkrmSbSW@~{c_X%fo>4A2-cZ5@7qX93c3W4^fk90jwO*cLQ|!1097os|iuxDt>| zE)6Y`$-?$?cx=wMmp^~%b5`DwPnO2vmgI?)KW(g zr1U8-(u7jagCa@vOUCU1>74XMdY_aQv8#saZ`JolU?{3h1;$wL632~oUZ_LT77!8& zCJw9Ctb;B@vLO&0J3-5xb0Qb0!NEK*8&SUwy{Yt)&4LdX&YsP%>AtA_?zH7ni0ERw z@f)xen+Q4Y`YB!FUR%&;C>SObW$(Ay-odY8$x;1hvt}2yP7Mq=D2Z+SOgMsNrnCN| z(BP4hm-kQEho<(mXXW^A`JZ58@)S*CEP+NP-Hr^dE6O&qRo);U&Owh(-S=Z%?~PB)pgiSQX8y9Wl=-+wBx zt$_j97wEml8;~t|qP_1(fR)tfMfW-HuKnA94=*z^+L&LbP(41p86v_f9Qe`k?p;G4 zz`}7v?1Eo9#u#OEK>%+l62tH6TlMOfGxt>-LlE|ww6;BKuo zQRPeljivqEP|gz&2nwp{(WNzTJTAJ4jWNG*L;Q{`7|*_3R(qHU)j|SMM@&=p>8Gcz zJO^MEbl|%GZYpLb`(EG@m^l$o0>IYt+APW!PaO~zjoAJKJnS|%gW#7iuy`|Q@RY*pjPiH(nE zxOhj#Oo{`H0zw&{U0YfG3aOW!oV&H!3`+6UCI#FjaImn3(VAo<9A4s1#QbB6gpQrW ztK^$SD|KoIfO}u%yZ&mwnTZJyvh4{0mggWJS@R%i*OFAAg~_K-?~E%npkKd`h*b#F z)98VZhck=+L2bWEe=isysLfHo*iGBss{hd46{QOV8{YJ?1aQ(Dti!7}QqTSG7qp5r~`V zzP88|`S#m1g1G(qh$L43A#NpEz-^Sr5ZuP*_cv~PN&wszElP6kAJB$;4HThoD(8hM zlH)sD2%dS`M`Q??So-GjOs<{`ZEzGp+$cvA>!L*NHQig(Q&Vf{?oREFhFJ^dA~h|o 
z#2Ow9#(86*E$1`H@$dfnUM**hbB$>>4iJrh7Xen=vU5k*xn9BW@rHUyigl7#$6x-Nc#9o+yW5M)s*G8oq92zKubn-*C!02yu zE?uXKqH6dIw836;Co7O&cI!D|u(AlG|K5pz;U@oHM9XNKceR6kZ6Q1d1>CcS-cus18p6X4{dteb%#h=6O7$ zThub8d(HQca1IAz7F@uZ0b(lT46rJ^*1Gx%8ef1; zz;`VxeQ(wOO!RRcr)uA!;2UEw8X~k;aTj9p^74v{Jpf(}gY>Adpa4cCE_2}fK^sDl zbe#Tq+O@jBv{W42AiNDuf7fYQ1tnNaObkRW;51+&tsz?*V`MUIneyRzYi!SB&NGAoNu`~26RBu6#?U{oix?tu^s!j!XW zE@0aMXKuW^4LF4Lq1+}IRB(;uoHql1{qJ3EH=8AzPw}By3KQK)e`Q(51eN_G4j~+Y zK`@1YvUs-*GawOzj9-qMyhmE>Z~C+p&WUzI{0 zXo_*&wM|@S4&o&?BzU#sVGMK_7+cUiW#0>xm6vx�(EdHT?ujj~|TFj~Z=!GbOmx zs|~qsKKm18`b7-(>rs^{b!UNcm`})@=2WLP~w~Twx>$&QebC z@64835wigcj)yTD@>ww1*rrK*RJ}d;y6$9xx1np(4bV|$XRC?EJr4=F=^T?_Aq&Ql`M<-7LK*)Z zPP92-lWup9LL5zobZekhA>-}pTgQcc9s3?AY*8gR*vjQ70ZzjEzT2!F5TbSKzad0+ zBn|dx+Ur#({euuC|IT`AI>>s$^K3r>N-rTH&F%%03&cCIc!#(}d~yt{Z6?C@3O`9Vet8zJpSC#sGFYRj^^mMwcXtS0>Q(i@4d3rHy}nrXLWspMu=%}r&eoKW zs`yUX#t@@1hC)@VEzD*(+ z0Dq`(*r*QYG>-hum)K^XYq*cCzpH#st@S&&{OSszVjDQSTJiqtfI&N}>ce9W=m-}# zI=`rM6L$<%(M?lg>wV_4!V#XPj5xkSZdVm|=ki7>8o`1#mhV9Awp(n>Q>g$&ig_6BtfZMKAMFi5WMIDWeT98AfzDGKe_O z%=DGR0IYpIoUt;iLZCc?y!YvM=dMN&N|E;t+J0}>)#+TPJ9hvb)(RhC#&A*bvaO!& zgX?V$JnqoKNkP2cu{#)iMns0F!NwOjF^qFW;bC;ow3z$q`TyZ68ZOWKg=Zv0x+H%t=Ui$gOgrQ6Aq&u*Zs?9x2*giwjFj=`2sc6LCX9;2MnmTG9Q zz^cS~YU;G+;tkKO>8+42Qa&2>!Nv3W`)LmyVPpO%SmJW`JFn+TxQ>Wj@8NU#BavP2 zzJFZMSW3-xku34x#j%@=CxND`ghe`CJ1K9{(STPI#%>$nw63=D_CT01_WylLGAbIw zR6mfASK65qrM3CZ@RhJGTam^!ya1M&V<6Q0O7uCyE6q_JVnRy| z>LoHt)%rQG5hpqrs|jx3Tdk`aO1@^355b|XDq89bwnq=V7^yGfNr;JN3dZc~%UMZx zi~D`4$3ofPDDB=o#$ctK;n~XE8o|S1`$7xG@mtxCJ$;TndfUdcK(@J<>rU{d*gvgR zve~SeUhGTQJ4^kM#oe1%@;n#5rp9PrU@iT{6h8ChSRv@D@p|0uEneG{`xOzj^-;Bl zm;8{3ijGT^hq1QXVu$TJk9(srTOV%^^c=P7(6WDraC^h|!_^EvdRZ}}M`aC+MgDQuHq8b&F^Onz8 zS6H>?+$>!0B|oj=Z8f|Qztt>et;)W-F>M>Hb?R5#b3DG}dq95mnLs{Ev0pYFzoX`T zH%8w+9UJp}^W3hRpr`(<|GRFH$_)*TFRDuJU;8xgZ_?n} z^UrT;yCE73_6FlQV*+M&^^%~R8}${*C6}6sd%+9pp$~aUZC>c+N30* zDc5Eruv0$k+=lyuL)X>4-yLlkR2^xbx3uI6U-OYd zz38xX)#k+2vuE&dIXqLvG8eITlsKjaiU)asOC0foVW@A$mH`~T{idOUV 
zwZrF9w%%#kHU<6+)iE~gJIPzJE~*-I*?pBP$9ZA=mSx>F$Ht!XyoWhNH%$exK*nMy9aElyt$;(7N-O^UUp&9krD zX+!Y~hKt)j)g!Pmy(4Q#%p%~n|AE%XAtpKRqOW4BH{UwS7ihg*RCwiPxs>8La_L~t z#*}8x*Fh2N;j78loM=h+L}@LqZ}ljf7FjJ_4iL%Iv!THA^> zk{QnCxbO#>aQI*AX8ks5+jD36Y-mxkFBZ4E?d&=?51vKttNo|KNP59`Gk$K;51pUK z2{#z#eFQaBV|;wyAYHUhRXKgSrj4ROB$Px)Ze;jo@{-tlY41n-;TLf_Ac zA3uffkJ?&k1s~T9d12Pxn&XpWM>jsn)BZB3k1G!J@SOQnB7o+;_2x~Iq#Bwq-jc4z z_tthW-E)%v^x|YL<+;H9idnf3FchOat`zEC4?F+fpy;A}YU38)EChCgC}%pGxNkNZ zw<8Ox&#aOwL`a2`T^G;;lY|e3%bIyse8;~w4Od$&*dKay@Izf$r5M8|CglxQ!&yZ;yT?1#^fyC&eR=V0$pX|jvx6mM0uzMt zk*9NdtEh5*topsVXKdtw_xTSQFRyF4tVS_hnDH~qy&CX=cX{rMcN#cL=ubYHM6yZ$ zyS{3xi<^?UuC3{@!1ArM)C% zEZs^Dx;c9BZg0=lrsLr_9fN9f4GmwH88mj+ZLnhzy7c({h1vbrZlzp>AF zm_7_IQQ;+}UoNNpwn)FtutbK?)2_vKPVr#`=q=t9Xr&O&uAipOU2 z%Be6tIQ{pWtt&*fjV_Bp682xB47qylsSJ#)a@6kgL9A>>5}oadg0c#-ip8>9H(u+# zXRGnEsK#y-_G~2a<(o%syw)n*6o^pK(f?VQb9y0tEPM8NwHY3NIj+0uH2-+847aED zH+41=xoLR&wmyo%-_59uZ1Wm z%{8exzS2?>$%x%G`xavERoD&+$QEb&vN<5O*+xbx@6115Wus6}sjO8W(h&tHK@hy;7Zo>Y8Y z3TId5HF_i_sCECoi4vsbWJi+~AE;15lbL&C_O%mh))>qTRKU~@%4jKBrqy03ACKTk z3}A*d8gs!=eEw6(46hZnF&La6Rhu5G*J$}eD4vh;BY)J-f%oR7!v&5?F8U%B^>!6_ z;UxY)-W}#OWpE!CCa=bJ&k9#(9{HbYYXzxlXaCCz%d~>k+5KPW?XaBm#79wuFcro&c=3sjqc`T!1)a9qaU@nEMfy7_+{qP+_4K(5=E2(1hfO8v*O`IbRaQ!%pp6Rh$1pakWV4F#-_4(!KL*rOFq+icHKpM)zIFay5R|V z|BvBWSap`lkheqIw;`Ia|2^Tk27dmgTe~ HDWM4f%}2|3 diff --git a/doc/source/images/sequence_overview_watcher_usage.png b/doc/source/images/sequence_overview_watcher_usage.png deleted file mode 100644 index cc88a789cc5255b081f7d39fe32580414a804aef..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 46482 zcmbTebzGF)`!$L!BBG!mA&8W;#DKJdbV=9H&CuPLAl)D>(lzAJrG#|Dz);c+(lXT9 z1JCpQ#e3dA&f()napsw8$*u=)()R@KE*pb&%g%k_xs;IlNy5qmsU}3o?7AR}mpkCegtZ4Qb6%YGx!%CQ* zLFM^t&Ig~eUmYvvX!Zp14A+-#HqoE%*KuS%9->z6Ah4b2>RAu+D!^qh984!1x@siG z@c0cq5tQ`vW~|WWp?>yl-1jl`KN`g*-q9LXeu3;ApY=Sr9ZxYsN>WaFLqLx&|JyJ8 z>TjIVCkjb^50iKHpT>NJzB6G@!@1p5q=0i^Dc(%tz?(6D^%-ZSux!?MLC%Cnf3U5q 
zYU}9hQ#=r&$iaR`D^MZgdSXpG&h+<_o+}7`$}ICyIO)v z7aQDlSN_O`smCj~kkP81?4Ed9Lcd_8o#bz4SY*S4#JF!Mf_k3^{I0j`Mn2-1qxWy~ zbHut5a9e_JP}H+b$HCX~IJD~5ErB-4?fr6PpWm$?#+}li_OKP#=beq{O9jDqt8NAv znnHIIKOK+OO#iZ<*cjwd!CP{!8{MeF$-6iD&G%9GbN}b=-?hZHUph}at1n3?lhesE zENyP{6e>iooGTq6vjlV35nA5ci@5v~Oj!ie*EWu-3o5o&9T@cj?r1(S)7?i+PAzym z4k7%LIlRgF0sUA1YEA6zH{N8rk0iN2@O2f}f928>{J^SO`Tc&-o1W;0EH+9fcqtii zrUMTTd1JffH;LnFl6a$avtIM9`xa2BZ7k+aV$(uLb$zhjoZYje7xW~7unwYR@%a}>a22C&-^Z%Z_$vBRG&e)Y{J{0_`=J*6fgpKr}J8M zevNDn^ENFUM&Lj%jlEDp+xl1pSC0u>IVbgbzY*#&fv~n=3kmaw*ZPR4Y6I;3!*CmS zdv=GOxTf$W40^caydb6Ppfek9v4G^kc7JVcq}vbM!jcC(RJ>*L@`T>kEv) z=G#_-9QWUlhbrXm^O8rC^>vbqMdRQ-JFA_2mz3Bq2S1(YRrLMQ6mh!;6;cZt&g58F zPvNrh93Ip8fl^9GmPlEj{Y4?q<36gjsk0Fgq0_+p)w~EJlZ`~yBQ?M!*mZ1&maF00rSjrn?k54z_!#XYn! zWJ5eT+SnQDBfd0KJ~cIUcIJIRb8+z}UHFj;1kUwG_f}U)JlgH3bgX2)^H6ALWMpJ) zOw#NccvgfRci*8dE41Q`u=;P0Z|^T|yssiTyxG#t=Ytv;u%B)A-`upu2La$IJUFcf zeHt-LV%>f>et7SiCF?`}k)fe*UNM{y5fz?qWOB z?{gckHyo16O2^d2*BKynjZ}1#PJUFP-l`ISC!JFTlk*CiV3rk1ZgVp3ljF>)n!3@H z0wdI}(y>|=cnc|`eI1Oq18&}o@E;i;9|v=KGT8bA?~zr_LOmnJs9mSmxaz4i7=W^G zv=3yBA%e6-mQ+0~@bdV&Z<|K>Vsx-JC&HC9V7j3=fsM(qP zhVVJHk~Hb%-|fnzWZzyd?cGEn)}qyP6f|TBP0VmFKEhkPe{g`V*I=@f6;9|~@|gKd zlLL5N8G*#{epKoMsA!CjqJ;%){AL99!1%bw!TOj|mfRB|?+Mn(4eg`sX2=oqpwLmai;M)NV${R7 z;Ms7LQPL;ih)@aqf3GNQlHO1*C}V` zjW~PG25yWeSS)#JWFDSu@XnIFdwbxw+rU?P*lm8a>o~m!nC4N$E;5T2ofn?e| zI-+#@I|Z|`Nr`^upu_Tf2iI*bm>Ye3JQj#09MDX%A@JV(z@hv|_JH*j^ti8m$yr6$ zDPiwkgWH+0+wH#OXIZ8(!ez>B z2|>x4aT3h5J10Nwu-?bVP|VhN6GWgXy8;s{J8f-kC8fdl*KSc4JGr=&*1RVsLQIUUeoF0PO98#G20f_WOKelS8>X!q3BKFv7}alW#fMpgijZ!$7W5a z^>}$fdATaS7TALzWYv&fgLq@|OSlVHkLvp>PQwVp`}!i9X?eu4Z$o^@pM=8m`x zYo96PQRk47xHA{Bl2H5TMY&BrGWl8Y70?|eXrA}{F+MWc>tHNCJ|0B9@^UU`?iU|t z;}43Tu+PK}5B0?h=I!Gt;EM;BNDRLXU0x2N ztYzw?qCicsp+9^`vn~+RQ$j*QuvNam40EGwAjZO%l(PxBXJM-R>?(To*LpQq0# za4}7$5+e-^)27{)exaI(bpF=Q=Te+ksweE`=??cKEIskM2X+_O$$MZadmNSGWruFK zp3i1wdXdW4dDCg2%fyZ^z?YB)EM;^f;akFO~WoPuX! 
zM+Mz(t2utg#9&+W1;B3Y)YJK#3u&EHFytM1HOG@^;_xbG;*ZA-Zf$7paZ^k@M`vB( zxjBmUNe()WSo2$1p+_}+`jkMO(r#nbGmzoZ!Om{CIwW6OQorn6lh~7|s6>>+Gng*? zHCc0^g-AO6x<=PpuqS#2p$R?i4<*;Gw5{)o6)q`lNSEqU4gb1quf4Gi)m8t?jSf3( zCdi|r5?ZnK463|&A1Qe8qyjFJV8yeLg1$*4&_lH0k(5HL@NW7HnyB4~v!w-Y)O%9&nu&(j1+1N)s3M zG=oLg<9Ng3bTr|BDpmGbl0H;g@~EqmRqb&-vHNVZ+=jl-`5Fsy7{!2ITcS=B74ufq zT0=I#m%87A#u?%CRF!8kK9dQ;&};V$cfmH#(Lsdfex2>Gq7 z@Y7W_gqxu1iuJB#wOljSJ+nn6>p5OWP{?g4V1j&|{!$Ic+D+E(`TlV2~o_uMih zICx}nnf1VDE|_AaDP}q$b+tg{*iKf}?O@D@2D7lke?jUVm@-9=wx&PFdc0eCuwOWx z5O);2`V+Dr8NJ`~;lnLe_(?98>Dh_U`Js<)+SDV=Vj@gm>^U9k^{F5Jqxw-P%e&e@ z)VZ&6WT=ddeJ7H&_Rs0zw9s>*<3pR6j*HH|Fo>CYOehKllh*g-k#67O)A&Y zEurUM1&*tUbeZ}WC1?W!Ykti9{hD5k8C%V;T(~zBKjG{ne|p*9zof$vXuO@G$2^f= z4nDJ<$U7ay%b3kMU`F>Q2n_Wc4;#~4Ax9OKU9l9_ahGY4gAE=X9WDCiP1_cT>QtQ$ z>8~zg$cutmO#=f16_w$ipK(N9Tm$L!KJp8!Q6?1TheuI=dN8hH#^<;kc9zqfz%4E= zUIT@@?JfcwqR{z@T|q}@bKgBlZt24f938Ly;f@X|4KZ?BYHBh}?&fH3F1H$|zSnWp zx`L07?|6Nz)Vq^Rz@3wsSvFbavjlf)^01OaM@PpqDyk><{P@?Gm)klz6k!>enYjkn zkoXB)RssJ0;VttOWug@8dpE9L9x>C4Q#5SW z?#lT~ef;=wZKPn=l^;L4%uo04Pok@H@kWSix9+t$>grA-8_t~C)xA5rQiOEzhZ`H; z$$;E6KR^FCBofzaMmzIB^fo@BmtaaQJipR?S?lmFGB-DO0QKy%v=+543E}OFQM<2c zC;LIp)3f$O^)&+ngOqGwdV2ceqKQ?z*YUy1oSKJ6jp_$}elK*{LEnb+tyV-NoxFvp zcC};sg8dOQm4myx`&|o)`^d8Xn#S@^etx9z1!RnX#{o4h?M>$U0Pi+K^mgVnMMRpK z@PzUOy1Kh3s~oc8oq3Zq9S05KG(NF=nw8hiD1H9?IXpaE_Z*GP%gyEG<^4bim$)M% zFCY8vZx-vM!($f0(U?XV?8`m}*fI0*jc-X6c6D`0`oq2hpkG$TNk{i@cG(N5R)oV+&u2eBn1=Y>yLa2j;#da*iCD{BIUMH;a>{!RgRL#3+Oalh;kNR_GqSRzpOKN3IIWG`z#+^&0(U=cVfEHO(A-_> zRjY7I=6AE7`S}@D>J0-3Sa!7#LBq?dxd`@F5IbD9lLFjA(~g;qO#we*S|%>d3oGHI zMXJN@CD~5R_wNSKIn?H4jZMR;vm0~8cv5t9hV~ij>1QL>(_3!Ko#8TsL_~Bjx4NmF z9s3Q91_aCb%18eB)RP$V(vEBIp7{K=njTx^9`2Pt-^htawDfu%_7_YaB^xEtpYL|5 zf?Ep%T=~LXFr^R<2N=UjEAxSQ2&YuM^K3J=GutcZwRbrM85tR%vH|GhSx@QU2hKO~ zRKXFM02!48UYRAqlCc6mx9*XJ*(#~2sc|q%h@|rjJU7>LapA6c{1ooB3@~;{1(pm? 
zQc@B=OjA=66kUUrZi`#>FO29@3ZCUD zTz??jgYsIINUC7uBjje)QZlnvxr9MpUf%AJC!Jj4ZyQ1|R?P^!y2B|CJb}GF=86gX zZV2*aQjfgnEWygk${_vypw#c*pA8enfh5C4u0>xY!{-h;h8PA)EdWY>i2TyO{__q6BkSDWDW02rnH#>D_! zX@<;ImfJ5##hs5DhH50P2L=T2lZP5TgzS1uA5UcootnpXJ|ZTjZ&{7?IobYY?543Z zM{)jlXQ7h@$KtbD*=OdC{_!dYQv-up2og#qotd6qyEh<>QaV5OIp508ex5bRgKG>K>k5mZm=g7A=X<1hpA8t-I75L`*KlIoF1L896#jc>M zYZLSSsush3RBYvpFDW%4X-IaQ6-`3I&!R`~jY;aIemlCkxj8zPp&W5>aZ!0~b_OH& z1FIh(-_esH|30+Qo4wHrY1d~z=WaL(7KL_pcJg@~bE=np|3TYA&>Mq*xBW9!~GvL5L=YfGikC-aElC=y|Efo1>qBIl`(a@ z{&=${_Z0gU@YcwBrk#~X^YwUtMgWvo6bke_o z=j?vp*D?oN7)4fyWON82<9Rk{FNRF%xrVIUtX}JS_TkhXUU#6GGtXF9`%L4Fkk95s zRY?pSlkTnJV{j8cbg3bWcfMfaY}r9KQMc$H$ZIt8+`U zErB5SFtkAU(qQi$_R0p2dXo4eBO?o|7D?g^x1F{6>`$Viqb2bbaX_-N*ebbk&rA|p zv^!#?*RrlWH$Htf2Z3|dMMYS{`|lhq#$2ya0=;B3yfZiYO)-k$Op z@G(j&-EaAj@EiB$AGh*FpHP>zJIc$4?#E4iPkg0_3DmrWHkh?*0Zn+JO_UZ!@tm9#`kSvktPa!qR*9V6qj zn=pO1*7>57!NH{59R|`dO#Qtwl9CE)3or#x+Ns`g`~Cjb9&0MgQ8fq<02NJpYzYLy zkXI`rx@GnI0~*<{EsX}jG)2-My(gN98}2?^Mu8nx?0t$BCwF_}-PzwZ&Gf>X+6DKx zr>`#vCUkNK&+`QACKZwBqlPMD|$!)Hq?-#mSbp9gl||v zMkY!n*=>>dv@0vcThqk*pfn|V#n<=duyeZ={_hhV;n}DmrQ|v=9*hhOU9;gcswGwQ zU78QnO@5Ol*vUfPa3Sg`QD47{P6A}<_yq)EidM)|P5`1YnW%DNVxsmyp4|2K6&^kl6O$?MC}WMcCtRjSW9ZK1 zF3N!-ySyuzY$?4U*;fEO!`%4A@^3VGsGdtq31%<0>& z$+2GHVPB^AO296NnO?`d1+p<1A`D#$mB6{U_7Wigm~bEYC)|sSkSmR$9IWE?bxY?f zx2%@KuI==TQy5nOPKNSt9P5zW`#FUa=~$NTIF6r7jC#wb1t4q9r1g;itrZ(g`)j!5L>raset}j63qaYcn;#Tn_^lLs z_DDm!Iy!Rm^AouQsIg|_i?Qov1%`SEi;Ig%xvdkqZKj%yKKq-Wxhjj4phBMTW&U6R z%0Vd{(9b4`XW?I?FbKwqI;nqkIM1{<`3X6>j;d;=(;@p4V&WCsmqUp^fByXP<#udm zbY$cp5}9S z>5NJ|#Ivs~mjOcQaCfi~Tptk`Y5a_yKBu^tZ8--$kRe@FRkdSJ9-5V%-L{qVl8?&t zy*x3OE+Hc;tD3A%k=~EI!uo1Zk$wI87;)nsg|Qlj_50@Ek^gFJg;6#C zE94DrnPHr%MO5UPSg|MqgY_E!NTp&^K|UNOJGL`YX#G?(Xx-lu3vY zqG_M=Q%$uR=M7X)&|M7^J-w%_tgJ#py4|s1GrLDOZ{0Gk&u6SFpBejU>C{jf>$q7G z6h)i$zPb5Lpp#mGW=0QN1oJ#JgT*48AXmQ~z$L8;>u7&aBLe>Lo5pLBh`6{hzBjaY z<)@lR#$UkdnaUkk6nXF@BLR`O(3Rzry%Ve{qf2orNU~e^)b#XQ_)j^W$V!~WuDCS( 
zR3oASYr>kw9=(LIA33Y^KD3JR{<5eYU#B7hWY-<0=g-5?Fw!^V_h88J2`OLg_hZrt zPhzNE@>@onh(oWvaF|0)R5>7|JNK9HS8Ne}3JMAv8ygmAP&n_LyXf)Q_k{U$;Pw(a ztH!F7`SfKxXXbtk*V5Hx&61CMPEYUe@2}7=TN2WB$f!CJji3HX#nQ+qeavYOB{KFD z#n|yPEfi+b9otg&ipgo$BirgqTw!4-=a26sX%jfkv++sxw0139)(C81{OO!batMq?AJ?OD-1yoC88G9RFEz-s1 zs4ax|_V=PhCa^x4D&fluT#CS2zwFE47w7AykTwHE+?M?@NBBG;H%S&<_V^Yivqd!HY}tsN4Z zY$u!i@c5E?Ky}_;b`oQX(4vR5x3_n9cYAXz?8QVyIl3*Me*5(4x{4dNCwd%zcV{Qv zmM8eeRE*hZ^*2+4CfdbOEmEwN6#I;FoV{(ZqI^Z1>tUCb31;R(hSPM_;zt=X{p9HA6liz zcN07@m)-Bi_fb&~k3oUO_`<`yv}wRnDPg!-4mxLOf8{>%{?4tjW^dn^M3@`W{5rLw zgcvsB>Q?34y_8;}JAMmh7SC>_ziE1q?>aLSHz!}`x?M*eA`eCjVoS+vhdY=0?c2A) z!YvRG(A;-KavO=g)swCNsC=fecJVpun&MsowXkOP9M^sh3@X%sm8J}+vTfH1>xyad z7GJsdWHA`+W(8{@%ef~OufQv};g}y&fdd5b%525L)9V;PKTL59Ma-33ELnLpR1Vk_ z1!L-P04<(!;wMl^2A5g6FpuDs_lsO@Mu6J!=Gif$xA$F!PDA&AI`Lf$c~PrlJ*6(u1S+^q1DCN6`Xl>N=(Vjjp^qB$Z)=7q!2p#2l9s);GWF zUto@6mv{ifD7Z@issL;w5QAhn|78q-Tz5$&0OatF-kIwTPMQIMMoQS^?~8i~z-5uH zoT|q7fQzKWPzO(&J=$hx4JyW&8-7s>d|mK=K^6HJN&E6JNW}}3^$5EkR;yU|X9bKb zE!SIvNtXay3AeNQaP5{IE=DTB3bMf)>+8EuM6_gI1)>(n{P~zc$-rc2P|sXs=n*AMhCXz!8yE#FRxNrpujC1!S?BeIg@1yI4bc)17?{Cc zQmwoW3NG(L{L22WRboen71A1j5mdNyWMDAZ@1G{59VT9PiAv21$98;gMRXfp0|WU^ z1&^Lm<})477Fh9laS?z4n*RO!cVlBC04WQW%p4$((wM)}Qplw6{PQ|a9SHPdVq&1K zVoHpS&3ZITCm`Sj_)&HR4UM>%7(z~X`x?Ne2lvAL8(~Oaz91u2c@0bOR6uSUm>L*R z#D0$UV=%~V+1n3aJ*o?s5i8ZjEv=-b7blD{py8*XC0Q3Q#WgM!Bn%`jM_RfyU29(~ z#xPlpxqf@@bs*wp$)|8>y^`yinVDg?TOVc9P*0#BAQ2vyJQ1F>@vpRVYTsfBYJ6426nMN@8B>9~~V9l?!BWX*c5{|Lw!$nr7ba z4h{@h09{4cn^m_S%2&8%jhCsJY8+u`nt;aoWEWfpg z>qF9=@%+&4ma_kT!)sVUt@U2VWe=jVv$N4?^!xW$;$o&zaau7NwXVf7Z8jULDxuFWxS#+Ao5TWM0Bx{my1 zdHHD0Pj6T;VfM{adk2T%;|Tkh;4fd^au^HO+%pH62(4uA>3NbVn}DIit$KWMNe2c8 zhe^|b0b`KGWI$O}7386#ZW&qG$r9s08763DQCXQXenPFPoLtB>0_G*c&dz>yur*(! 
zw7R-_{THOv9Tf`G^txM!>5ljQ-Rde|Qc!M3b)&yBn~dm`F5u!(^9A1kRNwtKva;PL zN4rUKrXXS;kjKaXPWdI-y+Wjxjt*QNJ7O5h5}O!t8u1noJWQKGGaYVzbNrAlqe=Zf{=x=71-f6I)=b7#nQhQ zPF}$ZD)(F867<=r(%wlpBfbpaW$ta8r#48@NoJfx>p%TMU|WL?^GOPCY})J0tbMm; z(MQE=t+6mAKf~uHhP1@G?N46faXwzS1oxP!9$!o2uz>3*Z%FVW3|UV(Kn%W(xhtVS zI#;49-%=;**_gJ)+X7S^|C9#cNHJaf0h~_TkG3#;?EL|BP4D?mY5;1j0kom`Aa`+f zexo$Tab(MM4ilpdWTA7f#_EG@5b6M z<+czy%CI|~fB*vro-I8iR!}A8nWt4%9cG4z%OCwqFk=G#1qIwsdezt7E>Po~EV;drot_jsK0m97WU({T<)QEg6FYewg z^*?e2Cy#l23k1UU@}=wFKQj>6h%0~c!GZJH6sFstbi~Lrbab6Src^XEOw-`(C)cTV zr03>FF4#-9J!fFpk*0JO>$#f3z{C{vDeGLnH z`dVBozp_%DfhteB1t9L_Lxv|$o@i)jSXXGNg6#?Jn6Dv#o5$vDCtIrDbTfy?$L60! zmR1$U^}pkIfleJS2S=&P7nxCis*zOF4HSZym{>qS;M%ooU>hAB9ia>WEZ$gMB_ShY zVrGthmFbiHEE7{NZ7T1GeCO6^*NGsJrF4N})b|DS{IPTrMZ^Xn_S?y!ZLAt&Rn|T`{0OY*gwVuOh8?q z?7Cn51JS#NhK2yO-_{JOJ5XEF6ia>q_;$O-xxhvOq*J{+aS&L{KAqCvg=kz-uJo)d z7f|`{6&wEQf-{$Xxr~;9ASze9l@I|~-$Yfu)Z(uSgr0r{Fs-Zlph)eNh(%FG^MG^x*o09AuL%@do_|;tVa$sQK!gD${KHjMdavR|4L~ger*4NiTjt5i~kjEtq);2d63ons6<)>Lq z6O%pj%R^+v3XXS+g!2d$vyN@(+ht>dU`H6^i{m~-Z^gWP_E{r7cH_3fY?A)Xj+kOe1 zZ7+1jHUK5YH}Cqr8_}_`msSNJO68=czSdG)oOWL;kjruaG78w>&Z@;4G)-c0(MNS# zUR#4HXJuWXK|xX7-%X}$n{Z1y&Q6bw(JNUgF)P4|s+y_mw(uw}W}!Q`%wb7RA)=<}-|8k3+JLycDk3;l4A6&ecHQnH#FxVZ5&wI+TR1 zLdu*m2V63Y*WgX7v|@8@5!C&)S;2L!}r`Ge$ zXUpP35pk?_dl4tzKz+SX8c&jw$(xAK2Ney>fdbpj1grWW6!J&KHBG)h}n#eBW&$T8-K*U%mL*3k%@)?oh(x|MRZ9G7~;h4c6(j*Wo0nx7M2>fed4zuuy@6 zuhq^4z)sQOwvrKB#+hl6t^Fg@x8n@}HZysaz;BbRU1H?#ota)&lv+cU&mDzc4+Sfx z0=RU`eOo|y(D5jcp%8F)ybhE-{_SS;{u+hb{DUKHRn=DAE5HS?Npg^s3Ia-XD@2IB zp}zhsGr`*aB^k)*tM(quuQ)k5v$Lo}Bv^`xkzhT4uCDH!+x|OAY(WRtP&QYma^4vL zQg&d}oP8B_o}_`&?zHP1u1432V{UN;aO>tgpf9a>KtMpSFlL(UCMKjbse&Hs^&W8- z$@%>lD9yc7K&vh>?&3;Tr+3^3NZCW9rluE9_ zSro<^P3xs0Dk=&#x}hPJ&yB_?g1vF?qpwgsF;5wmOgjt#^qzQ+t?I%sg#mXLqWSyE zn|krB2$4|!_Nl9utq9RT!h+Vza4PS4{NO?ZdJ#QKVsh^h#;&Px`2vKKo4fEd@)o)4 ztDc3pxHw>-m*>i)$6mf@TrYR$$P*Y1fE8r?!;0KN$vSLo?zPZ8Os(+hwmJB*u*fYa zh;}$C{aSBy-k}rY#q3cFBpoV>iwFt9^35wK*l2uz&9h)a<}W(l^lY=Hq|i9j#l^*8 
z{#r9avI&$iO~Yej^h;ZPCz?D#8%X3sZHEO9U8B&$g9CeT4JKt6x5WXgHXmn$>OI5b zeaQJ)ac^6FjKI|NbZ;pX zCfkme>jR6P!#K!n7)LkG%0opcbltgT;qa@n_w{a2oH^g2Sn%a_Tz;#m8SfzhhKv7h zcpC)%%Xh3isW~kwRS+|Qs6V~4i$nuGVU>iWq&0SPtw0fxRfTeB8Q+~l0o9g@ii+C` z#reVrP_pEqOQXexgM#qj;eJtQLNO>oKnnFFJ#*Aqxp1J_*G$S~aX=vZz_2#rCQM6B z>{_RzAxKG?ia@Anne49)0}sYMJUmuLMx5%RdZSX~J2*I0&z?b9)a2ykbahh#bj3r+ zj(!WxUFtg5#x8XoR=hgc$mr-}P6i-RfxN zoj!x;#-k?GZSV9RKoA3@`qspPhKPtrIxziuTn>YYLWzbf2?@#U?5xqu^0JxaC*Oh> zd@)@ixRCyV0d|1MC5KJ?vh|}+bhN#t+wrx4y-rkiz3gY<^FX}~?}_Gx>JK1}uP|rKQd;kj!!%8dnCh7L1q*X$I|C z)Oyj9^77NwPI)qbgviXuxDgCf+yHL0DN6PSt^Z0w)|^p`8tXUc&;BMJKP^2v*2$&DX8EoA<6yqKFY^=C8=#8kLRDtc zWpN+f;x$u)pSrMg@+HYZnCT_~33anRVCJWDjDJ9Yg^i8cAQ21n?NM5Ny-v>ryMm%3 zJ^?}6_i-T883U=L1RnMDMy>%^Y>3r3h;00~y6;T<`IVWhy0n1T1_aV+u%4-!1x;Hi zT1--)K$3U6cnuI&8!$@hv&a5JGN1>r4n@B^2A~R;YrAA+#e`-lz=e2x&%~}mAP1r zmb;**lA!hgriq87q@;PczDLZKyXpfbQVM`b#Hp6tSZ7Uu6c64xt})@P8Mg86tFv@? z#D`f}AFcm9P{Wk=s_PRLPc^c@z_mO97Vy|JHC)-8L7zUj=k<@cBb^d0dqhI!fUp8? z6PMh;f%%F={5=~-hJmoQHi?Y}AIRw9wySbrS3FloKjgHU>ajNIh*%$v`$jy_Z1me- zyNqEdH{p|H$i+6ud6zhJ${<8**L^YnhkdhV*zS$vCsZY+v3@lH_3uq?Wp%H{1;fV* z8wa3xVp~k)O}h5T{rNsz`NCS?!t;ZfdYH zirOz_BKC=7l#`9cB=AQcJ%UUGe{CbREx9K9uE*s+*VRj2av3cG*DY{6v~t1YfULoI62Q)>HDa;6maB@n_hM$+#>DairRh@Mg0+j}$s~DcdEj7>iG>fsR=3MMPa)_Q1 z*eX}=Q&8wCDW#c^3wn-%R3EsuP{GgWaMB?oCwK1zONGaWj)q2qQ?^G8!_!qJnq1ww zTgUJCBe8RVy@rERNN5^jw9pZ~N`5=Aa#3x_7ObtYtM``Qa8k?{jmP8k*YfAT1?s{a{V1XVd}O9LG%z@LveIs@)@`>H0+;EhlGTHMFcdD z&TM&MZvIfV1leir#wF!vKGahMoZhW4VDsX;FfQ zT|6&6s@$L@KtsexShsSfsx~#1l7&T1=NXt=Fa6qs^^J;Y_<7?1@CWoJ@u}(b7xLt( z6@qkCK7FfEle^hKi};=_G2DA)fM(2{U8(%tnTXdZNQ>fL`f7@b5$JC|Yq$og`C|(D zbacccm#zfA!Am!|%Genj8{6vgfFOEw){Ho^`%7F9I4%eDoJryWO@x#vf&WwcWTg~) z76m3Dq4Sc3toBYfcvhCFt?k>MZ~p!_KX0ip&xbjps{y!% z5WCAL1Pqw-<`>elYis#|W>R&A6KI6~af@O|4e{;UtC4{_+{f(CH&W+?HpT;nq|JIa z$1CI`63aU}q8YC>FiLs}g(?qhtwn#}0?&D*GHmIzk?>hrx_d{--5otrpv8*k#TI+k zsnX5$MWgtD)u2?v=U-u@jbt^I0AHJZfcs9^e~38@k1w1ZGSy3;`Odvx`Yz)%QFV4ATE7btWv 
z6M-TI{8`(WG%etYKwt;OSbmQw`*}(5mPOf1UGzWfTfvmyz!Ywox`L0vftHLsBLg!t z=I|8l{2`EF3z|NO&R761IRq%ra#O%qgHj{2=-r&E^8msv7>G*Eb1VN5(jO)wUFFf; z!yD@%fLi@qprfvCR_ES#)D#S(%4;}%!rRe(`vS>PmsW-y^zdPn)hzc;NpESz{waD= zS!iUJnyzl4LCOi2G>3Dmbo%oq>FI-w@vNPUlM&sSblP-IuNl;Gd}8wO)4_#Q#;L1} zv@Ikf)RdI^p#JXf??j=dSt=k)iFe%2rcMFA1&o*$KMy3|z= zolAH|GeN~Cu@gB^dyxk;puoDPc(@F(n>o>^8!Sv&qX1aa%kIRT(aN@4l)$g&H+Dd4 zUD^JP>6$aZ@PUnuooj$Ek66eUJ=$IB%={72VU2Fdb`gzBXg$zvF;5cLA*fFivHQ>;WPk9GUL+qt2 zTnQnRdDP&&BPwaRvl~geDWIqf1eRp0tM`$@%4RweYxY%{%_EWS?sZdukFalcDAQx$ z)fjubUz|=P;BvNJ-r%M`f2IE!jql}x_g)P*1ar@*bk%_iMJ^=st&_e z-c9CU8)Lm|cL62^D$(c$A-i~4z}a4?Cm_2u?uEcGy49t3&n|)Yf^vlLQeAR<7#Rj* zizB&Y^=L_m2CiP51Nhj^b;pnq@S(FPs=H}*b?f~OU^-J|D7;4Y-zx;K|{xAyzTU!j1gmDtN@&gfp44nT4Q-CyMNuLBq zPXQ4wLrp^i;o^!HW!#v_kd6flLz^Y`Q#!E8H#~fl$uaeh{(=9C@g3V12ClVsnOQ5z z|LP2sjPt3NCG3 zF672DZ@<8Pt~lD)0#hPzP?(Pr0|}URJq?t11#_AgvFe_mp?dF4mXX0h4#44ss9i9T zT$8+M@87=%NcB_{&AzeiI;58{PYt02@=vRgBlG`~8@`{2Q+P$eFgz+Cpe0DhK9_-$ zS%8T5<^ab33d^x)1Rxz?^qFR7;5SH2Bx`^n(Y{7TM%Od5HA+q5yh>VurW+9vagUro zo4mZN3?oa)@b!`ajnvH=BdMekV2MqOfT#F;9b4qWlLMkV4M+R`ML;lp^XAR(q}1;K zxGvbo1s8+vfVt-JYx~4sc9cK81jlxJl#|*ljvYmokD0m5a=`i1rHs=wjOAP&h3?^M zP~lsW2H(PvMe4b(fD!L-9-U4u`5e6!-N@RE`bI0XC8*T9d2z;DGOZ!(z;aj1!&+Ct zOT~WQ=NP)LQeox7x|u)STYvq9A+W@P&trqufGB_fR+sEyXi}}$2!094JK~TF43RoO zoRb2y$-&HprN(JNhI7@9Pw1rx^UOFC_e@#vl>oDvX2I|$%x>OGwfQI>UHJaQ{%dy! z+7wiA>=W#F{TQk8{%R2B(E9#_)A(dc;rXsdZeevW=EQ8P7}!;jtbc?xt%_uEOHa}5 zn$C3Z;{3r`7~tu_9OJIet~mm7C+0ED!DEhhSg<7WHpyX*x`S6SGmC}wBEts!e!=nI z|M_1WKSneD57UAnUM_93k@l2U>Z3;1DeYcPSW34qLO|Fun;4j^Yr~s8>-oJPS}_D zqU@${?o`7QqZZ;shE~a02T}SfO`3gYT0Y}V>D#M%sQ343D2nO&) z1~egN#fJZ<9_ee|{({FGkET!p8ywmLYYuoMM`(`!50W64vQEV4*&h@nFupgAL!3q! 
zTi=9PyGZYS?^0yB1Y!X}O+vzAQtE`2`Q}0owqvqZ0)##|Q22I`J@s=aFk%2D7vgoP zO@=;&YuCCe$VzwE@g|MXU;xwC{EU%kDR8uGfEV5kfH0#fA-O7a_h0);xmifEGCe>4 zge@C*z!xeQM-w7!IjCzuepH-?EaX8mLO(jc!lav|p#=nskAyIR>FscK^7 zT&S9~X|Q;3z_4AKl!bK|URW4gokJ4;h_uWJlLfxOn%06;3w6Muwqb5+fW~krNVzN} z=%J|2i$j;QUMPFKJqFRz!QlgkU&kd+9Cjd6si*09q0>6)>WkdkT%_nee zr3*V^kP;yzmN%PQiv|vkk3ME$-9@a8Ur3skeIvlr1^y6&={^9ORA(%!3vUv>IH*d? z7`v2rzJ!E0za-n<-iAOR7A6-0B|BNpjz@<Cfs3Gl!qdwPdm2w>|vYf!z47LS1W_xweF*jk@AK!+m)F*Sj}$5<<||XGdqoBh1S$|f&;fu5C*G(EZj6uKj#+t@!~>^MhFm+Z z%00Zp6aWLE`m>Pg^YF7CL#?4@l7Ah65=ICR!=}8gI?ncP8TWwiOvw5NIehg*C8eTg zI4=(21pp&nH2JACcmkR8N?L^+U|SLrC-0D9<^uXYvwG31Kn*k#0@S+zv_FXDN5_IIzn)c) zfc%!o=OQPRRZ-yrQYw0Me)kA4ch^dX5_ud>MxI!=r+H#yJw{$g+DyTJxtvZ_3`nvD zR}R;Ksj}92L$e_cWq|TDR;LMFq`}gIUKH{m007h46UKk1|_BM^UkPi{r28x-+Rv==X2ISYpr2m-g)DxZ#<51;<=1gx>1Jb9w5+zqocPoAM!BfeY2SNA-NhsjnU!;)9U#Bzq2)4hb2;JVD9BVauo>6kU#Y_#)Z_dR8}hA zfljP)^*3$Fx8(#vT7oS9w8+!{?c(RphUb%(SP()~#L5g+?Jnm{+MobcTH%6~;M|Ks z#wKvL9>a;Ck~5N@W1|QqTlVyFOL`kZ;<;jg%$06DChX~5hvTa}pYOCj2kaRyFR$>> z&{PB+g{xkjLB&o*fVL5bfY&o`#~X8+i0R3CmkMxL$p{Ir5D=a__gna}@Z$-f5RmrW z)+;#_BK$TKb#!#@-u1|c#pqKmKA8S9Qeut4QeG?du|UFy0O4#>iYCZ!RHrA_H-RtU#PK6+uoxbE?k0>KAFY`Vy6cq z`lYJOxF|~HHA-y7`)M;Cw7mH2N(q5zOwl zJ_V_Dr39oTtdAQi^=a%u9oyjX_B2|EDT+Ho!kB>hREX4-%m4`qd2uNzJl>E;${{hH zq)VcCyoOmOMX3m3_L;D=NTl2MPsh!cF8X9%9~AyP6*#4GNZd~T&^+eLG=wYBd~PTS zs(KD$aPtl}6cXp(yz_#a{57x5r)7S)*Yf8W zq2+^P$4MtU{-FX;vYwo)6+(;H}L zVfW0b%owd=7+JV=Hxm>%`CvE|888BotFS`b)ZF(h*||Fx`JC2L77Up=`bLz;OBkCQ z$hzGC(nky=VBWHw6HrN&@-F#m)%)`|>G98;Nt-P%7w~E4sxoQc4NJ|+qP%no1QKSh zK?8*raiBkv&QlfN`(A&dAQTzw%h{8dbC(#zP&IL*O>;o}-RQB~&2m03Pn)Tr#_*WN z3l_Qy(Sq*#tZqL}^9>mD+b2RDb?T&AQZopK;pw!0Su!QwV!v$?+xhZFSy@?-3PNpV z{eH^noKnw^6MHWuezjU`G_2vl!3oTQ(4?YJESYbyq^|Vf5jgR9S&~5IYC}n?12Qlw zE~Bh?^4@1JCW|~HZpPGR%Dw`2rjCrOeE2+r{&y7os+0Wu{YUbRI$|7#Z)PBm1|Gr5 zyi8uOEy>AEOKX_IUMJw-rXDkR{P;L?2O^1uI7%F>@ak>l?h;;eYpVmkYO!@9&C}7? 
zyP)0fWcGmhdjA}D;&R5TYJMFEGlq@fAt7}^+h1vj?}~~x2Av}Tb`9?QAkf?am3qy< z%F4rEU;i3x6QC%=jMU%Xi8ja!+Ezcfmz{~%$jR7XP<41SAtr}0Pnua(I*m$PMOz5- zF#kScgC~zu`mo2#0=(!m+(rrciq0J!8B%#A(52xtX%G$yuMCMs)O&pa-3D!^%o@?u z<-<6Z=EuA|(9fSg2SPpYIwH#gr6J6E{1x6Xp7^o8s1S8zIRqLAb9Fdz^|IAb_Z z12CRPI8WQ5Xv30ku{_xKi9n;ZlXH-a=NbIb4`c@^go!{5R%Z~$5`10 z(m^3MGLSC4Yd}MSdGBA@cAq~RETluO1Qfq{DLwdqqbp5v)If&2SWzQOIPgo(U$`(^ zC5p{a{J%7m@!oW7=%2S}2_XZm{0)b6#ZTM@XE<>gnJ{lKTdAq3dH238gUa}G{8T&^ z)#^Bex?m^ZreheaC|j7f5;d5;$J&s2tBk~WT~){8q|brg}f1aw(IO{ApqS^B_S3Bd;R<3Wc9GG;>!1^ii< zH$Ce?sdl=wyuHbAR^kMQ$t*x1E&_4+B5BN7wC!z{*Q23Lq5a)9g5M1vFK=hQl;P+V z28Oh{!B-Ji4h+Z}}-_HadrLNbZFM(bJLt9nV zKET$QqY~(qP7x6BXf(;pBZCL<&uAx(OpMF+2XYVmy($J(3uGqbbAgEH7<0E%xk$6R zx;n^)Y1OK0YPdN$FR?8D&IIenayodimtM#A-gY}0y5_x^s1p80-eBL$sL0R^I6VPs zM!Je=k0|^_@e9OpBjuLvT2^hE|#;NFMDJxHuWP4SyXG-UAJ>QYdXQ zGBV1kKqRR4vllC)ld#5!61t+eOq4-Ysw3wn8(Gbj7sLf4D0$k7NT3Wt%nkJZH7b7; ztQZm;JnpaCCdw$%O-`6*ZNDgJgmTtpa4r#h<8vg*GHCF;im$jlG=Ky@}T4 zzNP(-7PG>51u(|E{{DntXca9f81`P>h;*ELcT_+vNXmVmo#~E{Yzu3mINDZw^?*{2(3r%d@kK+?PR!f zu@=Ra8D64frqvMV_3+&8Y>gg|O9>frosT}$cjhw4@eO>S^H_hZVCQ@O*=0*~UZ+o( zynCE=&~ogV>9;qlM!@m5mj4O#nk^3H)6NFv)6EVokvH9T6MX&IdtLUL0DhgGoD_j1 zUILP5;Brz?z9y+|aq+4ewJ$}Q04=TIj*f(^rc2$>J%2$*(l9cbz<6zFsA8wR3}V+J zR8N3G*^l*uV2Wm%y+r*Yh0)o0qpbf@!4}bm}xU02d6;Y z!_S`TL=qAB3Zlg80qGrAJzc3$m3R#18q{yL*w+ySE{xAVV(4U%#4jn zkwe6(aw!o0H%HvH4*~|)oyz)cfoLWz?W=PVb11h~n>ciDI7n9zVFzqdb8o^}wTF9q zZ-B1DRqsR^OqBqqb2hB<#D{YY6vDQpf`S4AJ-s@qMU?&4G)%UDB|m}G^eQL~;+6M5 zG>92=tRRHK97{)&nAhpou^9cea<@O-HGII~BbED$DM~@NkMG`9F52^vovG&p))thT zbM4^i2nvSj6OkuKIQU$*ZQb0;3zmXtC7+i&G=G1ea9cyr@s10WPub%GiQg5*)}ickWNsMcXX9E(o4h*TwG1mjQe>t zeX(r4yzF{oNt9pCl|xPXE`i5yYxoqkq}UupLoro{A}znnL2ABF?G50#FEUgQ>?PjY zc{orfHgKGkCx;Jdq5tZk&f%CFJeb-X-;Y?vR4*`#ld*T}`X(h^6?*z2U<^|vEC|CN ziWn%@ei>0=`u57VZ%p3)2Dsv_BL6$Ul|(}S3*h=k%m2{eru9UfLZz*VkmBG#>>{i4F3&R)*C^~?X` z3)fvI4d=CX(LFbJ@HGRb8Vtg4pW!S^W6HW$Ca>NVL7cY)ak8-2nKJO#3+e@&=!pdY zd&r*_-wVj!Hj+PQ5t3}{H3>KJ0(-Z)s|L5L5RR62P5bbehL*?U*o0`2o7u+vUIO0~S`vvKQ 
zM%NMZSuPxfi#;IZ)7uA;F7uiEk*yX7ft|kA5)IVPpHHrdx>qVU`OVxpZP)>^2&TgF z-^d*X!8sx$t}bIDEsX(uK&-wv4fSZd-@+O!g@MZu2kje@V`r*f5m}RRjqGm}0t*=g zh@g~_QZIy=ihn_ue2Xxcajj>$Ytq3J*%YMQrs;U9kI3>{$3RnZtKA;{o;sW2%(%roxY%A7nd)CSib>ZSgpx44BYRI1m3Xs)% zka0x%8qGvWe{GvL3e|KH4dz#!s5YyzzXxM5TP}EQ%+I`f9)= zOhI1eJ{3#2$*vVM71bT0pWj{(=&2P*Ch51_81KI^Fx{D|klPJrW0^ytCySj|Ch7x7 zS&pV91ACu%8dV1NU##Un$H0%u8tz>}dYSLTN0Ms3eYWt0Jn|4$7qEl zFl)@8zefo=n)RCr{I=U`MtN7(W{xtAy{aaBDRq-*UP9pK4}W(iLczURS^Y@xGI8ZJ zGBpjEq;y@A%VRocO^SY>K_BNLpA`3G%sab9Gy2U2BAsEf2NA&RV-Q~(tFS$=DB2jJ zgq!rfib@Zl1cp{iYH$uOX*%z3O~=7ammKRPu@5H{IqjYksO&5}2t#Mj^>~m)RIF!O z41rHZ%UNrh{oQFn?FSE1hyrxyG#!Q}EpLG0_U@5&$9S?KP@yRM++}w$x~cJCJ&WK_ zUgs5m{&Us-_jO8IV?&DQh~>T_hDe1*9Z3+$et)fTHTeF(C27TpU;v&6gi|36#F?BX z$87jxF^m(6iz7Gb*tmrDr%1JfPv2eYnZQ*5W4I`dr`o`b&^V0IJz=_fC|sCj1lDn# zPa_>V3=*h&P$`@nLLs>DN7=|H(BSKWsH`sAY8+0wh-j%`Y@`5v-MqlYOoD%d z90x1wGDK7+9MkRXo?30s-2wOr%x`d3jJU&t0D;>4u<3wexqbZWpq|)|xRZ95R(#$; zT6=DldT8nuh^`>c1Gk|;ZuLTC>~zJCPLGf)S-g&m^^EtmydcDUq(nH2hhZOrBb>l1 ztI5vSFS-@o*K%Gt71E=ltxdTs3P)dg8~P)%YybM=%YS{_!{n?0T|IcK;E>Pk_NdUW zKdzFcHHrBtfYCl<%A8NYww83G%OrQg>KMZh;A@Bwl^H3)Zzz6W&YN7PGTM z1Nd9faqfM*0G-`~oJS(Cga8@mjP>KOJ){5X*>np=)xEj;jnE|}Gop+LpWS{3y%HkT zIQFms(+=n46-n&@lMkv-%tSy@NoiJB8m>IZ68RM$Jh;KeW~s?zuOH?HGw!#6fmDcx zB2XH@lYjx4SL!}`1XC|OA9+GRkfG{ozoI9!ly(%1Sg@eBw6vLtNuJ+RsO171jyEqa zEQH`q+m|K=KqB1xu3q=~P?H3{5gM=t?DJ(p^5p->0LlY>q+s_lu zv-9(7XlllUg>icp6&SWtSB$XI*4c|o)1>vYl0g!MZjOXE;pfkvEiE!;YIKSjFrh|p z>tcFHE_xclllsiweC*8bi4)L^FTO{(x@MO$*BsUv8$4>qe7q{~u-!fXKi&omEy4szAIZ`<@de=RSY86$J?#E}mYkQ(8Y8pVD=&|pbI_uQItl}lcnmt7nB7quX_)V=qlf5p#_^iqnK!@nl^zsCS}(XS=*mqTB|(Y+ZzqvHH)C zvfJUvVgSwTd9di+e6adi11yS0@t|RAnvaPJA~S8zg%F%yiEpy za$F-lv@sl%Ds8)P^DYvs-oP}TbKaCccfiU&tX1n^Q26-1ZVQ1x^k&}U!s{hSU_1eZ zam2W)nLl5LLyz`?@77oUiv8x;sX{ltHBtmy2KdU2^5a?*cS;ha3}(W3_sF7i&&HsO zMVlqUxW;+7tyygOV=rx6J7zfvA?smAj^j1nzb1&(B;l!tG4bbLTxOAy9`hWeSy5v( zv0P0}nwKtcr8c$};lItai$FEsbXqff^eDdIwHpas^eU;>Cc#d-v7uIGPCN`5kAVkf 
zVPr&nOB(j6jFBW!p_Eiq5N|j>%}CiRzv9xeDf~|U1zkq&3^a1nb0XNC(JPbxWq=|& zX(e63b_Ee3=dn)_8cpH=JByH#a@q1m{!JK9!9blQJUu%b(a12X%E_GtiWQ)7jV3nW zat8A51kpN=K_z{N|G?-??kF~FkB8xp#ZU>?&6}!Ah&ddu9YaQSQyNWrPgj>wdgBWC z_x0~WGY`FB{bz7|OG}e*Kf=XS`^d2j%$6SO>1j}IRi+mgs~+6H-*|WS2zfQLT9c)E z)1yzJj4GYC{h~%d21eYnXfH_xbB^uN!hn}vrKsk69Hy3VM+XH_a|8zk>G`V{Ml@ga z*4Y!i$k)1^83up; zl~;3nJ(=p^(a{4VBZ8cqihv=3mZuqk<;WEE%E@{sEaRJ*SXskg9=@`&vNC~UumSr_ za3KIM0hs6p3BTJ%5jice8Yt7MYWhX7YlneXvJ9+35#>vLlNQQr92Kft;1clm?OPaX zG~RV}bp_c;ltnN5otG)4M9c<7%sSOv;v8-j+7bcyAyH!c+1gKv=KGB}^Nz*wSj2`o z&?d}mCXLqgy97Sy9FYmF$-4qCZ_Xx5CtKGs`oWt>5(7OT>rR+((DXUH~st2<(3rE>bL& z*poD8d;*dS82!S+mKdVEE$W<lmTiZ+9X^Gp)IJ%29arU z0EytOkI5Z}^-lT^p2xG_MQ)QBtcsc1T<23drD|Fk#30n01wTqM3;l-C?Tv zB=ybqUw}qmIDcMjP!&U=iVnEwy|?YZ#r5#9X4UKu&Y~%> zC;(h6#2fgR>sAcVJWiQwNJ>d{#@$PZ@I}qm(yZx<5MJtOO4BR&SUnTfm!#^SfrpPj zTc}8C8tmJ3Y~VAwCaJKGY7?GuN>UPCYNqmZcDxc?B{DyDQ_IPj_JpsO3D>D}6b`Oe z95XFHv*?eCRO&Uo$^uNJz3p{~-~H)`%$tK93z^iws?hyi zQoEYg%F5dSckfGOoP1+qpv11OmEKi(B2}r*x+zQrQgj!+|Lgo3P+vBw(ed(@Bq!Hq zn2#1jG#7!CBDv+Hda<=Wkhll!gn@hdqR}Hv3^t#vs$rfG)x8(5$yZG(r{>$n2GQ@Z z*M$hFm-X|*$&zw0H_jVoGCRGXs-Rs$hzmr(pRb^i&V5O?`y7P_!Y&)%2hhOqCK@~f z^PteCuyA82BW}dV+@wCHIr;d3U-e29OnTRcl3$aVbaZqys~L>f_ghAl$6hH0ARpWB*RIA6&szJ45bU+i`a#iRrZI2kSZFn@0wW>2o!tewhc$h0 z+wGU~7&x10wz?YYQxwQ(cziaL?ml?S9m;~Qp$S2RiGOp31Q%C5QUapBmI zX-~iqh&qwut9hx#M^Wj<#l?eZ;;lk?x_oyQjK9NlKOltg2fAnaXMMBE1q3VLa7k|w zxP%P9CBC>pt|N;3&=wJ2Jm*dD5gm3+H{7GxycoSd^J$1@Sojg-s)VzURo6&esWVAT zJ3RaL*I(WPI%l!BBtqx>Q2Jyd1_o{8&cIE8zY-7M*(E7ywBe->xIG{Z1@bZiQF3g{ z!+#?Q*yplF(wYAXoTYO2(SN0YRR>UyF&H!9vlpPJ8W@?HYTjReb^@a}qo$Su(-2ra zASF#rg_73S%|)P1wCd+cYu}rP{yIU;fXV^X@C%pxQS25YtL9i>1OYGTDDI&>#G9-+ zH6!DVkB?2U25bRf*CuZSw+}!xKH`aa9QngLP{P`2SNJ%Mz@|Vs4y4eqjmZVX ze&B(kwk{LHd!hRg5wlTI7wiKA-9XSweNm)vuL{<=Nz}*)(rhrB0j_u~BO_yaVPQ{( zD&D3{-4y%aU)V%+0Xz(X!rtd0m6Z@1U*HA^yu)=rmN0ezE*Iwg?}c}&?kY5o@vUTc z2u1xVLSN)FSV92bUcs-mm_SVsIHF*-4<&zozCTMld%AF2$@X?cW-joyysxoqfA&3c 
z-pl~VGfZTcfcQl3pcD!IrhxSYZkvKC<59rAIdC@~_^9T87*&>B|G=K6oR-}mhgWBGi}x?Z#m1JK^sq$Ua9DbKo<;7mo`!-jmvtSW z^!@p&Ti+!W8SpW&L2*lVj?h*`sdWl_?&X!xA3G(`p-ASj zxEISvM^_WC+B6m=Vnb~HYGE4&U9dH-{-`{^RUi-^2|n?FoUe#>bM{47FGZl$*rNg$bH^!!o| z|DW&M0@mxphyB_H!`eZd-{}# ztr=c(Vp*l0=I>AGB4*b5SOnJP;25A{!)@9d00a(KHn8ziwJQj2UdV+Q7nx@^z+IK> z*eW<&_bd+lb?%nXZD*(e1|=ouS%^Zu+mVWUg4l`T_oH0ZiW^{P}H3 zZ-;?BOn^EC2)Ocr%;?C=S6rKYrd#)>LAuz-fXnA>^$o#%&*V#*QhMi2dqUEl&t~GO z_kbS@V9YB*+r(tdV+c7*Dr;`cR zBK@20x`kNUc9*xDq%oqqkGy?8zUW?(GXTXzcAX zF)&M6bj|Cjn(@rtdd-!d*5?wLlA7ub6=8*=g-vZ_a|kYTL1V_QrMBtDd;$-Kvqo}D zh;9PaC#nB=On9|#np!}I?E0k^TaIn*u@AnG6>97JyY9QSYhB+q;W36uq>r*;~ri{nM~ zh5TqQnU}`IH-!Zz6m5^naCMAbL@8%HXz%E_a-lchD41+{-2t?cbYK&-Ah90(A@_|3 z&5o}{x997=`A^yJ7e0b`%$Qr7jshG*LfJm|TM;>Tn3gs*5NuW(R0ueMC`>-8zsEpu zi+^p>2Rs!>!3>OSJDWK{#YonJXyXp(9e{(;ZwthB9f!Dx zh?KBm*hT?%YwLlObm0e9UF?uZ`U%3+6)Q_#oWXnrinn-$Lo#v(jdcY*ot?H04(ftg zZg1}?U7kUuwcb##Y(c$&^Ij@Wx4=`bO3Gl6b-c7pL zr!Pyk%`HSTV*|Z=_%Cw}(pA3C+2&O9gytG~SKGGjPQY!)0pJKw4hsh|UBA2&xI{7_ z8=W5Ml-7g^k~Q;JDh_FDXqe{V8nhZ#OnHXp5JunY`646d*$8{=pm?tPVG1q@bM&pX zTRT6-O~K*V+Emso*|MtVg^sXv4P&)EU;?}bpuL7s9+cp-j?5^$P&fW- z*Ti3kjw=fbzldV3WeablB`@4N1(z6>P#K>#1vP#%k2;K0d{@qIzm8P8cT%l7-iJUn zq>mkuO9&Yi0=l~%7|^%c|HcIF0=`mfsGh*mk2yg=_f&C@DFvNq#Xqits7n35AIIBN zR?9W}$WY29pJq3>5W2sUfnJaLa`;IX(M#QMftrTb4KCyrKQkC;!~o=C{w7bQr(0rz z53jyK&EfSQ0cjD;PVB+#iU1D};o2S$cQmE_{SzIqj`@4$MF08E&mv$I3E{78JR;i% zejU!^Xy6sRGC505WCRi|tY0G}4+!Z9{7;;;c~=bvI+%g&t20v2nPDvd;otB4fsLKO z2t711aYVJu7>`VDkXzJgmk41Id!vU|)Mg_&n`da;q@B{u^^&h0kE z7`$EfOTAZ$#{_^R{;xdCms3^m@_2eU-L2E|1I=-Dh2ii@E?6x0=8^z zT zfYZQ2%zsC#%Lv)305C{>0edBu{u5q369eA>E;{v96eNem!Q#A}9JzDqFfRpv%`!sx z$t#oF0A(Mibg=~UqoCe1WH-KT?zF}U$}WT3>QlIx`wK~>_DU|v9+tV&DEp%&=6SNh zzXQ!q^bb9&xmdU=J8RKKviuv2Y);~1YF}#5fCZZoN4^S(Nk3TO0RQHQh@X%f0h7fT zv?6zKb9IqD04jX<1_2s7dqcwkYFq%;9xQ=7Ii?a}dokXRAg=`VXE%f6cJ-{Re!$^A zEPB0<;?`7G2c!IQ&6wqScvAx0$!B|uKz>(RDX&B;4J=vf7uE9I%$+SoHjM zblH2uk`3aRrD2!VW>0^$il2-8`%z^a_eA&S8%o9 
z!-u=BECxBKK4r0|0bAfN<{>3n8Dw@(re&22%>xbP~kA9}C`;4@70PD{?8}1|^ z2O@i5kquQR->peT@B!#8uv1A^!C=-z>Dj;z>w40ous(HuU*=FGSwRU=gSYJFLUfMe zsep4CUJp=$tT$olVz|-GTu=Vj4t92_0MSO7i2xst|w5!Jhtf zU9Mw$5Zx}De9`dtbEwh zlsLNCq(EK2gC7-Qv3NMjOdwN7frNxfOfznqn3MPf5^I#d8G|F ztc!Pp5uZgq!gRQu=eGJyvTl8^Jk0Gsh8*Wh?hk7d!Lki&um6(thc%{XV)mn$+g0uSmo2u~yXXSay(4a;n52S4h9vVwL0B=NtY z>BwaZ7`(6%`X1Q~*Cc@3aYhgs%mM~pH41x=Kn&1L83t6|jerU6o5uzOBLIe0*5& zYFPlMIZ^0>g6+M1;j>c~f-&|dBFJDCFLqu~=&01@R>HZSi8wOv~T}bTBX+tECuHQeSJ*bH198Hb1Z@k1FBy>H`afe_2Xr4GcZJ3_deXtj)TKqDt`Bl z;`%y(t7|e5SA_P3SO8%EPPlyg1MC!~yZ8%J2e=4HUYO`lcLl|)S(16@Nlx#CPcZ z*4{p_mKMPJgOtt2ltG)%q(EfYb%i3t;Za={HxwvexSHeuNn5dSg&T{EL+g;itK>Y& zSaMS8Wj@EvdguGY-himDi9x<&vxKEDKtJMRcYlo^u`bv_|K}}$DXfWA;`x|p7(Y#= zI%sROqDJ1twz>i_R&`Z@e`P6n^W8_n!_RPTokJ2K8`s1Ms)G*EDcPJuHlxW;CWh#j z?>N$Hl{tk{oJ8;|C=(#3V}^x5(Gr9L6uTC&{UAtQ_b-Cm zj=cH~K1Hr2jzR}gvPzTtGbX2H0an9QkY zrPgdZNF^&8&(4Bs#@fL&TK7^96A8cGC{7Z%>cadMJlW{)P{D))CT|JSxgR@bJCi^~ z*FP{ID(Ex%x!bDQG8B|nVDWX~1#k>RFVdc_dz2eiU=A@eEl718Z+T87zvH8%tn4?M`eG`5 zLeIvA6Ic)506_mP7w@zEY@wGfT{=PB^O>ZaL|7x(DQ!kq zjDeDpGV9(qls{g9BR_>1Bj7CxlSh0}FH3K3kiNwzU7D(k45sB@Ak_2_my^PPm@BS^ zGpXjneu;Ws_6i^d>!r2HoC$up_?9f`B02d`M+YIhkXdgw6+L}2n2IK~#@VS zq9__c8E4SN;cTpmtaSS8gqREjimtd)?)@Tz?r%V$vBtCqb-kYc9ZzA@A|nIRZ>_9C za0+U+FfoyomhPGYc2J*8TEK%Vhpx^vv;~~igT@NJzsy!pFAO+7NWfxQb3?-u4D|m4 zs?VE?ytV%A+r!^o63wXeLs&y(3C|_f|HR_K+x|TkeRsAfTOJE^K;U;+=XMu?u4%Z7 z_rB#hE~|8DFP2J0Bkjp;y;%$_-m0~E(LF-GXgW;l;I+l|UIY?93w8VD9HT1JtP!p6 z{giIfg1z^U#b?ydyTy@)kd$Y!U^7h0EMB*js=N+-XGA14bS(}=fW`PoKj~^B!1G^4 ztrh_eA$1**rO-^)^=>SB+K$Ay4{I&&EVfx-7P0`)kTk-owbw2h z=QiirfxM-E$0lH-%mly1`8oE*{}tR0LFiXFO2Yr$cla3ozoQk5vLAdwv>zZu zhpseoM_CCpDsrCy;m^ZkwP@m)6eRE>{UMd&FJPUycIuze2iYHc`@cmW5Hvv}xv~7; zI6SC%&rh9a)&3bk(up9y{{ymU-hSdf*>C}NmvUCXEz15D=tn}Z;%}|W!u}MGMooqk z7GJOvlaS~Kn@OE#z=VggDC`<|8>ureFfxvVW2T#q&M54CUf<1DtBF5~7hZUk-!M*% z)%3wCOb6;c8>Ngqpu2z^2g}MEpU8Y@uCE7o!BQz8syu%DIDHG5A?URMFWu|$%^LOT z7l3lZqQA;lz}YNLCknVyhLC=kuF*3yo8eiR!FUnDsxLv1ixN{lk#1R~-_!t`=VHTD 
zY0m+VfSBkiX-a`2x~ar&{=?A;?+=wAM?s_dutx{ue4SB)olZ-gyr{<4+pt_#$9@x8 zw?}Bu6rKv6^S}UZuu@B!w1DjfFm2no4`pr?_Z#Ky((McSpMjNmwDV_h$g(XjvygWT zfpB5~DAco05fr~Emx84n3JM)VLuT7iuU-e;Q&49hMM+C?q-~w!1zuI=v{x595%WW$ z9=y{$1IeDJqq{UtwHHE^$}7SI(N`^o5YFT(vT}33QZqi7IK1a#SI46+V3-736^XTs zq21%);21J5EQqKYMZR~b!EfN16AL__R(fSZd;+zid@g-2`8V5*#C4JL|Q z)^nIOjrl691p{qs}co2&HcWz(2zPCelQP02s4-9&HFa^QJAX zv;d61fVlu%u1Em@gLuoDOx+%Y@}w^U<)7QEpT=8y3YHx}L0{!+T#&8WjCRdLKV2{x zBY~*!%A~-9KXBvs0A;TUIta+5?`Ua>04X%-2r8k8o6VvIhED>xD}H3p&&X)vdIT_7 z&@npLdg@XRLTM;kF<3D@4T!a1-%&QqsGvCx3VO7mV+iyp`!YR18T#d!N5F7NJQCt< zn03aqmy%;%KjeL6=Cul{a?@n}d&>$wFY!%SqM1lpq-lIBL*GY`W7tWY^L!h_&%R{U z3x*E~2s)p~U<}ou3rOZEpDUebrAet@G+m^?Is=G!lDt~voGEr} zc-DAqyHwqyrF}npp0GmF6!Z>n5wo9rR}jc#;mz}70|HNqy@wF>BXG97W7>-^Y7g*O z_Z=E3g%=Q{!fatT?bDy}P7{}g@jvQ5crbn`6Q15A?zs{8YbDm*abR=(vY|u@M3|QS zb0t<%?fz93P8yW-8K?uFo?NDS+@=%laXo=Fc!);@LeN7ZGCFXYgQsXWhU@rG@S_FE zaH*)CzJC2s-AJAXJvK}k!z>yRCxaAv;njIqxC9d9tn_qJk{r8vW&c_mz{D{37*46E zzDP6ZO#(eeEpM!OEn?|L#TqQ)q)JLkRMS3CQbEb!n1UfTRc7yA1`COny1D>xmS3Bi zGT1{9i(gQ23EaQlpFVwp)dDkT*{K}5aAAiK#z_c6y6l+R!59dpJ_e`AFRAV5Q-!y0 z8NG%1@a%M*W@M^F3JMjL6#O_mGEL8?JFoa;!T{7A9N;ep^=%0-m_R^dV3uyt1QaTu zJ)Ug4GoasXNae5%mYTTFo~mhu1vZD@+*WCs*^US$vmOEFE>0dz&e0*)i9pe{<7#$C znj0H~x2*L-6Wr>yuu#h4@-ig}2?Ln;F*0&635vptBPbSvzK-F2(TxT@bkm|ADpUT~a%A|!(@&~VM3SRf%LW*^2b zk{K$Kb!)J^YaPv%;jdaxwMlNpJZ6~+yMx|)k&!!1yt}d8j%A2)$q-t*0g(t<`sfOo z(DinEcouzQqgoKXp4z@dJ(#QY3!fwD%ok}^f~^4Tqu@n`Yy4B90fQ~~%)?4V*agpR z5^s4rKjp(c=4<|G%0WSs-a-Yjd(zoIZ!2{BdVA-XgF`>B;|wr7JZUc>N9hLlkE%*0 z>;}##b5k_KpVw8ku5$&C(;G->p*us>EF{}DbiJuZfznk??-hv5j$|sSsv6~lH8afS z1&$)rwxdUn2H_AfJ$QH{=)CzeC;5#1yo&iNln1jmZ&d5;)mohhpA&8D>_OiS+f!R8 z0wd)+!NR`TKCF+UKh+-ofsRkAZUlC~%?~}+#C$xI9hGFSO}6FoLC_Yk3bK#&$D%LE zR)`_j#&@20tsM0yX%MK+90Qjrc~{Uy5gO@3#QZhRILZHgLR|a4jP}PTR+^eaFcTAWQpfv!?U0iP z#}CFbw#6D$f`eojk+VH-jP?DH@xtxi7SnS3SJDy^pdz+5g{nN;&jh+XpvK?fpw+@8 zq;o+u?lW0Loax1iKt;jkBJ0k=Vb0V6SelbK1y7kLWy(wShXHWv#P{2j4_3k}G7g5h 
zR7FUnOTXp7Ofl3nZ#{#DD#caL4O{@v&tQ_*x`?m2`g+P2y41=JSwZrAIRjqoOG(f*mxRxCqRFv;#;kTlfYnAGSP31~Vs%1zf0EhRLjx~~H(<^W)b#m{YQ(a&B(-nMuJ5(>_SbKP&m!lG1rr|TJkmU-02#~ATc z378@zcrBPYZhp`GI>ckJu>a+v{m7G6icOG^*J2I_=^%=|6swF%$!GgtsZ~i6JI60a zFJ#<1z{5BxR4NnH%YMWz9J^~v;0S=l|DaVFUNbJ_82?;;C7%EQ9ctu=!2IAp5v!1| zA@R){RHdUhe^(?&6UnmoG9u^x3`VG!X5Rm(jhdS? z2B&2AWLhB+SE;#daDf^g9O-% zO$`Vmu?`2A!HSw<ic1!C$c!H%Y-A%_*ma1qM!zpubAy^%V zR+iqZC71wlVY{8{!hIo6|LifaUTcG zg@I=Ep+GIBnLU0z^bkT$lOSO+%G(WCL<&g|4&e9@8UaNPywHx{JAeLs%GGR4k3hy; zA9KrDIp)R%%;-h;fnuAmL27k|ULsiJyRM`_g$EtU;&$ycO8jeXU#>}=78o}0hg`YEa-&G4C*9Frkp4Gu1(%{}MaU&%y z?Q5JN?aj*|0Ufnu0{RqC1fz?Aq68=E(w*lNxnj(ow2Xh^e9`a5BR7Rnm^oIh0QEc0 z2Yvt3bM&2pMp9j!R9Pc9Q>)1GeZIYf#OrM~PD93~2FkM283TlfAZWn~EQoK;f%RS! zt_q6=|K`~KCxRWLQ&3iuhWw3STT$UO*$xRDpi=U0@r>+@ekw#pINW(%K%?iUC>+oP zj#~LMTpWXE+8?*Ch?`muY^F|TPI9U2uylYk0<#bho1WfGPu|MB)xOC!7{T%$D5qsx zrw<%{i6?wcK*{p-OSh$hCx|CN5%J5v7p6Cup^o`n!Hy@ZAtAZ*87L^nND^RN(_Er` z@WXssRM^CKwY=njAU6uwrE#}PecLfhOAbF3t2ZvS5VMeI7;>J^rJPn-@^1Sn1h%1A8n~VOhgS?)T($0-UCnhH+g8=IKbuc9d zibmsSC`{4;1Fvz6R0M|9I)y_NLCC6vw4cJ@y5<>JwlS=Kmp#_v@eMdk{%Y# zk`Hl&Cyv0Vbv>tkc{4dN(YHk&kcrc3JcB{CTjz1uQZdcvU6NJ(x$V3!=aV~#*AV}A zP$FF9RA5e;GWpEl$S->cC%7M(FpJ!Mj4Y}+ab(Q~c1qv#e}+#$0DlxwTW?1u)V8kq z_cLMM4oN>x5Y@+DwXj{6co^E@a_h<0QLqwELrFXRoA=q8qC=6SfF?YhRyti%D1F2KHxbDW9NjGf+{Xvtx3y?+;lU^MezvwZnYd=q`L7=Am&lhC7*` z(D~`Jmv+t}^-fYpeX`d5DkJri#wew@W2^aWsW8##_WvfiQ+~yA~g;37>1R- zRbMqvq9I9YvBLmf0GJ;L@m=BSO~TKMah~OZX#v!1WBvI#Hv%_cY6|rAd<~2kYR>J3;wkw=AJKm zx7ZX<0jiRtUG%#FF6cewv$9Um2PZ&x9x(}`j^|)M%_~>YI%@fb3iVPccNiRro&(yH zk)QIk+Gchdu$pO3zV%KFT64H!BrY;?*1j||x-R=WxQ9G{j_a)qO0w@;E}oM8tb%%f z*D152J-ocUP*yO!^T4BoM4cBD3Z&-LP?GCk8DOVP{-**<(H_N{cVIWcSk$^fgC{3; zq#3P$cb@UEAH^+(P)?1n23u5+N0HW|d7&i*bUA{8?ho$*_xd&NFG60>ASh$HycMtw zsi(-%TY6z?O%lpj{6+#?K0yV8a(7vZI&yxm*u((Dm60H>)SCy!#`-i0TwD;o^J**b zor#fD-Yt~;h3i_aC1h~|HKsq`sB-LXKg@if4Nva4Hu?_HsKtj)JpJ&2#;Ypl$&WGE ztqpe6zNFyau5;w1Qyn$>QdkCfmjnKLq&ehU5Ezh3OU?z=#iQ`hPmKFYfYsXb3N}TF 
ze~zv!np)ig7Op-cxLd)*;DMENffJB+zV<^WIak&8E|ZPXeT^mCT=t=PiS_7M@x@I7 zw_d?m45@YuQA+u3gZpouaA9i&^luinQ`J|N+em<*Y5=>~e@isSg>WK{y8W2*r^%@| zJ8-Yt?z0)l+R#jrhR6PmqPqJ0k0*oX4lvr?J_hlZ9MBx&qAi~*Ri;o*gjg-es&F9i z5Dw1SsNVd6VfAng{^)3V=fC_eZcR{u2p8rGcSaepL z*ZgRv9QpCWPe@dLu#mG-$qw7(h2RIy*V7g%Vd`je~-rjD@wtC8awK zf+Y@M01oceNCPIL0j`;EJwC#mNmUZ@-ywpjxJzBO6yGX%pUTGYqQOhszZQ1HUynt@ z_pvgg1F!BNqXp5cTzTN;T~N?%DFP-!WIc$gSZoWthmd4u5CmI5VNKf3-hTW0W?mE+ zoE^hGRK?CrLE^3v90XeNnmi)p>`1@39w+7uOYUJ*1#Ut`LrVbncBQK{l0j8JYa116 z^rP)ki)P+ccT$)L={58c8WM0T5<|fC=ih^1b9%q2AIa zfA)^&+E9x#?`B2)-*CW&tMS%wgns`!m|aNzh5|}l-I(JROQ`k|yaNYhDV;ZfW>V*2 zKXXm6P3Wbq0rhI{$(KiYUvH~d9I zf&$9QjA6XrufpyBY3oX(n!46@v<|mYq(!JjC{zTz7IBCO#0XRosSJXGq7Vj|6hg>F zl!yqiYC$HM3JA!gjK&0k1i>)1$PglgIbjHtF`yzMLx2qDzVUwR`@SD{t*rb=&f5E& zecpY}J3P{3^E1#Oy^iNxakkD!lw=LRnM#mdSMk-gN=(BK(wtfQiPEgiHAEuX@PCHbzA zX)j-P{_BbL$LXo5lAwKB`z?u@99>=T4?&nvg$D7=0fyfBD*u>CtHzBR+#j_t__kq= zmCmOa7%T=NCW(X=-`!+zn4v@l!X+WFUlr;;_HbcEY8^G8T2I(cRAQKDV zW)FqXVEQ6Zx`D8+Uk39(LBa)BnXXWuYDbgSvjo8lLHK$^|#(c83H05@~T8#`GK z@s`jHex0sAv+Zu~)N*{bQQ%4AUG&uaW0J~Ny0p>GRTHK2GfnSO!cs*}m>ZwZ6f(P6 zcMwsZUk8UgVnnVZi7~OTH&UuD0>2-JQ`5JfzUJ0Y=;GJe-ya<2HLN>x{{GkTcnk)U z^p8jjNS%d2EITL$dR~rra<31E0VUQsGzYkP+E%H>7Wex>g0HC8jbH^;ueTuY{rFSW z%Id`LqaxSp*UlF!Jr>4?rb;sw$7Ar`DzJN*fsj(SQ9R#io&K!uuFXhU(si}%kzzg+ zx$>JC(3a=`Y?f^HT(CeOp!6UG^$8ye5KV+7vDFR!Oh=*I4A*8X20bKIu@cC{J~*x%I2u zmD__qi-g2K6ul0Y8cGY&`b(--;*r(2N!oJWbxP;$J}FZ-F{0m`N_(#pFX%~0LCiHi zXOrtRO{7P*x5&dy@&Ui50dU!F$a;}tcKBlE4zz^gw&v+cOo@8@c!n%`H8Hp`DTi*c zQx6&!lWS}Wy;BlPcdy$wUa8O!{~dD1w>;mPbQ8?d$Wc${@*MS5a4TI&%JWn>v)ZG+ zKBfQ!Y5S2d;e~YRr^e~wVQ)Owq_Dq@E$Ti<4vCc|Vz!`y^l2=;y?gC9CL6rD0H>Uu z@KZrn-O5NPbJ>&lPAAVsGwTYD{Z?EbzVxg;LL(yE6nXtRborh00j`aovScktv15F~ z^jZHpmrNLMwEHsTF`6-Q=l9^?!FPQMY`TF3Hf%0*R2LD|BZZZZALvB|1iX4bF~9ru zpND9eEJMF%fx9^__CZ>&N-|#roHP1T3y{IWOpy>z%Qk_Rrm^uy>&KpSS$n^VXCE#BV8QTckLNXl0KzSVmcx101rTDDHL$+ zZ{$PmvdQdsr^#E6t*@|iYRg2;OZ< z2Y2u=q1yL`!MgsrWM@9sXs!2lCO>ljKK6X8WF8y*;hMicPzVeQ=H2hY%Nw5l;2f|Y zTX0>&*QP+B%|dkbACE 
z={DnoP+%9rXF8LZZA#9TY;zx+7~}viK{3#$-NqnNHk-*)ki#6uUN_z4B>JnBs5&VY zLbs5fFw{eO6}URGrC7o4srO4*W**_GD@Z6?m>wU8YQ1$qgz(hb^1-dQokzL?Ec{;0 zgN^nP5zgGGxMP1+ZTZ^fSt97^hJ1OgbBa_N)Z*}5)Ae>}k)gHT%vVs6`phUGi#d`5 z6iheC@wo0~0u+D%PyZ7d;MQ&RiQv8{2D+vzdzpC!^^*3M#r%fZ#Wk_?43aPJU%2CT7ePtAW#qQjRoh~=JTyAj9ce?yD z+Wf>1Km2<6l=(o6lG5c=Z0AvzXjCMymc#YbnsMq@&}eCD+mGMjAjdgKR8XX|{N2yJ0|V9G?9_*uz;#R6 z`M@Z_=!sFWkv?g>(L4)37XRpDL^{#I&BS-6X-AsPTbI&QWV^nKQ;gk_#I1JO<6n7s z**wV@Z=8&fJE``o+S#!y?IRvl#X))<8v~E=mI#d*A#$msM#hF$TH<=rIJckqvARzd zn?Kd2`efj8{cXJ$RM2fc(1$te|E9K0{4K_qJHG$hs}Cm2<5PsjImc^r(L`U@sWV-E z>L)j&S{aLrCHf}K<`4Qz#FmqT(V^LKXy=++3wuM#>_FJ{P8Y?Z<}1{7g7vac2o&do znlHv!!xbmE#;fQ5xa{pcRTNV864GyA=fD=v@AL39gR&$}^}Pq%r=ny)ZVy*;@O()x zfGa4UJ@Vj?)o4f1KZj|b%`waq@zfxb*!>`!VEOTE^zYD$K8?8%CR1A055SXa$74(X z+OJ+Vyt;&KDkT%uL`!w>FI7L<{)Iy+uSlqjO28Lf0r8347eR!_Ngh@a?cMi7P22eoQK;6V)mu}H`G1e>&9k;`XOK=OGiq1Ql^1sUffiAT!#m1W*N(6=x0eYejGJZXeFpQ| zvSI&PXtdbXY??Z}>Gn8p;Wu%oQ(j;9?2Vw2v}BEVLcgmim13Nwl)1qGRxR%JOhvo> zRg1{5&>k}j(e`hihpNAjBPhAdc=_U(ZM5w66gWPv-1B_|-3d*z=fUSk_2fWOeL((lLLR(Q!OqQAq)AyvtlqY?UE$3)7Bd%Ab)*;B*;K9oR zCy!0s9A!6QYJzN=>ITxh&+aHUtXwM@s#MKIE68xX<4eLconWV}AA&?WFs?e*-9F BFYN#T diff --git a/doc/source/images/sequence_trigger_audit_in_decision_engine.png b/doc/source/images/sequence_trigger_audit_in_decision_engine.png deleted file mode 100644 index 33ddfd3cad74eb8ec62dd9a156f9a0be996b83ae..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 73345 zcmbUJbzIZyA3uzv9x)LUq!krK5EMyCB?SZp>6ns|qZ?)cDy34=Af1zr0TY#W(j61& zu8qOQ#(iyodd~Or{r&F8ef~I`YGP)p!8yzkb|Deg-C-Pk&Y5 zbV-DBMohn2r}Ncodi_<(9VYKDXgkg|xICGLqx+7Zdt0JcTzT>5B4J@cG{M`5yF*a5 zR>daW%*Qe621-q#?<-Z%<3oO%Cz6IN%dzn*IH>#oJk{B9^3s-9n1UmfsaLK$2d&xGP8k4getEf8WXL(@q$kB&^dp#7tJ92V|(0%BAtmdzy zAg+3>N$2@M+`A^T#JXx1o)@RpQ_WSWk1LwmNsjE@liCz|NVzOOc;uaNYE^5NrM6g% zEIWM@?yQP`q^!=#3qp@mPz0ayOU4`d zM+=T;v3{nx5WyIvf0ZTKP$XX~N+5LEOe{L;ESJ!x!-pB^niLrWm}T@EXT`f$N@o0C zbDW*rD{vz6L(}qQGr$A;cazOGeuj%7c#^-J%j;+lcDR^?t10UB)tZr`I1^o{~+?i4U{7 
zq#o7{|7vdfRaqxwmiZs^hgPrpQ#ZZuTM30k=R*AFA9XHt=9PVCBT-3|)$mBPB`i}> zJ*B!UbNiuN|6CQVld+cf;w<~=^PBowKeMvhI!>nj^e8v1HfwTHNME>~f^t^Z63AM_ zIuDiWs^=CN=CMTz^{94oSzQk`v4h_%`&K3${Y{zUynUGPKH(e$Y&Rmi-U7KFrIBG8W8k*`jE$5QZb_;dTPJi{v*w;`m2alJiS@_(0-o zUG*j^*NJDSVd183I>Vk|D^_1GxA&?lA)hpe#F`}*`UFOyiO~aWNMpoiW5dqZHhxoq(IEXUqp%ij{OHXxad*_z-OaytT(k<2|}hgX*_Cd9JtAr4t~ zC#O~Tl35~VQX`;C01k= z)1j&~$ZJsXZQvpzL`9l%@;S=M_u>&QMKKWnX7{eT#TGT8gEy*vu%|W}O zy=xxbmzb|w{}oVquYfSckBJvh_}N(FRJJoqFa=@PH2ST1K7}?D1tR?uY0; z!2x&^I_!eU;_z13a9R1r5Z-v=vZRRTR4b%!9(p4NJ!My^(WI6PdF9uLp`P!95lZ3a zVvJ|{9c-&N_R;gkbJD@on4tOkmkma+)mtP2l#88st@))r#UtXFAm~uhawd8=LciAm(;M>#q~O&g&r-5)$itvj6O=Tig!31v6#)s zNJEc19OFtmNyT--NFD0HR`iG+~eUvz7U_hVnVtU;PqZ4qDVz2UA^ zHsXi&#CAc$zK4G}S?z|L9`Ywel#mpu@{rXJeppCuj-)7)=FD5Xrj_c&8ky-+! zOx~hpg7_HE^s|aEp`kN9>zzeGR*mFK-ba*$IZiL)s&P}*-pK6zv!dujGZ(N^$*w8P zN_-njU$+w3FC~Z~Do7VipS`ctgU&aetxnh+%kjE{T3!02o!7jUll3kK?bN~6z&Bn9 ze^P2a5KIX5C@QX6OlF(c3{4=;*%^-_ie~wVL|87AQ?;?KS;?-P88V{AaHde6`i{k$h`%8#2Q88AJ8ZY2lybUMbyJMiA_o^{b`4)B;1H7FRy2z?kV zoSX`M-tWCq{$th~S#m^I*9YQxXqvwHreKGzqWoR@My~LMip6TZF5Lpu`wP821szB8 zkfmsns}F2|!|5unCT1DU-RF|cU$*+=>DOh=&}o_;;Zn|7UEKV+`qv7q4ra}6c@4yV zp$c_}grgnwh@=jOI+jRd=RTJ~m+8d{gM8cii4BgGMpYl}Tesuihe(;X1P{TsYzj^l zD_Bj$89NVzuMOtCzjw10f)R=&tzn6-^C{e~BOMe|#=a6)TO`xE6mD~1-@R{~jQ6}^ z`V384`}%$^cM2XGOv{4LCKAwv3kW5Hp@H$laMdcch-=ex)|||+_w(;UeKmc4y>1it zWGt%o7!SWZZZouSc3foi>DfGf-UoL7MK0xcS;uVzj%!@H1^qtGCx6}?RYFqol z1wY333&bRlFoP%|nAGs3U>343;#~fN+3-`LVK-->C4G*VyGipBoDIkD8!ZXpRmTsm z7Z~U^v?)7>ZHBKHhp;q@E0x6g1c{=4;<+Y;h=@53jO)+~*P)@S*J__&FEr)p*mm@U z6DY0NsRKLMpl+Er2=`7Fy@YGLp86@g!$Bguxn#6N3Fhc3E5Q;o8PfnJnbvUFrSmI| ziZ3-JE@a-t3dU{u4C?pyIG@4L6Nh>?3{G-qU4;pV+TVgnb7c=U5Q^|#oD1ycH@s8E zRv~q!W=GKhygn+=J-OTT}8$&S-V1NKfQX6=r|NXenQ{n0&Q)@-7oVDvN6N2E?j0$4&q@!9exqcOZL3KJKJD+L zy$p>@s@~3z$_L*=d1Hn2?%_SaI5>8*q5HCL3E|sMJ-wJzEsYO1e|$*RDPak-*}hFi zJVKUefct6ST53DhJ2*UOrmsbU9>B*=3Ts(oiYJFTH)4;j>R>C<$M3~@!t+TJ9r-bC zry2#LD0k$DP!vQ-!P?L{^{So(wp9I9D=SZkqV3oztF7U9+f? 
z38xYiLh5b#riX3)Bfk9YNqloBQm#wNC5W`8siv|8LwB{J!?q&tx=Z6@l2s4e;HKgJ z;$Z#2m~V!4c+{DRP)nM`TpCb8*2S6G5mBqz&=}#{sTVKT+YU|X5db#5>Pnt&sM+;$ zP!ky+C9>P7(Xu`Rxy|Vf^_j0sn@uEZJsd11oeXB13f3c8yu_GWAnxw zvr%t}XHiek$D;Gyrc*ChAO_SUqUHs%s1r86w?#*ndJ_g{tsU=UZC%!^TK^vkp|o%EJX0<-NSgLBP-Qiz3V*gXBePf5%U6R z=t4QmF?iVOyPi!XKEn7=0Z*Sm#tYW7!K$-O7hQ(zna%%1^_+z&Mis1UF4*(1#W4#lSGhL+r2K(@ZZ#M4-2R(Mv|X zAG;TusP;b8xO75-7SaVU&?$$KQ}4tih|?ii(o#RpY{VXL*qoCsiF@DDc)dmaeqDzH zKDc>n&@!}h-$_}T?V{lg`4Bf}?-q0dWioMvh-Ag+^Y+hV! z>jL34WhIW+7@O0ot~5@ccq0WX-m+>imls&yL%cR$U-N=>zS`_pwdWfDpbr6^PY^~X zBgNbjk+G~b+IYP$I_XR|hfS(Wx@`c;OK{J7Pbk>_N-N}6(EmiEyrgdn#yEQ)d6;`5 zQcM`2H)cryS!s#mn=04Afhm77VSyRp+AKcXqxKO~x5yCBB(C(X3tOBYfV9s@?ORVt z@0}~&ZNuD}eZ+ChLD@8<1XA{=MN3~oI6`u2?r z!jY1MyIv#B=uTEuug!&@pYsV@q^U+)G!J1|J47{MP_g@e6P9>8;mwT}7^&s}o7m%r zW5r6e3X=L|c8@z)?gv>*i_H?5qkdlST4cQSrJmTkwpNfi# zrh|w3Y`JF)9`z2Ds*58H!0+GS$|iD*UOMXLj}^XJ`8?ewh3690miW~W4@k+EfSy>@ z4`|}AV0O5!Ap-Lh^EDe@UGe|!Fo?py$a%BN`A4SQsS0Su_D~V`yX+CCl`45AMwN*C z%#%2|r$O-Tfv3N^(2G7}34M3x_Ij{Bwch7Wb^Q&AtRaDKk^fIM#Y=g?A0fZoXT~?b za{GOtvkI_sw!?_0@dwTgNU>72LLSk#u2PvWHSD9pANhSiDcCausg`|jZO+kB)g~b? 
zNwQbx1R79b#3+B6f@X@E;?wBb1owdxBq9$klvEuTJ4-$r!0mg$%jXQdUYc}QU|^sy z^Rdvz6rqN$E{%j1^L*_&7eZ@mYX$ZW42DFRg{I#U6&+o6s~ewvEthNv@3AZf(oRG~ zgs-n}ARS^MtN0FHiF^{OebdmA6U);M?ms{8smitVl{(kj-OYhjQc_~iob9GPdeluU zzP7ejhJ6G_@bvV&$pcaK_VyNLR^bTc3*gr56f7lKW*h2CAD%Xb9l7VVp|*FYW$~W# z4K9*lPUWY~@4>o9>|~N3kBqV6cKI#=Jz>U>l zSS|Mp?;0wwD(UJp!>D{OalYoUF%$N8ZwodxuGPK(jinh}KdMjMztguE*Hw90cXxMg zIz27ezGqK+W`p2u!^73K#GgMOSy^S9)!n>#6C8aezt37#Vk71Bw-`i4#lvO;gp2pD zQwhiSMc{w@_txjE&6Z($c3|BOO|vnimI9 zW`znft*viQJM>sl&$%G(ai|*`8(%-GuLYA%x_|FpP?2l7esXeh);I>^q^=&S3ENz^ zC&saD`XF_k@k$Er7W)xLcE=a-+7mN#^IEG_IPZ-JNQJN`&Rok-=OPC5=u=VzALycG zx6PIDX4Vk-rrGYu>#IpiZhZ^#!xj;k>!!3zx92CPhX*Ll+4>sU~s3RwT>L9t9sy zL6bheTF*F`;cuz{U6t?tTc*#bYyO_x2k5ATBw(M=FUXUCX{qXwn zRm8P{#VXuzp?OPt!tU2xV;;+F3a<2f!@ymQJ1oj!aX=DixGb@JXg)pu*%D>4g8M&T zu-RU_NGrYsr=KWYzs*eCPi{kc#lsk~#MBA__!a`*NWu0{K9Pczo{;;NjP&zeqKwf; zGQ@G8D^t9pdns2({@VDpJ^$l<(h{~bClLDhq|Z(vAs>a>@ApymW9TKRDW`5FpATGu ziwaB~^YEVHNN>Ke3u4G9f8DD(Jp4?qkF&D|N2*#pBO{~Qd~#bx&up=7u32GuYO3ku z$G7!f-O2rkM54_K8&6V-g|O%`aJW{;m4ieZD=R%PrJqSjWk)`*HY|q2;k5o=8%_A$ zDJU>K7z=D>J9~D?O)MZFp#A$I4!5>eFbaN}cp4LVV);@c@|}VVR|^Uy!h9@OAIxfa zi*U*9i)9}_vX8FBsia4HyStYc6ck`w{u%LC^Cj0=Fc+^<%O)Z59)*3e2vA*YZEZ_* zbNlZf2yG0@tzzx^F8sUQfIOu24}X-h<+z(z^>j8;CEd}%fqY>)B(shu7HKp`CVsc; z;{zDio;(9JPHJw={=U8_zBQq0Ieaf9cX)I;V@)vTo?ZcWdUm!_S7c;lz0r;0ikh05 z?(Xg<{8z`Lq8J2ZKI}|rDx$Ad60)PCS9#!a8CnVBq2 z-u+ShF{qU=(hu6C`x$9za(X|erZTeFSXm=3O~LmB>_H6KTJ-hyo;^zKu0%!YTgBTj zh7%BOe2l>3L3rfuNZU(UU%-evGy|_SN>U=%ed7M)?ZpRwWal;JklZ&W85je*^7=D! 
z6H38)v~FAxC)U0=B)Pqq@PZ}DvD4b=ke-FyI%S2Ef{9!eK9z*r9r}^Udx-`Y7=t{j z470l|1!MGdb?NBn_-4|gLAPNyyyyaIZA)ii*n3=I(+X(6`&T+!ThFal3@zm6=LgQ; zv{kLEUBlt5Bo%x5`WQm$4@pvytMSyL%9mH`Mr~+39&aZctITogwamHW$BR+9$o90{ zTpJ4-&Xf=$%08+jD@#!ip`5+VE#>84(Zt@*a(XmVSFA3=GD z&bQ)1LqlK7+-X|T(n&jL$`{qywCv`kmq)sqmX^kROulZdujF!76k}Q}_FP|54K4U3 z=(CX-ILb_Y)E+DqVJr*%#Y&6;dz8G*&CQ#7(d&$?A(stQ!OCZxul>f)a(_b0<+_|{ zc#e=U!FnLH-P+FXQ*p5XW00b|nVB5T3Cb*YHGd@Il|Kc&aylzB^G1|1Po$NT)5GbB z|6`5lv!+wMhg^MeT};e6UmHWOqakwkXm?jvVDc1OBWLF}VOtK|~3`6{`;i2E_F`|yM>M|g#qgFP3@FXz|w_I?<$O^Awmz&NwIu~0Fa7ifFc zXvU$O`oIBtdU|-y&`IIbhdCn+TpRTus&1RZ?JqE?u!YbkA_UO^MR5by0EF+Wdil*l6KdmQx%0?Lm z#$#ke1V&)lO^nEB!xuGJ<%#Q1mCS`JBdfDvg5ds5&+PU_Wb*TyZFLx-mJ=0P-PKLq zy}f0Xl{r^wS;d6;`NL56R@n%MX^lUN;o%=LOdXILKW1iTz_gTMf2YimdM^OuTxYM=#%Kw{NM1@x z0j5Drwq@r>huqKP=H}v?p777$2=gw8Mw6_Jj3tjMb#--cn{-wQPe#5(-;-^ShR}`s+7-)fD-!{#-l4;wp~(y0HE+`RaLZlQrcuWqG1Zco&)Jba z$_YE27Qxr1t}1Tr0mn=lmoPn#Kuh*?*VjG*pJM1+A<}0*i9Lr_lJc8~`bWXUxL!fx zW3Q&dM%H(E{sTuCa>=lYNUtY7+`fdN$~QH^Q@mQR(f~FeOQTIKAJx^NmMwH{g6F`! 
zQ0Ug`q@>Fo1YT*q=EG$M0L7>5agnd%KDz6 zJ!F)*7q4gO1zVDs{|F~J@Qxf%;iDk^;KP{Gf7%uZgoc>4H2sB3o1gDelVLxg2bj`M zauebFS=W1xtiEJBdW2FTdAAA0)c@tR7XIJ4)l9tK3UT>|QeVl}(X6-kf4sMi&dlUY z@6gfI{02Eb_^mGaccpi&?|pY-S&;!Z*S9*(3o9>kT|y!wHpR~p1srzAYEat|oz7OH z{IFpqy`C34V1_0ODJwp=4QT)ydNVTQ*dopo*=#K+Bp`54Rn-a$#`e(A(6|+lw~Olz zSDl-g#mx`UAEoYEnVg)=&dvt2PlD?VojHxx6o}25O$jZ$nl5oha==;GTR{JyE zCq>awE4m+d5K2MsE>Fqk(9+WKmPp8Hwk@Tlp&563Kw10}k*Uc@_Go}U8ckLJ=0Uh3 zCmaFdWeqX62r4NNjQXDYKI^)GKv!#PA|nQad8IQ^-u~>_9-Yk2%O?*Z(ddBzu5h-^ zp9kve>i9$HiqDvb7xWgc<-}7KEbiv8K0pUDY!+-H1XtQSt(0Nc0BazR!&1Ax!B=iK z#O9mw&i#5K7*kyEb>u@#Oh=R2r-6C*nZ@v8c#TD*1*rB0JdqqZ8#$fRPI6$Wssx2j zM~Bn5vGK0GDd$mI;DB^KxjNX^b{;EY7|C4%mNlFpxYg{gMk^Uu0Dbkby=3ADu(7Kn zPP$Rve7|YNMvUl+?;~hMDkj!ua&dSEm+6iN{ zMU$qt%W*VF$YO~0P2~o}r1hTAP}s1tSwd`VXOT83tM<)-Y}6|%V*s+q561n6DoGstKXVf6-gIPR5m%9Uc z!`Y0Ej*cD~8Nt-{TMORfuT26AXQz?teE+3mSE;$NadB~+@Eg^Bm35e&_bZ@G51*tW z--#4VNfV}}sp;YAdCdxRamF&@SDaemf^R!8JS|4q1dG&APls$v8%->&tslC+1$C{m z&$`-cqabV?F2*G=9YEuI$Ea>4r+J)_g+(JEE(_@~B)o2g82jxojUG2P)%toYPC|N@11JIL^vj_<-g`|4 zXsp{zVHd9Nyt+zXXJ;h}aKB`ubj^jCMZsZO)1d^<-upMfZ?1jZ#)F_X;R8r35mXep z0;3_dRkj6hwA!vbNma#6`?PwKshit+<>*9jxHFDVyyDu!hSd1 zk=cxltSmLj(}M#8gnlJ&qUq4TpxS+;Y;3#rqeq`dm$`X(6mKHY@QAevmG1zUR$iha zgG;c)2BLyq0lvBozBu1U=T<4$$;!&snW?1TJo);`IQXxrNs-)6=)JVt!0`Sc9wCyJ zo(@E1Sq&>IE5Lf?;57bUQ7DvVwgy7YiZ6;!P^mmIJ30xuGtu|gcXoz-9ch><{tM=3 znRmoke0}cd%>IH6OiQX(G|RWiRd5swyj5XeQPaxGiZ*vzzd{uw+hA8$sOae?|@M*RNmc0%R@nI>8M6pv)23EO6^q z){J_z1(~p?nu2M@&9EcI5UX_m(}BD=>D#cr6T70%6fgA9|IhbQ_EWqUezifq-r0um z*FTbMNp|>uBPV!yj&i)>{K28PA4d7wP$;}(8qf^Ty6x+j=?-@ucRkH#6p<TY+ua^4XV9# zAcaLm4|Q}hds^*;D?M=wPC~f=*GIV~D-Hhw*pb{pqce*Wa^Gg<`>uXBb8IT~r)6V3 zdsZRyA?jgOonJ&{xJDxKQ~1tsugyiqAlMX5O~>2Y-&b?6hNMH10p~)vF8Tls5*t3< zgpI`}^!w>df%TN3Si3|yUEU9+1MF8;C;4rUc+45-b^F{l>z#us^P zBeW^rrHGvljR{wZ%o~Jl@ouHrkv2CXBIueKg$C`U>kz9~b@lYH;cZVk7jXmgE}@MN zA3PYrdZeeN2^NiA%iBxbKYVgqTg)I$oIL?sn>&-PF0-n5<(rQTOzHTa(k~PTPH~hL z;@*Ss;jCU+$E5eYrpyW{&Y8u&Lak%WWom{#<3h02CZ=J#TBCA4kw#V-ib|-c1nU|f 
zF0CAYgIepCMGGMraNF{sq6(CL2QsGEOPWl(+ ztNVV#llx8h?6T2dE)9)A9R2SF z{IAzUVY1*b1%G@V5ELNho_#>pVDZht+w_{O$?Q^X^7~62rlxYef9B`nET*(q{KL|| zihUFU)BI{(4czZ!+O!gn8ZF1>{jBX;w-*#(vJ8wrBAbc?h(;oz#VEgbel02Ke3d;Q zz@W^!XB8ISWDT*B$WnmF@-t#Rs(`3wEm&Gq#LdlZQlu>qWz(g59#^q*Gp;xBOb7qz z@9+O+3wVKyVYtU)3ULs>g#WQ3| z*NR4EpFJH+nt_u5h#_`C1l)xDd1l|~a9qB8>h(*okXmxGf~zZb5M$-t$OgX)bbn95 z$qo-s`;(~m?t%9(L6WPg3>YT=l2k3vK`E+hbJ^h-RN6KD%5osUT(bl+O@ZN!N12Or zbIJ^a?A-p*Pod8oxLIa*Af}dO-VgW~6`nz0Mz{exW-~5nUVI z3qFRj&>J<2xNME|+5aA1uWAvTK4+ar{O&F&7CH)= zO94i}<9{u$k2gn(vxbCC-`%B6M^bZ;K(Pg)n`JScBD#5;1U&;ZG}9dsa90f6eeHNG zBZfz$deN`8u$=O_qOlETgy2R%wf#go>2rNzhdVrYz>Mem=swpAs<d=#5GZq$>d~N0U_u+30XD`E%B#+#_-||f#(FX<=s%Qh`JCW#Vp){}@ z5N8T3rFYvDNDAnlX(-tRaCCG$=6}K4(7yZ4o5Nj({QRR=)uK7M!tr5uu^ml7%Fo>v z3;B{s6Hn66XF+fB@$pGYs_q^OE)e*Nmz~(9diBO{B*FADF^ES2nqXdN9cl`{TtBzL z9|Q0m6iP3WmzxWK-!9drZ$*2-Nw?yDG&VNMu%9>G$fnmM&bbRV3vw1_k9-m7pX^h< zAs`UG%qIwXm8>j~9Oj^2G{jyY7o^p-xi~wUuJ((?n8q+OGh5r(1j!|<#RI8A+MlBF ze6UQ&)oC(HH9TtA0qMKW4)g{xcLNO!R;HqutLsD(aBp%YC?jGmwI0a?%oF+K9=veG`dtpLh( zLKghhxbar`+Ecf7hivnM$ES(=|NILq=Of#e6>&V{$^&B6e91unz(A=;A~mbpjfY$T# zn4WP5R7*>CaYv)*I3Q$v%l5T5`xgY>vE7Cb_c^-hy zWBy-F_)PV*@cLxKYfaPzFa@Bl)GG~?LKE;bBDX6(R#fPs$YpzqH!Pa;MseoyzGqS( ztgwy-z@jY8p{crBBBZ{s$dCW+-P~&M59VW+L&?Kn^$lbq6RDx0(I4@uc5Q9VELMqw zmRUH_gulg^=IGIeGytC{pqMUxITZ-PuYI%KJv}_W1vZlBJ|6M->cZt+e}?%CC}N=d@HKF}QVNjnfsBDL&MQqFLqpbp+5*%E z8v{;9fK>r=v`iBPFxKy&xu?aKZ{J=j#|zuD`+J@Gy)e8{YembjKQ0{rRHwM{_X1&$ z0tG1OpJ`3KmA8W9_#p& zG*|R>hx0|6eYP#t(-jmH%(5EjGmN&I>EyK8e8E0ocFO1H#eHPO3q@WeWMKk2-`AVz ze|=+rr#F~I7SNcF(d$A|EBWC)F%GMvxz>We{SJCXkS3X$oeiiTjpT)N(>h$C;j1oM zUbZJzwSYkc0FUwXBCroYKkMd7B&ZTAWi8&4+PUchn#VoX`pg^2ax?x2b=QssbTk(^ zKR*v7zEpp*s!2p0zVJ)wE(|otybGZlb_{iHSGLVS4$`KEVE|UHI>GApbI+|Pva>{{ zbPf;C-hsWSc~lpZyk}d~r6QMv+e9E6@><$#*lwdsY@!Uthm519nn?I(q*wUQbiA#5Rg4fqb?2BXVsJ zpS3%GiHRxT?}lAT(&UI;78by*+Q=sc-`FFOiYl@8l&Wf)PC}nMdQ$ z3R&3E4bUgYiV37@3*Jai&@5yms}wsP$>Rf?dIqbyuBt`(A!w`rmj36Yf(sP2grcDC z(EtD2`=`HJ|Nnt+-;-VF0>0aA7 
z?nM7a1NL7B@_ce_*1s2n`%ORn`B-)|25^qkv83*v9{;Z+CVWu}7x$7yIa^}5AaHDy ztQ5|B&jEp#vI>EFI;;i5zWPV&;lr_RG*H47E`AK3oR|RohGI@!0b1;Odn@{PsXTFn zBb2k0l&9CCg{Cyo)Amlt6`H?mo>pVF8$do}*C#d`3GS5aflq5+%XZt7TXZ~Z|=#ZVKf7|=Ox`C?vwr5n_S-n|A0$vvO`p27QxraP>QtReqtvH$%ZBu&U(4crKSgivkq_h{Tvd6QmZv}9KQ zV;@WWOH=~J;6uW@cPIS6KIV&Z5Mp9s0oI!u1OkCA43-1qAv&5d${GlYKxpd+ILRjl z#bE`&bYT5xf8`ryRJ#zz>B9p2{0dCKe#*XZ9W)O4${8@F+m7Hrd-voT#tQ7N!Qn=A zS;S}i4un0Oc+%b0b|Rp5jI-}zn-y^k+XI0MFzwpUd0*e^hwZ87WPFm(~o7nTQy z;RK5*pJ5%x?$1EYTI8(^82U8D5G z+Ip8qS+`W6)abad&d$!jtV>E7$vWn9Zu98^71?c0dWDS)zO;Li*{}Z@qW2jouu~f} zpA@X-%8sS|JRgHA8L+j2LeUfJd}(BK2Dp+G(Ey|bva;G2=>4{dcFY;MZ;^hacro{a zjno9C8kZ1PMz!t6f_2mX>xm$_;)2s* zI*L_zkJ!_5E}r<0GRj<6udZ}LD0Jx~aw+x!tk&q}k9cE)zt!2ML0+4dc=6HEQMC_o zapAdYk^}(wLyx{aa0`(Co*Ht=gp>~Cy0$v)L21KNK%PTLNJuYN(`NmdpyGKpB3U&D ztV#%&gn(6*_f#i2hgZcjql&&vccjnG%%uEW-T2mUNb)QLMNfcS@g~ksO|{bcbV1mI zWZ3c9$eH(9Kws#Xj*X2aQ_KitpOeS|aI*zVUcdo$zI#*PA05?_NDh7h0TvMl8Hqy| zL+=^^`;T0UXBS`4O7wPjr*ZsAZ{;yRU=l`vhR1^7QxIj|9E5@)FLw!*@de;0rhDh{ zTu0ijZhP=0428K|rg@avq=|AeH#6%zK4Q&#@56@=ZyUj;sIj+D%J@w>jS+gM02H6T zJ{=neAWiwnqw{06WDpju8$Okl9oFKQ|( zZt3`dVaB)#JbLct78f0%feN*W_M^S?E=tXHb#l%&q612&RN`)>2Ep!|3|McUybP-q zS{sP;Ik)uW12NRsC}3N#6j@lJwS6G%#H?dhQpspo}M z=dg|4XEgD$06mQNOgQ0losW#p5|J?=DPl+jn146?m6=d_g{B=6U29}tuHPa-;Qm7i zuV;`6VgU^q=ZU^gpn?9Yi^0iM+b?3e1Su4Y%(+M#B{Fc08~(E{|9Ee z>_E!TW%wh0?dV6>KP*o?0rv-1fv`kUozacR=2{-|zcKVjlaN~73n%J!{<$q7?;}g) zG^t3C^f(HN(6-#C3*^2xNf=izuUL5XiQT-^mHotPTwHpk--B9qh%m5sW#3IN0=HSr zb&Gx+M7$yss&)=S28XpN$jBc)gjFcwF5`FqFEQFsdHMR**5gFg9bvwR%zu}#xGp70 zpU3M-^;Z33obH>=>6fRacbW8&OBv*Yhz{B}U+9+nu-=6eCu2dQA;QX-&aLC%&OoK( zB#O7=aIU4!k}EM=8*Pvw`pN~5%y-C;u&wh8WZq0^4KpvX)zTMeVuPg8ZGsTpcy1&G z)B8SeMeEUeuYdEr#ac3_u|OE}BoSoB3FVaH3dsKmpZ1;^I3qz6_xV&3dIJgqbxQku z01W;7bhk(@c4^$`)gg9SY8vWn;-5VOSoeDz+aLKyc6t_WwD9b*3K7o+#aDwE$WFLO zgR}1O+(aC-GLw7Gz?wt^P8QNr)y`5-15Mpnrg60#qiTa{)h&Qrk1} z+t`HcECRj2z~rXj@x9NaOifK?WosK6UI_*~_x^{{5p$nNni?7w0!NM<0o7S|d{^EA 
zbqd6jy10B%-9MK`zJh#<8nU9|Dr+nc3j7ZA;9Lr+LO<`_yLTHK?%t^#(}6PVx2e`zk!xE~p(19zKXR0r}6IZ`n6>(@vARRR$VkzI*~lBzRbdZBG5E9<+`yhBBpkJVRTjS?&$ zsPVRBnYkdmr3=lO2i_LAA#KKURi0-7m8z{A5s@D}Bmz-}%0 zhp2C~tFTbNxV-{~09hqPJR^|I5cvyq(6Z(h4e&>DE{_+yNWG(27zVIwbj^MIAqI`mEq*LCO|! zE`pmWF}ybB>&U0??-eeZ1brMT*NQeC1^g6*-65e2-9~(Yv-N~O%M!p8N5{f0%PzlLz@(tbbs7q~xXTzs zlgSx6N*Kkx)B)=Xw&8vHbYDvg2R3#YpubN$si>%MoIC^qBF^;j#-pzHL8&{tdJ#Bv z6jkaNk>;Ph{b$i`VhfZ(N});DczHu=jr4lZAYK<75|VD60@%8s>R1_Ym;`>5v@ zkoInmesab!YH_o5V8BWzIX&GRM5Q@L!GK-Q=p+z*G$a8)6JS`}{B)j(a+~!37hlTA zYWY77#Z_`4U#`Ej}wN3#hx;u+E0| z6uDgSQMr@91=_>Ovv9W*eB1~C%Hs&QlAcZThD)y3(9SR^Y_ujx<82yB-uvjxf-A=N`XD1xpO|vk~BnG`|5RUvm*iZ-L z`M`X)6&}Q@oPd(d*?YkavkO#>d#q(C@OjsYQoQtT44RLn_I6@z6m0S3-#B+paIU7$^Zg$5fQ&KXTHpD zZ;mkyGRgpPdi;s1bd|XFI!Tj@yM#;Q!UC7oX?xWkv^5hs)D5IZU6?DKz0R_+z%-1kSp8ZKkFWJD|74%;@X~*r%`ykRT|2)VqN??*`%k_B*`}8UCkCr&m z((qck%z!_h?;fHz3jmM)$5`5ZBHe(-Vq|25uJ)$xBun~0Uy;I=D*I=ZgJv$`Dg!nX z*nN;?Pr)#S+T7dd)%y&eQXL!DkFtneVa|kaN3^@Dblkt&=c^ag*%lLh)DE7SXV! zpEV%V&r>sPC1UsU>H9W3-ou7R$L-wl`;sBCd|D+oZ*@TRH?aU4$Ue69>jTV+M;yd~ z5GKPZvONZ}yy%BFx@^C3y$Z>KTy(h=N8T*L%gYPGd};7sV8>GFphJK{I#?UmK-0XB zwzfI}W^Y_UJ7xBo4p8q39-aE0YjX^2Y{-JO0SAzZcXkec3r>)T)H(ttC!Ay!3uL+e zL|^9bC=?chnL{Hb6N+C_l9BHw!hF+P!tI#~u5E%46=-|4r429OD(;pL*ZPM?->AUA zW);DUuF@F+_2oV6M?eB{TchxpRI!M#XTtnz8kP+$1ipCxhZOmaU`i>nTy8e<#fuj} zB8fKr0pgD%D$Q3A-+^@AAw z!h)@Hz93Hb4rPzo_i}us2chJeqvGPu zNNR=}!a<<(`R>R!bSB$9hDv}H{E04}ezVH2=YPn|kN zSexg%LZqR70W^?Hg%_w4v3~&zff$sC&+#%!W-Qfk$z+r&+=r7Z;VwLv>C?`AfeAM6=OL=td|l5R_nz|P6Y zpMW_4>$$A#!|vRALo&0nTVEOhL9$GUfx#JCmB;2>6yN9ch?K1VaVV_M8<>TTx})kc#)+blT&S-kD+j2#@*w={vkk$^@P zzXOi67V2A!`UJ>^QCym8JoP{_(nCfXxY=Ng!j&I}Fa{;cxC~K?V1!KDtp&|vTz!11 z%c#K+*-fq_bz+2SW#UvO0dP3>4%L0G1&Df0_Dx;r&TDLJym|0ya$n+)_-0cYzl>Al(nPwAYsf#h$$GC zbsfD44hyQJ9To=$JMcb1-pYhegfiTT`wdR$11(!sKu{3KZcfsu-k(kVLIZyh*pJ9n zX%JB{4FZZP_A4DHzPZH^91vXSKqhj(%C`I?O3s|E&oBX7G~mQ-U?AZ{)Uar_6Fg%)g6%rBrY6^Y5y{YPUn%T z!3Iu89P9zf;oZ~S$?NbitbkPQX1?^Wi%sdp#{CasOGdx9hbgMVVE8YCeUgNP+s%ga 
z2S!Na9idv#0U+B^#DcIgvK00fYzh~B5rQPP)WgPcDJ!r2e#mdZCB58NxLFi-g8)e> z7~C3?kboopIKV0hfg3D@*GF!Zx&G=IhS3b}-lJVER%moI3dwR>DEzruhE}H61npJ? z%eDriul`#wQyW_8)wBJOF$kYtya2T|8zZX*z)F6O&#ogGddbiHL8kk@?T5Bj1~-p<9lf-n9sKPe>tz?RqrPyR zvPDX%#t*`OV_-fZfVlkYGAp}VZ0~PM$$-Hf`0A?fWm##$N`8U>nc{AfO&zRO*fKrR zEzg7%JVGoOsApsi?IZFLH<4)Ki38hMfo5}QxJ4YN&(CP;x^L&>DQM%&;ue1OMJd_` z@r?kYIq#R~4c(p%(E#RQXhpHvsLD*FyPZ!};w? zr;B*A2XEEO@{D9iLnNjnH@0l6u$ni14nUE_Vb4VXmbpd7z8wQ8LCP|5A5#jmCI2rxqi+e60dgjxOR7mN zG&f)F-0=*sD>)g?_T^x`D4{zEF0fDuUg5 zgQZaPqy$ccGvStVR(ZLIyz$7t)H-Qc$86!XLO0Gjcy$$iM##3j7NWi?Shx{%=FGU^ zA^|+8q$B=4vG@?z(4hVlFL~R}{crdq6qV(#fO~EwV6E!vj-UxQxW)&A!1@AwXSJKr z2_!Q;4t*t}xZO`x?`laxTgaX0!;<(Z7Qz~lhb$#fq~3@sR# ztdJ@ZmA}Uhh3T>O83j*vu$BwxO70qTH~&21mJObC7Ucz=oKiWA)61pPxuev)2&r`A z7}Z}{pELJoVnUnd>zkJ%Q;}P6tW`N-;j67{u^9G;YlQ?hQ_m!Vjbzt8y-7plAc9AR z^;#qGp$W0%WN<6w>9uKK!#t#S#l}j)hQ}S^z#cJD78jzA(YzAR#U8UA^K7vPla+5o zcLqv+{t9?l%cLWV7RNyUa5{fcHHig6oH?}ou&qPt^W-W~r>;ES&BzHn(F{OXg(i6A zN=kj<;`XDfeAd~%c_3Hg62p+VD*4nL6qlwq+PgICl0rEG5l430r+7bWVqGa4A~gu_ zdckhoWqM9982I`5Uw(be8L$VWIdk*!%&fe|(e3 z<+uXqVB=i4tn`!|6rmU%XKn2jnT1AfiL=*Jrl?-o_J+Ar_e%%BhQ95bUMEQOgz0kUdp1$LBth>xd(Mnv#LDn%q>Tq(2&Yza2kGkd3&g2cNl|B7nynwSF5 z=z0X6b~olkk}H8%rz#tkE%|nLgMBfSSHSvH2zb4 z#J-mJM(`*kJOMnd2QaxRwYM4y48%*6J$&OCV8$*%&JfQWDAvwXpNIwznQ9?F(5@2z zM*;9CBgwZ~TlXW~iv7c|*~gE-NE`K?Hdk7=F9B2pV9Ra^PA!uDAJX1Cp6dVaAJ^8T zMRwZRl~HCX4Jwj7D-p7iy-vfZBuOHQBw5*U>{E(DlAU>yY}v;+oa6U+zRo$N&vpH- z>wEit|9RhTs`qiu>paKfabFM0ug|M+_s(*)iTH4bX}Vq)X{I5txGc@)l1Fj>kl-f{)Ava z6sMCONKv3|N`w`vOykesN=~%2%zw7b3Oj9UCynd;25xdl(MymyaHpc**5h^5Ic|J( z2mUg6*G5r_8~nD=>{UTl*3+q*V4Z5?tW!v$@UqBysi^1Qt1Q9sPl9os^^m*9d##r*E48 zf;tXf0Vr|cgat;ct$YdXCrVow)mJeZ8fYPEA%xq#nHJEb!{}8IN1PXqK^HR7Io-72tWaQ-9+S^5Jz>ER-S4Ll|*Zx23 zb^L9~ZQD8x{);5CM|cr{CghgUcxOL;#Yak^t5H z9JJjAFZ1;D*b{U?(=%UMQVH@=wl+3rYmr!3lje)FA`4d5*>$9(h>)zx4d{JgALAQAt^meDeW52-p^mY- 
zkS0=g*&d4%>2G?RE4~DHlA4V4^mA?BFVgtyjN@3-w>N&02DogJSVpbKKk|TZC3F$v%=X-!BnA?TA>Z@qZ?T$^N{*#x4qG`=MZo;Uo~~VbXl}(s&v~JS51j*zZM~vKxYs?7M=5hrg`V@HQeWr!=4)-|jR}-P z*FJ^M&-6DUDj5L~F-ZhaSTtEpR!R{8u~YZDvO3jZpTg`Kr;qlQM(aj9Q)X@DrY=eR z=JT5F@Z)tecX63sZ9H9!YnCc<=e#ALk-RT>)lmTN3ZoXG^sMV|DQD z%dnm6yG^s&z7Zds5UHFjuo@K zn>XJ+3OdcfnQ?6eb}gUSPf-$MTW`5^mag4blBAvL3I*9;^r1&Z zYP4QaZcYPk01}bod}}cD5a>@al%nn}bu22fH`SVdj&|={I6>)p>qoWUI+#1$pz!fF zErSC>T^?VEVco!jE(5G1JP%|i{Y2;$jq`R}({U8OrI8OT#)WPn&oG{v^qTMm>=d|C3N*Uv6aI$wsO1R)jQjNi$^e`pJDSx!DHtXh6M z_ZmWGBW%CQ0x?R}-#b4Fk0vklB0lG-?MU47BUp)1e9z=u?Ce62w-`bBw@RrcE` z=oOm6GOjk2{PRCuoXLe)n;rQ|fEw^W2G0+8-plW^v$0)D4@$HQe$4@Jezp`R+vYQ06bY39 z+2m`BmyuvfR2NgZvXvEVoPhBHdYWkO;nCXXpf&atK%;$)8LD0<<7%VTPP)}PQXqlt zc;Q4V+;anfWtIN&tI2xYwq>?gbcqbINQ39-`z&jFf$RBA!BQI$#}0r=ZME@dC;LDv)vR13{Me7yO z;(qslTrsXD#i+G#lq}&^a<>0}Eax@OUr_SU$1Zz|@YhA!c`ivG&;3r7C;TfQJ@$cHb(;#ih!s1j2 zK`$NW}(kFLz>f1&Who$|= z$g(V>bRA9`uJ;4p7ug}hcWt}sm7*vR0x3I=2f6c7R8ODYLIOD&xVc;4JrNB=LQn2^ zrVn2A3SV8@=>0zqZ{3Py!_UIQ6Aw|^Tqq5dsj^RJt2vvNmrB!?(c$Tp(ZV`sD=Vs9 zU#1TcY1X^~gYr2b{DEd#(BpTFtK z)Un%}blR*t_Y?P2*+ue`9__eie7Ow=)ExGO(JluqvF*I$X|N2BGY`DG(RHO?Za1Ne z8b5PMY>A9m!y@wPuodsVDt=b2q?|6CdLN z18u6FKi)S*%?1$We{MI8TZ}<8u1x$pyd{Q<&^?ZCaf>o1hY)6d@xAewvYp>MA6Rez zed*nc4l@q~Xdma(WcSQ%T@6>a$i?Hb-52$9NCB)3-2J(P!1<4qhER77>~_tc&D?r{ zH(QR?`ynf@grpIHRQ|njz9Y*tW9@xMw9($l-Wb`ah-FUwc?{gfTUii%BfmLp1*@0h zNhEXlu8n)TKG5|$n~mpoZ_LTTLt+#_sKkJSLy!7f0&|4FSZOy-%)wN|W!l09$Kc|HeAHVFipC2I-n?QMCwS;1NVFegS z;57fTgc(Qi`vzxSXa1jK;{q=n8-=`zI%QX@jW4|EJOh0x`lfq_y?pHK?7sbW0w4fX z5SpMmfo_`<|L|6UxcJ1+5`XYY_Bp_%s0mz40_38^;6cFxw~CeIRU@UQbzG-6mD!vt z+#~`r$$`Ar7SU>Zo}c9k0B0V^e5_Q(IfeE1V((mmT!B?hb^|Sues)j^0%HW>1>hR` z`99Gea(}qjrP@~4)@~PFyEY{Bc%h|&loZ6D1gx8#orSyjUc;s8i!D-G2On} z=hpPpXy5N6|P51^7U(_+RV`q`AGSJ!GBbaK4acyIinq9!Xz@H|yg zDs3))p=h2~Rjn}QbNv;RaW%a+&1x2OFksHICOJDeK%jt3aM?-Mix>IX{DU?!-5}Tf zsWv9JdvA+*8KDF>-Hr`kg&{d6Ah+P>bGRD&PpuwxX_fut4Dnc%xIO+#ex@yfz}c&e z@9@hYC#^Zi$_#|p*RN;Cm+YZE(GcpGETqywnZT#aEF4JyHvj}s+}(q|**|M;U^qlj 
z`%9Jl5)440Z3wCx^Tnp&Jx`Pk8^IU9Z0r=K0)jGjoT*rq852cLOr`wYlo%lCREKSm zAx&oCAu}kUCSdQt6Xi!h^$phxyoZ_p7IWX`>Ngo@{6ZRACY}{L5S|gd$Jx0%q+)$3Y-g zX52-f$%G8I`3SrCm)0k-gwnN z^&F9fA|aA-@4jWYRzl%5AIr~_&c${8=f}H( z+9@YHx+^2{QX&Z6Ppy`i@L)?fZmD8U097kdTSqGC?PCxV4LV%AUKm}XM5>Y(^?EL{@Zdxa)o0AH=36>b8AamC)hOfHw~Ap?+Odm?@rQ z-W+uDvYT7D3vG2U@xOiV4jX%>=?ddqO|yYKySx;_Nb%~+u$oX8fY#Ds9`x1jV?o8^ zijBp}Q-wSeI<4P;o=IN^5gt?_W@uAbmhvBCKpEeR&#v?yxlwSv%G$AI-9T9&?bbj& z*|Oq5XolYP@L<;?bTi+l3fFb~`0=9UH{{_c?+RAZ)9t&uS3N;PY>R{R^P7h+*xRd9 z%HEkPU=7AAU2IVv-<`27s;bBC;Jxh2aPhPGpK=GlKvL4oA*2ivSnpJH)rkLwW3zwY zu}Y&(HV3-QZ@7K?yW?fOp?SD;57sE;kG$9Hw*}dlUeF%1Z9#ilTrOYU!M#qUYQ{sJ zH!vdNxs3DIw2p1-wTX^GC*Sv`fjh;>jCJ}1t2M4X>qe3(0QZDy|p!7lP$S!tPyN)Y4U-6Y9 z=QJ2L0qK=%!ApFO&%flcxIfiRmRA#UBy%z{E=|ZvOE8N zeh>EDhBq2V`rSgy$(dGIp!3g88S03K^gem~$K%7aV-z0o-yh=FQcUTE&hI;Zb}jjp zE}U3=YOm-8UO2Jodj*-wu`C^mH$@FewA!uO0vArr@)RCLuSM!@mTi zw~Yf5xN|VmYC*&N5)lx?LvXs2uR_UXtuQe~J1>0{kFc@(OAusT$6i@ViixqV^J~QS z_VopYCK`F3Nk3=%{WoaH-x~jIj9*}IAh7?AY)b)pS#gVX=qxTkCG%y0lBmR)^QKW% z_?(>ha&=Y}EmWL4p;8^7mf)7tZDJ3l_>u4wf>b(w62fn0oUiZLGGm-Gwh&ILkfXRRA_;j5$ zbnw4T4ud05V<7GAY(vPJ4kZQ=^J^DW>ws$}y6yegdF@QCEU#*HFyF@;V8Z#((TkPt zigc6`&bfnkSlf%pW3_xPYMerG-L72N4LHHxe*nF*qsd$Zj`aS{+$k>``MFKQaJnJ9 zMUlsMWU@YVzQQ&@mRgjuyO1#XJzrJ!bz7+Gn8NsSdS693*RviPlPU4$ zZ!d;Da+0OJWhY7h)gAQh$ID@b89zUiXsD!ESdPQ9KzoY`{oR7i6Tr$SNMIkj+Wp_} z!*%=SfIm=hkDrV1eZ4|pJC0HoHCG1Gji@T;se%5%K@X;NPihsoex11?{q^>v!^U_kz&2C{{dUg@h;Id)d{A{%`>YL&(QZ^s)$V@r6 zT*$Ti&XRYLBkz{5ATSOD5bP99ZS?^oT;)7L*VBD~-0v>)D^qE6i*6;Ax9c}?8ANpF zrO0qUDC{rL8cWNkBsLZb)(}~6(;q;dDq1*E11a(3QZCy!sj1iXj^Lb^&K7crqoNA(6Tvgn!z7uI`2+vJPj@wvRLj*~a ztw$#pywOYV&>P~iEqKqd7I=ng<9Hl}(y2f-fqXR!JxJYrTq!I_IYkoNw{P@4ix)IP z<;Hw&z0scaL{Q7**BPfa4uG6dOkQ4uG^Sr}gU`>+ZE7{*Cn4-td3Il@X%f^!5K&gJ zgl~TOQ-#s_H+8RT@<5ylm&bK~(BA5{x^c0wvx8Jrc|IvmeLi+Lkc336?p)6-vu`8{ zlKeR+?#0bJ>c!W9jgJ+{`ez$Fpw2Y3Yv z{pv3tZ3n1LYhWrn+*8Ofv8K7A=mVQy=`QkYG>d0%&QISzmxZhcg2V+pW z8+o;33SnE8I&~sWLAa~RYcf+^QoQk}YN8(uGypL>VYHMhYWm~uUtLs=kqS}5n+6vt 
zatrFRz=8;oD4Tvci-ZO%?%1#oADa22xcJtlW1-3QDCR3;0xfajUB!y(e!XiYfU}5 zF!}Ay23w`rtk5(5CDG}3spSeQkKthQ$vM}kD9_0zZDY@n1DwEBy(Fmhkdw@gQh%&a zcsK^@_2^!e%50pvT)|kmV1HKtm(;JWs`1i8PX8j%$|)p;^v5o;}C^p zYGeHNc`PNgNTFdCtU_;nowKsDsnp^5q8yDTsfJ4tm6fkHo~iv6#aGia+G#2;8Zx;v zM7fNhdATWLI6B=W){HGE-o64~U#tA7TAFK2{^SAp*2_JG@{MD!rcSKWmKI0bR`Y$b zE!3b1ZTiG}yAm{p8{%W*QVCiHz`lc_8OO~!}c&q&TSw^ zRCEbtN2_xU$XBd-T2QFIX9N3HO4gHH%VxqvjSAn@d8ICW5PLaq58BUKRsmrx1X!dA z{UmLkT{F9%l*sFe%RmWiDEX@p{G|=U)*-VxP6XvfqduM#2lK(}16(M|i@aK*)UZFb zswWsd`42TJ1#CF!uq1*gVJ@SisI)8Ha9|iD=!qOWJRzZ|?Sv@0nxP483i~*~h$WI5 zmgf^xI|-fV6CTeHI>E?M@YoPWz7<`x;ale?yi1|AkSb7z%i@~POdTu_+pBL8;ZKHw z4dOP{YtNP!X_w+Elf1rc6sx;PCN~m{62nT(G%U^5-xa_YOb`#Wd@W1a#Z_N`SJGGS z(m!YMJETUv@7woZ@2{X;(<}P@EKXr6x9ZePW)$VkV+Bf|YQ*koy}47Qk-MrMKfxY9 zUmfT*0Tef+!;ShIx9U%U9lb4U_KBQpzJZA2UA=M$^;3Tm&eQiI_gSSGBzb-6Om|VV zjFn`=*K$aioVo}=X1)JVn9M#R87%4AP4j0-jnl+&@CVs~<^uuJ28JiZZQcsxSH=!# zHnN)aOBwY8{OR})k9=*P{%Q4|o+2&0G#CysMRtQ`Xp8ctL z$!DIQSBaWa>r*Wa=JV=c_d4>GSYsB|>Kr%j=`T{pQRle^>?F$^!yXK>b&t=0CASq! 
zG9G4udV0z%0cYf8=#S(iZaC7@(|#mn*fDc=`8lw)Q>KWx-_2Pu1*m87@`^m_R9ADU z<>+nLdG1TFOCUDe##5%8>@GExw+P|okhv+RC$Jp#PRA&Hsd>I{`KqQ0Bi!3CMheyq z!mF!|^LtbG{|GKkOw$R?)B) zjK0vIp)}}5MKloBpCC60#itnpLmnQ+Swq>zbAwuL^GF;a@$i83RcN0Iapb5fK(7O7 zFUo28)c>H3oMaYO!2(kfd#;xLgHZ_QsjpeL4g%oee7wBhqU~Ua^0M1U64oqT3|Z-H zAx}^5+Hg0=K&0t@ZJa9f-<%PKo-zQu zx+@%AIaaY8P4{2F-eofWF)={l`N3;H%R>a!b6m$?Pi$Xx7B!(1A3d;T(!Z16AR={W zaA_Lp1c=IVVrmF_MTqQ!*$Ma7g1$9YeXoBxP!FICfMdEa?@FZWYY1^#1xjLB(k%pu zkXL8LVT(<_5a2ngh_LX}R4QktnJ_n=lagGg3GJjQ7Z-@#%Ov$RlvyA+YR1E;e5}Q) zLrDeTyVdL>OBdf%j6UoOm@l?$^m~WFBD!NknN2mgpbq|oBnY?@_jmgEEHhbdZDP_1 zQs)N~cy(>dq14YQu0yu5rRr)aLa(6+5fX^KaGF`(ee#VDsvlHhpE;})!+KY&W~=P7 z@2QGv6hHIknw8a;u(YJp;tXW2^Uz8R{?Du_@KF}6aN4~b^FRwjGj;_(&T zu%JvLk2Wdqg~qb{Cq^0Q^bUh9C48R2$KEoWX#7$cAyp#t`+D8BpwAN%;(g12+v+BT z%Ix??W7TSjH*emwlAP_1u=NGOSWB0QI|i;G3nO= z+}|BqF9o@CjU*(Rq&;_R<882tBOnDVW5dG=LU18%6$6U0m~nlSrUiB*RgtcNzmSOJ*(gtk$#ULy$-xC#nRJ8*n|pY4hU1KvKkdNKKG%aWT^ts0`Gk>uP# zOPdc5*hJeB9WX9;R~yFVG6M@GebBgyqOg5f#XE zAZ-a&W+5E`keBzU4<0;#^#3dSIrc(&&qtsIh!}*D@%tw%Z|1kIL#YlgpXp3Ac8;=M zSnh;Qden5og*$w%+Sv3Rh;usZ9Gsk#Z5f1Y_^%6Ihs4CTrdWB6-FB#HZ%?t%6Welm zY{?QCs{*fqZtch~6mzW5C-QvY*#P$na3hAjq7SVPN<$~yRZBi*(g}l$eD|stPdfiK z&|rWgt!T@ARN0_!8f=8mw~Q#@T!`1l^oIf)21+iV5Nh}xM9M0Dg}IUQwF>8lKdOV>mJUYEu7j;v>EVx*{4nI4#uU8wGk+Ba|#Wn=+lLs1I`z{ zhviQC}`2^klu=8XEl67K<-??i_(cLrBHYDXNcCDu_`_%*tQcnnCg=cLCJ!mJrxWqi z@J^8ol6FpZJ%-Ct9Yu}({khqi)z*9~;LZ@LihgU{(m(%8{25Bux|IO+*bPLqO*lJm z)AvVgGbtPu80ZXeUy8!G;VsZU_5|LSB!zMZ*N0ZbS~Np&3aAn^DR7|)v0ayb4!#>W zLRrg|N$Sv>0>0B>7atC0oxo4qg=4Bw2E9?ipqY4>n54^zw#D=#81k34*q;rLiW1$l zYR1FM&Q5KIdc8_+T-=E{drwcpk9FAV9dsUg))51ZM2DRVlze#spE^N6?KY|$ac)OO zLtBb1KfGOx6G9&xe_}8*A>mYrr@Zm5;B%&Xw(4H{aRH8T_w2VBtE?h%Tv4j(5x~!qy0;En-+mXrvC}; z&~251iZYp_Utp&GvQ2ss_d`bd|x0x`F0Xv z&7fK;I!=JXLHpo@3aBmu%ahH!AWX6Hy2tjQlTOeEw_6YR-@lG=Q2I}YpmlUrzSdMC zXnh>J@{}do68DxU9^^ibLsvLF0#u~*+PVS;4pYHCbbNF4ymYCU^^<{!Gj-1OOzpAL z!1BGiU}$Xl>}4WMOmZ6<9DHn@X;e^S27XeR=V!0?nC7>&wn7rtaeS|+SOWq`P{|0> 
zzvZNC+c$N-DWZzx?`#2h^97;g&+W05p8cp!ag+bk(TA~@O9DD2i3kLv(F2`PuU^$3x z*!{}rg8b*g^8LTPumVJ-C8c-J9`F4}=poN05qy*HjjlE5pTm(@SWRrvHufSfVNt%_RVVvtJ>2f` zr~aH|`Xua^NR<+1wE6CTj{4Qp{XsQ|4=aFBksBEZBi6QVo>WZ$T%PIFu_GGaj8{_% zJxN{jWBS_z38@38hEwGxhc*UUOqoW`ReAdXu1V`%pbGcw8wNbCe%K&vrU@rSp$5uV zJ184YJa-#DVTUQo8N1Lio-jODFwlIyK-*VuSee>$Az)!RNVBi9gjC=d;>8R`w~brN4)+Z?>N9^RvYHcl@JezAPR^ z`^OrM$+5=LPJ`dU=QGPxw>1tY`8k5g@#zWsPG&M}kpcE(3o`o;2dCsQM6D{WlZ)u`i>XP{8z%Bzkq4ZC(iBB$+OBr{^Ll`I^QvyIiP-N_&Pr~+ntBZY zP;{VJ?wv!xW@eQ<#I$ZdIuo;^B%6N@P%F^C^q81qLL0$9ujWMn3~~xBK=q%tn%PRI}nLCeA!3&J+@73q?ITR-ae9E!!~5=km|% zxXn&le>F$DT1^G)=v@0f5K;u_Zx!#RdIJgtKE-$sj@nbTn2B5N^1H_u&o5WyP=uW9 zLm}v(Q55E10*MBA)1?TTG?Lqsy|&|u!mMstM@*nRS%B$X_0i?0IV4R-9`jFpeJ=A% z87l%9ls*$p8<$I33z8wHbqf4Dkb7bzq5kE&R`SzHfbRD|Rbx6;*usB* zP~$>clP@2jcHpAFR6GXmze#456xWbRY0tUV8R5Nm>1GOV}KdEsL_O5A!Nj{`9EbB&8k_x zu{6pTQ^W}I1{}|9dUKBN?lChnvsVwXGg};#AsK7gRzLa8M`JzTzvk!08ion*fOmJl zc1cSFo{`z_!+<^vY5A;jREH*|Q{S`cmDGBJ@d|Lua8_R4YjQ27C4;Q5r?7wjwyU9M z%IT`ocsE`D-ke7epk|gO3a97s+sdoALG(R9$A1{AZQg*}LsXtp2oy*0k8hc$bOe=V z=>%233=MzHQ3;&d{gPNbPapINQfyH3+vT?aysVjKHg17DWKXj;(pke~HIuWQtcXxLsI|xfxPk2na<@f^X z4=g3^Jj&E2MzCt-k*^kcTrDE&$5NjZk_F}U_H5EP6j7;XKrw`trOiJjB_%gE_oZ^e zK62{Yt>o4B9kF8_X;fw6a|xFMx^oHYYQfG*>JaDx4R{Dy(EkcvtO}!rt!{5gdn3Xb z!1)`7xq*BBz8Cnq!Hj(1>HWu;F7+&QWvq|>Z`_*v|DI=eD^ zUG(>4=1aidF|-Q%I-mCs!^AbW#Ybg%rPx%F()4qOU2mROU(L=K%Jg*3Tk)w~+W^LW zWqRdG4~U8DzI^!twQI~x4akMN8qNm7r!p2(cA*yR7v0FK`9tzQILSP9RJ^3PB&3*A|4;D@Q=JL3v-JE;QP9;u(j5y!0MTn( zNESz>3j86CL9sUAsSQ&k6NVBMsoxeTQ87uGryu{Os|7F*2(iUT-6U;2XMuD!jxfFf zbT$nnXmugsUZBtjJrLdF&du#x>1F!Pme)n$U#Dv^nS^9fV$AzvL2G;LbAhEP6@Il< zG}&1e!^N7?FljYNhmbG>0H=}02<2^JMiKfY za@U$m9}2Yeb>E2F*D|C(`L)KKx8m=-cx{n5I5i6zG7VP~lMlb0^c!I`#TF9m8(+_% zx1KvMx4;;t`=+KrH9UJ~0Z`xwi2XrwOLjJ3wCb3s!VFz&TfJp!h#9E#@%xZ&O*uQ8 z{(Vi!+?E}bge{Vixd-W;+634*th;CA=XsG3hvW*Qd z)C)`Mi9A`NdW{SXgkk zF_KNuu(U1h9NkiDhg7R?&t1pG*2qrbvEnJ#;l%w@rIt#tlT4irvek04dQ+{Ks(~Ix z)v-Q+xs8Xn{;)d;YFSV|ycsAt1``rMiG5#5)&9GM`)v(@`sQYV)%|($`}SS5+0G_d 
zbX9id$QC|6XX)prQF9L&>Q(ftveN*ulN8#rX8+70x*`(m`laaN%5ykZ-c{Z2Z(bn& zdl78F1l362bNl>a{-vm@qWy5|jN(BUr#m%^r!?HP{Pf9p2`4{`l@g+w-?gANT(l!K zjuqn2f6nwor5+M&>$_)Z7)n$3Kz{)0V;~WSqJ4RJC|NCC7irIOtGA}lh={~`nw@MM zY8Nxb?-zdlPPV-Rf#g?P6<3Ybm_MQG%F=WU>J4DX54tiSKtlStBqt1FbCuqz~*Xjuu#P+OReDmF}U#}A1e&4ZrUL^lwi<6E;1%vC~?X6@S%SP8q?87OHfE}P* z`f7sby|#s$;j0!%&`wsVLyv&rPPaS;7u?h;ZJg`UG_HY#8tVeMxhw9d%MoM%X>Uv5`E0&e*FKZlnim&8Dgn-3$*|V_Q4N$WOmK& zTrT9s6L36$tjwj=B@bFIh88ZGN&*Ob=Z2L%Y{1~dY1WeW0}gQU_Q~)hq_h6B=#}#y zl|w%NnOf+=_RU;uIirjj4D#w}1_RI7wMq7TNPN2_ekW4rVBqFofx*9`;FAoABPx*q zjzHzeZC-t@Ervi>5Dnkc%bS)XzIIjCS?Ye~`)G&va6dz~)*8n;6cYI|7p-#S147ak zXq(eO2tk*@;D!~$u==C6kH$PC?*FTa)GgEEI{i2sn^lZC2As$f7UNUAEDLxG2NxLi zY1kNhZOid=&VI4>auQ0ypr15Kir8=zgFk$Vy&V z?$C=C)9bN+*M56jUn)mXGq4ewHWn89$WqSc24+u6!(VSE4KF={>oLOfof ztc^>a4Q>CD=~t=mHBXsRIpSqZ@M_Du)z=R`V0+q)DeJMl0SecX%yh%47z?O0S?O{4*bgH& zeCw7E?M96#6QxiljIPXYQDSCkldqGS9$M5YI8_h_LfET9mBuulcil$L(cWHQH zMlY!9S(#6tJ^N%5YEF>8y}H>rzyr&T4WeqGdkD}!dBiA@j7@8X2l3jXUy#TEO>cli zR(-9tjg4ec>OnAjO(qm=fjSl(HvWyA{w&A}(?Jq`b#v6GY{Bxc$SB3L#=wf`9d0XT zNa%U{Q|ZTWu6h*1{RI`e*R^ZsLQu`261sUMNKY9OGM1lz&V?ZA>)Jk0v4D$>wmR4N zD6|);E5J!#%&0%q&kJ#{TY&*pFn<*jxZ*S>8ZX$`eC-(zX^3F(X5}iHlEw&=VbEmm zEhMZT!)#^j%=7a7wiq0E52-HexR+NO2Yx!=jOJJ|2(yWp?xkQcQh{mZlHFA0-KCd* z!d!Wp{g>un+&br%K)~3pT9-pGj+9Oj2+7Pqe9?0SoDt`p#s>ib9KqbL^~dz46w#wrwhS&_ zF{tyvU8Q@9izAYS|E9`Hh(G!vTZe5T(+8 z)CKS_fTj*{F(=uY&VtyEuzIVaUP-FG-k9hA9z?FukuKzKDn)_h#!ZT;B3%vZDAykd_ z@X+&g_x`Vrg#SkYAaWcrw1oc$S3Q>|S0azLZEMao2&Ne{o`*c&;6I>-e%rusC}FM( zhAnaR(@lz~YlHOx!dR_U3x(&2Fnsf_sU&7|aQXE=$a=XdMT`o1Np3Ly4qPpwMwYQ_ zcimTj$Y-chb!9mNAgDb?e#2epQ%~|^ZJ7ltAkADtZ1f(L-V95w?HCyu0s5t}p+Qng zs^qpNBt9PolPmEh@Fqr_#cthSo`QWvF?ws^-5=q`Fq@#pe3uG`KUdq6dSVS362Wxy zt<-^gL^O(GeTCk!Pa&s4td``dYiKCJsiBv)E`_?EQO2NUY5N~OpF7>0ycP!jn>mEt z5-h^2tt>3cbPU{Dg1~d*WipCYvB_v+8h{6hZwB08d;`p^Fgr5A4z2>U29TKnFI<-v zUo)7+Rw>XonC&0h5CPtvL>Lrz*~LXCq!z1XnQgFIHp2+S(;&o`Zv1msIjt{-tGP1N zwo)6HR7cE!Ou!RhSrWLCsq2r4ieufY>+273TEluw&V*#crR|(RC!bZ=MfFV0Acaxttkjoz& 
z&}w^r*&TE+Bslavs`dh@l3uv`#vllbt;xvDybhiQOTe$dlTdwF0Fo^jI`@pf7}ibJ zp|(A_EHC%6iIjd}z!K1af(8_c^SB|kegeOSfj{vCu>$m8gSqF(?i!5aQt@Bed)L#5 z2ndO`Zfk4Ps|%#Yv|2A)FvE zDxcO{brI6YV`G()d1zYxlV8-il#s(anAfbT2nhopl&D)x{_%gt&?q`hJ#~ zWZQ&X$0zrWQ_$c-{=+%P{U_tIc`B|?mG!aVb+s|~%_9@%`+4j0^ z%M*1)LP6j~!xwrD^2U%ZtP@a&T4o3#fV4|+7G*fFe`(Wdf@}2;M(NZ@G#Agy&eqzq z366Q_nywwSM#e1sL&XGYqNw+VD_{2_yIaMXkT)zsm?mY6SE;BR0$FwoI!)?+2o3cP7n@j z*ngF$#JagO`p;ZE=zNX_j5+4Ffi5#aqg)S{G>BDO@qB-}YjB)6J8R=Wk;cHrQr@xw zzA)745t}{u0ExV|HIVab<E9{`-3mGXHTnDfkSEPk?&2s$nIW^NLdNYU&!U)j9 z+-pF~b1+KH|D`p`@nmKhq>9-b+m3ZWH2*e;C&1QBGcH3m0hO`_!j02-Ap6jjc7Wpb z7?tu-thr^9K{Cq2%t>o5-d-8g4GFV(DF9Tj@~P>2TkT?QzCci=()j2PuK|L_e_(~n zcvDLLps#hy#aLlTXQv^dw%Qr0=`A2tRE)M8{aMoaiP+~vv#WtC6DtH>ItasH@=|sl zuC3 zU1gLyB|>U^`5{Ap9qo%d%$z)de{??_uKU=lr{Zc03kxrmVk<1s%Fj1sNDqhqoi?Zf zE|DvLm)ZP8BW9G;%H4FBnLQfb(|&V3q3V=a@j}HrgnH!snmi2ql9NpIE^*a z&~ zdi%{DJxIG}X|6LCac}nJ)Db~B&%pUFw^ip)PdN|I8b_J8hLdK8HcIh%-TSp5nx$XT z8z1RDYHPwCY3}hOe~K!qN}WhGS>UbmCbM)vi`cehzS~)TBEr(XIH~N@XUtormh{)C zFQSMUtS09JXUVO`w96FEpCA82>$Lg?%|wN|W5!9fVikjCqVc$CG00B-f~%7b@Ww&M41YQJ;bI!goGhjOpXlmDD8Cr;v^ zRT7;dk)jD`izEBEGhgw77$40uBB2{AtT4oWf~>#5N1j{6%~ zhMS3lzHC2RXuH!Z#3$)^V60XW4Ei$w9!g*!4~+4kHc!$`TQ?C(3--uYph--RFjx4d zz2Rij3@>bWsk`%eo6~5G+X2w1*9YFe4-slf_B7(^y){N$HLPy9`JZi@PG5iO%vkbF zKQOZTll`gF=Fw_uT@WmBjf6%dJBY%-6M&KL3B;E65^8QC(H9**-s(O4qMLe8BD{yH zeB?~~=2I|(1FmV!{#2pJwqn_-DHv&!vG&rtj66sVdP~a{+D8T>%0I#&TiY{c56yh# zRAMun5HMwue!*gXfv5euwD%?awmSbEC3%=KfR#4G$8myX@f?01&gDI!0r+OEG>Fv6T&FLEnepp5^VLFPi|dn$^l*4zCOBJ9b9bVBi&kgH0O7+iA*8h>cnrb>(6-+ z^gbGNPh&V{#XH8rZ0jAqyK#3UWS-V!Sns6A20=womx)HnXPDCKx%i_L390UtAXh13c24?l`z`(5f z3?ZfG3tc#c{WYbK;kX}n{Syq40w1ZK4BbX8=cnfBmlj4Olk;*+5~zR>(}4Qe85Q69 zP?jXOz7zEj#sV4x_EkO};PxwR+#Xs_1SqS|0~EoWpHL3Sx#k=zR{)QeYO`(fBs$f{ z2Ed~Ms<7G`;FoxXy^ERRHUCzW?EDim9*_(b604R-sJXyE^#hhexZCkUaY4h>Fmo7w z>w_?@m|z++8%u?gev7^)+jwGyq&#E;K^)5~yYcQbhK&KDs#t2Anmfu5`eqDiV@96L z{%_uD;QTqw!_5pqw?@lesJp)mM$<+Ulxn%kB}Mc_T3Q}xQNA3g0#AtwWXy2X?#5y} 
zXf$5@I4jLU6H*8go$`DLYbOvD7=c;IKAM~nS9^yPY#h40!!E~sC{^#9>=fD;S$7&E0ZP!r zoxlV-Avka#Jbu2%YTuN$7QkZq7WJlFt6{uHO*eRD`4_yp2tS-mG$&0fIBDKE*xVNX zlZkUEqvmjPlAk1)@MGjdl)=a;&ApC>IFPhC?BFDygp<6qb_;a#c>GY=RVyoTg_-CY zc*bl1fz{2yP{)7C#WquB+xCLJyDX!mgg4xU^d97uS`zPyfqNYf6#oRrof&WSCF4G& zV40aT4C%cH|1-J}xnl13W>P?HU!5bxhuvuFA;u{}Cnad0g}XQdsjmE)`nPIi`OQFQ zWZ$sv-!sa)_z&yLM_JZ>l$IMu6ZE`uXa1XC1-0n-E+i|$Z)0F4(OC4t4SIqVD%NQS z&oLU34uEAxi!Cvi((u#R>x35~s$dh7SChDlkrcK+4rj;-J`EOAG<7N$0M0|?OEK`1VG}MmN>cE8IvK#&(wjlV?^E|PI(Xae3z5M?` zb7Gw5!q(@{OJR@!E%l(kNtToI&Q1umq5povU0DAGezT*L66clth%a(Xq7miDqw=>K zi}X|H>kj&F1p`#bu3e;NGUYj(A7G!j=W&ui?7edHueGrx(>{xbl&MH`U8p_&Y1&7}HYZ{C%(HisUqu?srr#Y> z>9l$|d8lP=o3Ec{amE+2{brHZ_kK>Tp08Ah`+1UCM`wqp{0A`{WhRQSuE(S55Fcgc z`qy3R4?~rl^k3!ncdcKoR9WtDP(s2q{Po2x5KjifLLbJOt5gaI2h95)L~7 zhp8n22DF{rp+P4s+Fl{TLhB{Lw7aghzEiT zB33<{Eh*Wdk(_Vsmo2&yI6xY>?Z<5Pu`$dQvV);?*L+e89t(k%2UKb92dKV)XvRm? z#4#SNQ0iE!Eqt8fyIAC_+H5z>`L27r6qU3L&Rj#dE4Im1 zUfz#M8pB?@f;H4o0m2mgzqijsdq5F^me7qAOteHhRHdS*4Rk8-%Azx+pN-1k!btIi zR+eSs7fy`iGa5mk{sV!;c?uDxk;{Fv8?GYc@O;O8MNtQN*vvZg&fpz@M1yh?yhk8N ze0G^W#W~B0S5#KwKEfkx*3HaPpxQGJ0|^3K<0@{kr~^Ds&^-Lwk&hEEULjDo>X z2|mHRQ>vK;+`Dg2y77c5hhOQlcUuSTD>-=}Fh7)Njwf z^v(_1zqonpvjARcHrXLtv2}IKV!o~`q_~50HBQVwB4-C z8V6^hAGsx$#5UU+w%&{EPs{T3sbpS>z1?B2_xGop#R5S{4B_V#^)DGnXeAQbqJ;wp z&T+0@ntavT*lfZXJiEIn^Y)|al|v1^qVsS*Gzid=uJdP;wrcph&XpL?LAf62%-DLt)ul7fq;X?HM_9xQ|Gl0 zVkk*RnDdx5B(?%jkqw6cnwmimZ;XE@JGeemc6zt5-KnkK^!crjt;G0& z?5=|@)^qrgR9hO-5PyOOz5ZuvI(`U{+ zeZBWNa4@HL#q^*58zJWAmRIV8^2x3`&cpo<#HE1cds(oh06httvuFHgKH0Sxk~-FN z8R_ama>7IMHlnZ_m@Of1Nn5I~uPci@vm*O{)QkY+sc(xH(`wRv&kR_yf;Vj4~2m3Qyn1*@9KU;WLx zI$6;Dfr+M~OFfel(y@~Jo-`6)x+UkvHecEQl%_&yZVp$D&DjArJ|cz+$_Pd8--r2u z(CO*ZG9+{wZdP-aMq@2slqOk zRpm+ypA`&&vkvHMD46vfwKzdH4**HGeJ+HvKA*jsL;%ztiCEtp=1{<_UFe`1dgTfo zEewys))?q8H4m3uDsd*kyk1&@mwS&NeZd*u&n=+B#K7!q$q!;s#z)SLNa4n%pi~?* zr71CPlX4rCNXH%v-5cnz#+8!Mmfc8Vd?w0!w;8YRbh5S`)=ZdxTr?v1&5_yr6q zt$y&snE3y3_U3_5_U-$yQfb#Fm7-EfgccNODoIhuz7LgTmwg{?R6-?$kR+?B3=XspRah%ninvb?Fu;SSs 
z8XxohJ7S%d(83pH5#Ck8LIa3CFQ* zb?#sL7IbA0mhw3ko1G_5s*})Wr>5q6KFB0^c*;e$3+QCO|Akpf8-QOtRl&XG3EcdJ z>=L{AQH!WAI1;FnT)!S%P{Q<+B0RuCwnW!bEcAm>KD62!GP!4nqLX-4#<&OsS7Hcn z6iR;SJF|4gVVZm_GZAzYu@iY+B3F?-cT=z$yGSk)HsEwVSzN>$%sATnNmBsT(-O64 zz5o4ySch(TfJtoK$qhwddI|+>1AuQJ6Gtu8p>8wd!Jsv_`4IQfv}V%-TVPUTZHQAx zh~~w};KbzQ@UyPj`dfKRW!tlCdM9RK+H**VJlfja2(3+U&>XVRr`qMrM6Cut*()C2 zC|u?9RICFVzAO<+UK98jccnEXX`(++cK z!ZrkE`GBE+392mQ@RMAduy_8~;on;+=J4*kuMHaMWoj5HDDi;I=8`Qx`83WH>f5*GmQKNHvTaM_O?K6OtzP-KCVXuR3PkuWeaLYpbU$idM+C5RNZ$eb|=1)7B0+zj|(<)-I$ zZgGL+BAFO(aqc{N71C=$uUd^vEXn9)-n-?=8FIbSLuf3|qacLjanR4m6b3`yd6cT2yTuAbw?NI=q!euKspVn04z0QF!|)` z@NhK)#9z5x4~Wr3*^h~#UQa|sL;`964@wr-(9*KbX=g+2NOk$N5-iMGXTW;~ERh|o zCpz1&)_tPqppH^VP!~4glv2%B0bFW_$MyWPp>p7snGG8En-J$;)pWia*pT6ER-TbJ z(w|89{YVf7dS3xtYG7b1B#E z+z<5ci5YQ+Siqh~<^gP^lhbNP?}Spp)TMsyW8xGX1VjJ;A-y&4?S!6i#@0vW-)a8% zluFuBa$|-!qFa&);R$mn%$8}JSx@Udz`PJ*HcvS8{2H3TP2G3Fs{%C`;Tj38zgq$v zn?!4l_vevTZEa^~a$l(ovr)2n%O>9vTVN*twdDecsL{r`7QiI?bB>wD6D-W>^5A{_ zfLzHM)zOlXY6&tl;ra7LNR*d5Zn^@2gRGS6?>n~T&UyE_aJ^dP6>Ag3d6E9*;*g>9 zrTs79+iIG~K`$zA)#D@p)=|+*i~*pKkB#GMHsgKj+dr#D$T#nbQT7KY_p; z+o1qkbIB2l@9WqTrq6OiTF=PUND#W{LLns;<(&?*%uk}dox|;R6Z+4NeQyTKS-3~=vd9AvNoQL`A}JJIQMDIk{uCL|sZ0o$K;%-@Kt8@?iz;-9FafY_O*FCTZrgW%32%m*3uhA;Cvc zG>!limL3Tp3+6i|-nc%LOF)=rH7kx`>h4?#Mo|IpCCujvz2M_6x+Lr592(8Z&MrH& zA3?l~ui>(|%DRcz3h%)D59T;kdOiSU&D(|*M9|0tCBS(cn?;)oO>677bfS66!8P-t z_doAr&~qOZ#lzK0Ln@(rv^Ykdt$<6SrZf8yIz3ZCC!s=6mxG|dfi+nti@@$k0(I{6 zmtA;QWBGI!*P$^207#H|b|-z-+sCB*f=;K+@R;Y?L^u)XbXE*BlR!va7*TnEsUV^j z+LQikkylokjCwYR%aAfJgGMZmQ=XM2RFLB_uf9chRZXVsm!2*UgrQ9e|{Syf;f zD*E@@WAGF_=vK{=I4|;a=?ihcDDei?%90OEc89_J%brM~KEX}5LZ#yf`?vRL_|-$n z+F9V63y!TSk%We!x`%{AES~PB%oHs)3N;)@?Ziwq-Mmo%w!RWL`dn{$U=1>Uar*b5 zMa46&t#0S+-N78iUKSUAcH#+Q6i8_*Ijib-f)nu?Vaz5*?0`I1(fc)QDLMK1Zj{LZ zk87Y=RB;sv0n0ovpAvcovee#Q0~NxblK{Ft>Hb8;^0hE31AHf;83Z!oM2`=JY<{a? 
zM#%>wnLZStilHbUVOzh-pcvF(V9M0Zf;^ZhNnG#ohw!lq?G*7pH5_>6=KmGfE02`L z(^P^7rZOm6#?2=TD<5x2PDpqxDeGDjteknHcJJTy9*zb$3R(QA8GsG<0T9e-#7dm( zvCJ**qsDr2QYz-hFmx{PTY0pr2C^FPXHG6t1d|Lc@y#`+rl!ih(3}OAt9G~6*mY9n zgVQb9huD%{pRDHoT$`o8BhEDV*OxCan|c2WJ0Bemqg1U@_1cG2dnZp_pH-s)|ihEuE&O+ zNu>HjM9dduAD^K`=6v67pb@QVXK0ZW9fUJ_y$8@(L zt#BYeV`k4E$V^Q=FFG@Y4<}-rdc2cQg<|qYe8t3Z-65TG&r~v9^TTUpX+@atLbLbE zIopoWbg)gep7Y-@(1Ns{e>08{8nc3ZYCy5l_J1|BwoOB&04u7uv{NypzHz-hcITMu8mY2!+ z#1ZWLx9~Fwx6kjEXvvVK(SERF1}13rAFJGcICnZrdfl#0l%P9j@{SDW{sZG(PG0H&lgLD?ATN2uT|AvW5 z0RTsHLE6DX(J#Kg?Fg|`agWRc7A_ZsRv}0Z?(JC%er{5LMxEL^qK>;(2e6j9w)Q7g zKo6i=VmBNueF4fqmF>(T>m~hxo4Hi_o3}qn3hZFqkNNHmfi>$lB2!P0r?s0c#hC`6 zGtlwgy@nz1K=^D_z&EJ!Io-Z+&~LZt5Z~}~#$aaT@45at=uQbG9%ilsOckeXJV~62 z0uigOZbyrLYz?-inSq~I+5tsEa*`bm2Gl*RXdP}cVLQ#c_(quxGHJ>;4&mXS_xeR| zDeH$(T|mwg3GoMV%p04S!G^ZZwnJbjgvY?p*#wUrDU)2@m+*rYu^Zuqt-;hExy4~D z%qoMMhx2gXDvdmB8XY?z3~cjq&l1DPNvJWK>9?7><2kjhf_=FceotpYA`dAu9q6M8?kQdW_4iX z3*wcAI>9oLi}B$u=dZ2L{OQ`&fSIX-v{3iM-Qz*8OnSiq76v!`H-gr1zk3HK461y~(%5emsw!D$RO*0_9 zgVEi3N-2_n#kOvM7GU zoW~i7z{}qHAXh?~3owE4MV#;M)46NTd>>M^KvpUGB|Oz|W51XoLu6vyhScNzrS(HK=tMCkGxG0Oh509c+k`AO|)^qlUEgeQ3#sF7rfxk0Z-MI~m=3618|~{sFbRlvgW@+s(*CJIKoA z^erEk16frsCxW2h%7C|l=ML^Gjr?r;y&!gH-{bW0cPbJpBT{K@v`YX2nd7za>>JwK zv;AwaMnM&~fxw&rK+nsET&GY(Nk{;#UvlSlW}YcM38R<@H*;4OKpk zX6!=>`OCUl7E8bJ?hjLB{M_pJzcBY4RrvwwDvl%t3A~Qz-TtM3spgc~18*fl6>Y%N z9%WZ!g|DfmgkqucRrcJU3=-I!#)Zn)APOqq#?xPNbn%hWzGWo7Rs2KSy)PwN;{agR zOu^%)eUJ%n(EnCgNuhqNAok4WW)s#l@xm#B01KYiaTdDt zuh#kC%~h=I6n}%W<7Ss!LMu4-6lble()qTq6btVSk~l=M^EuLI$!+4ez4_y0-|(FKYsSm%L23U2Rb%v6#W#&J3Gp~X7TB{S9l zEV>}U3k-J4`X0*V&Nx*88TRf^{sq6OgvYcxbuMo0YyTFT-Et5qkQZv{KxXf9Ke2|X zAQOH<0>wI}f+w5flyI|_59s6HI8T)$)9wnMr24?N_M)e8yI=dfapj*7W4q#kB=UGt z+EJ!=`ZA;=_-q}#M+oyP7%?V}9LOpPy3t{@qIDOm#lnaO39DQ3v8{Gv5>u?Ov=|3m z+qE5w<47k1|HMH^6&T-26TX$wQA)@kas$wavcd2XnTsYO0@B|*5~0Dt$FJ21J%b@r z-)`CTFuWr}FQOsk2wVi624$zbj+&b5W_YP!6D^C@ec{8W%aN%o@P|WOEd*N(Xsya1k6ft@6-IVVMpCVb8M#*G_y@78F775o_t2Ky$9TN*m{DjMsXgx0G}QE4z= 
zFb>QUe%a;N@?`5e#cShaZA)Vl2lf8MAj|$q@XwqCALSe1vhYEXS4*;8RVxu(me&g7 zm0e>uo?d(hVC~5UpW3j%OArvIm{4I*l%SxX#Yjqhe{#PmLgUpIFcb$X+I4<`oaSm2`vCZgD`*XqxY#s!F`pXA49k4w*vnLRw$Em19Y?=e1@N(B}H3# z{J&N%vPS-2SFW)P&Me$#*+Xp6p93>a3pb`y2z%ZA3?n`x)FzY)65|ZV+_m(6izUop zJ^8&TEfVFXCx*M+{c_>O1}FgkG)44`jmu768#sfe`zqo8tXDY4(dKrXE-pfI68{$_ zkRKl|ln~|&DZS!1YhQ3M1h=2TL$IXA6-6IkimZJy3DQIG$Qu!c{)b}xzKqbz2b%2R zxd0%xY#^EKpXRTR&i`ZccS_R?{&qA3z=B#rZ{q>IZ3;Gbq#cbT25ZH+9`XZV9<*C% zD*x0~EgVjMJ#AG9-qy9)te(yABtkQF;+GG7D2_2(E8gI>|&7wCXNV; zN*SHx^#8r@%ka3`!;%>{=A5Nb>ZN#RUsy?RU+Q*`gj*e~VnB52V z0-^&X68lYoWC9#MiHQ|L&-8@Uo&C60V?ypj;S=#o!+GQ@SzZE94`s^$AG(gjvyi?K za6+dpLZ(+Q1WC5(Lqpt;JW*ZBF37JK`4$q-BBrv$#-r2r{51Ix)i>C$1R!4M-DOA` zWw!!Z^9tp2Md8}HkXXv>n$EADB1lY3O~03?d+yn=O7hwO975Gg0G{V67v~)_xV5+` zL$|2$&w4Suv`3OLgoW(3n7GX4AZ|`qMcv_-SF6Y+y=tr;sGlK3#vDou)OBhZ8*TkT zZFCl-{}go1m{rr25Kp{>!{>>8T!sK(5xTHD6l6!KJFBSSck>LV)aH|;*a_pbx#=SW zv5y;&YhVj8#cZUH%^5blO}5Rnk5=mgL^cdc3J{K&R>7O2_dlUG9tB7jkzo!lv+e9O zd_zz_*;d|=1=NXhPD;5ClgLW(ZKaZlX;UrU@saYb)2&|Gp(s)Z)&af{yO6{)sS;pi z_q$h^kj3AmfLzeU)Wb-uEp0vG8sU?^EmOsBazid~kGBRIhNU}X1Wam%lkBGo-ufWu zlzaRM%DUZp=MF7nI(Q>H#UoMzggxXXW8ipa|kQ5dR}rlwQJ zjzy4d(9K>X1cp(E{Aw0PYT9h^z634%xVEMyU?M5Pplax1~}*XQN^`~RYzM{h8< zdX*;=lbyXSc(eXjArA`R3l&FIuyIh31G_*5$sK|$TjM8&hZDd6r&keXXbS>bFg`xs zxKY-d{JUl4)iob$v=ZX7enW~k;s$ob=~lr;{9W!}2=9)@Lx zRLD4*o6CtQlZ{6$kPc`OgTjyY&MZI91@V1crgi~t1%vXUu}R1=W) z5eWMpT3;Yb5nXb_kFy^#@cB%!NJ7D~uZ)69Px3Ch3ZF3mtAd7|ac}}f^025cyjxhB zK%XmE1~32<3(hvfRcsZqf|*#BJ&=TdV`-bD--);tKMF2`#MThliU{45v8F9Rym9%! 
z538*8MGEpxb(meK+L-cJ{a;zNO;pn zcl@)9^?cR(&?dXi;cXDUXOi3sQ=CS}K+bfqN`ezLu#68u4J_OLfuV^h2<^xNp&h~I zfZfFCXdtj*j-~DPD`pE=Gx#Un;N_2i@oe&<0Z4;=!{p@@8}l6=M?yZFK6WfMGt=RY z=piIS?8Xc;0&c-*QspAN9ILTmqi0d0P5F?;^}NQm|3sGo0sx=-hg_v15Ns{asTl&5 z{lp1BWcG7sdc>&Bq&@UD{k<;X%J~H#^(jmHuU`PVWpp>R{){C50cwF6O zv<29K1BI+dBoV@Ir}?uXB7EHBMd&jkIYKG(@YvuR(7kJF!e%rI2KlV2KARU?XM?@l zrmu*aGkPQx^_c`JrrWV!eG$mN!HF2*<}fi9(kg}R$aY)!;no%=_Y?aV zArEQacMYyrNhHkG_%SD#?*k|hNvc$wY%}tbg#ZuqWnRxX(9UrhYvWj>SrAL$E%c&| zjHJANqMx+K{tmL75M<)s8B18JfcF(F{;WLP0LDg_5Hogz9Ue~KL+CZAmqNzbslp7f zNut3N%MBsmlw^F+Uj%hyFD*yWN`|Zhsl>mrD!xWiZl%TpjBE#Md_JCPT$Zdj(l7WQ z4-7`Phsi~Qmi}xwdpcW@gdN%eu~+K@Sy6NI zfK^ug&t<~byDPSQG~$b`Z_!l74jRO2oio+KUg?vHe~6Sz>1P_hp zxOzV)>nu?8BP7iH7h#3*!753FG_Yd2z7F4b0zPv9L}usItHWR;z;LTvp}eb zWK@a76E=_j^8nAO#txj`@+2k6Dh%vK}blby1H7yrjJEy=c3y? zu<~qTj!1<9r)iR3)hrJqdB|@KXyKbqR)GBlj2;5NYxz*IDCb7&`$Lx(q5~yY3{3ZQ zzC*ieqfm@EnnuURPb^X(RH0vT{7-J#INsoBWmP*w&el#+=l@{g2@O>8mXG`0T_-Ah8e&J_aDJLn;p56bEGZr5QH3Qlua`-s; zpfQ0A9SX3(7_hm-fIHBB)T=Ty3rOUU-v2^2P#~5QXidNe{*>C0cy;ZM5+uA6Tl2~2 zCJ`X>XYWZ=w?%Z8a6qM)&|D!9r}G0Z97Q@{tsh*{G$R>n6$0Y|2mNvVkw|||h~0p| z-hZ56d;%uL>=|^Tt@lc_a77Um0mmVbzva!0s;)=2Z5$_>V`4qDVI zx+n!HZm}i(&UUQ1xf$k)wLj3cbaU(In}|)@_A@GJ+9?mHK;VLaZ_D-&*My!Sa81Sl zF|s_wA4%}f{j@zHhIi`)w0dP507FC2Zp2wRILyFY+YAW~kL&ubvs&kd&A}0#Q3IXB z;oQ}4F5w|Lxw+cEyn(+=E;R?PYHq#6t6gBVNHUnjkF&)){YOL?O`vl^a@z8EnU@Sa z`NWN4$B);-nBt1tQDD8z?p3Uux9wHPQv;_nVWOgjdrzJ`c_&B}flkyBS#-sk01B{k zN7}F&$6B)p{>kb`(GE%Ao@HoDmL-#$67ua9VA%+?Ist=1Wni_PmC!kMw^Z)|x?*7F{rJ2ZXH&z0EgH(Xb$Uz89X#Db-g`|1G z>9NzP9xLFyTkrp0#fJaA2vb+ex%lB+nHgQ5y{y76w)u{;9O1Me>&m(9ly#+uI03DGz?)-2<0pQs8({CW9UQo}CV#S0VEqo{XqLS|Ug!tqWKIz_`pU|yrdT9ajCCDz( z*8M%kn}j-siD{vfXL^u>mNbkXnGs|z=sDvuq$vHj#%D0!(b7nJ>Qq6~=7J9hbiRZV z#BejRiiP6KlbR`J^^gH^GoXcyDVisg87<+i!DsXieRTP#%#6C_vUhuGK7#L${CkMm z2+-YR=F>c0DC{QRHo|ZY2v^pjZWBuz{<_8I^Ad0JiU)+^x-d z!;RBN(mnibHnIU52Z{=W>ZMfaGp@pVwpX5m+bGcyj*!%^%8rmT!> z5dOLOpLE)7NUSLz)Xu1!#A4P~VDMrxfuB#OIiJ`lobcWn-N?wU1%k{8fSSik317M` 
zFB?Fve>C6%9ND?$uG8(&7c~aGvac=BsLQh*c&Yp!H#Rz%y^CrIjBU#{ePnW{^)ze( z6E>tj2~R9*xFH!b3r(J3V@`*zdwt`d!tq#9b|`Trsi8AZbvn4qP1^czcE4_A&3Iy z?l-3%K!dLID&6NKe`Ja#!-HB(4ETj{F60Ka4Mmy2yae!cPT4pkU+swhmJ9T zqROx8qSiUvT~QJA1!Qbfm8b*Y6_B7)jh{2;T*zzKY0Z_$!kL!Nu&gRkTTFTiZ@-Ax?;K z-XeYbM@2D&r+-J;hg?YP1&kf2QZcR!N&&R^DdnCVi!mGRmcfH({8M$QXSNr(Neitv zBz=3cBVwuZcJJdE1UY!K1JdE0L?U8X!C(3{XGt#}AUWbLL`u}0dgDFfvU?9}7N=kKugK@eF#*lT! z?L=L8YOkE!xqD*4Y=nF1KJ?2p;bB?N?sVpSH_z z3|KQj)Oh2ZgxnWf#y|yk{Wqkgyj~KmQ|moWSC z>>RFJpx=U#lady4(&G!Amn7~Guo_pHM<$3v2B-L(1Qr*y#wJpt0=3xIz>IS6c&xd> z3llO*TRQAJU1eTHh2_EV2JMxL@K%_eYq1p!0M_7c1z40vg^cU8#(lbe{75I(m8{b4 z;OOWWk{eLnsaSp%=FL}O=I3D25}(1WfUdx=3LfXmaTFeco1vXbpjJWsDPx9kt6tp? z+tfxR8u}AKN(-8y8YP_*Xnvb`=mHq3#6Bqm#Xzh9OobbTw*~dr6^(p&PB0A4+PH_G z68p-me2o7)5+$a^1i-wG6xc+mUfAd8Mf~>RI#v%_*3YdyCDO3#IXB z{Q>VfxVF<(xL&;W^hVd$i+ym*Ci3)Nvn|$u%QBrhc0qWdY14o$tkTFw85O6F0sW?3 zv%2#iPD`WoCWh}mlvvvIt#RtiBDtnfxK}9jC+Ce7nt_#UEp7+?VTh9wM~pbU^9a# zzE)rkdob>UqKQj17BDJ@8Vv?I-WG;F^zkVT8JyQ(x^{pu_Jh?>8e5Y7Y$c!3)kQ)( z^zfYC?5MT2$J zT{KoV#*2bSRM=xM(zVXo?YtA+9Zi1p!9KQiiq*P*ExSOGcPT9f|BPf&VH*l*?0t+&mGj8Rd5DFj`!e;=YgN3-!c~c0ZHLAGslOOGXz+HwH zT_i)a6p*HeCpKM6#5w)`y@GNC&5a0p~KgDRKI>{M4IXq`BYq zmJI(He}Q;}ZWzr->HI^#jq|?GJ4f$NVc-PU1rSL|aAt z?oWO&Z`n&L&w)P)g}rVGqax+E>rZW3eVGcp2qSBDQPL7yG+`6H?B5;Ce#G#Ye?Y~| zuzW5;7~-CTT(Fdx;~uo65pi6iE8VXDjp>zHN?R4}E7Z1-!H6N0G6;$Y*W)cA{ktfJ zKClibMpbh^pAhKS8&x7UAE00T?z3JYYdSKV9ZfWB%?i@4goECOu!&N!bAnj5eidkT zV0YR}<7Qh3$yK>Mw78Vt-`StMLuzHH@4L+hVMd|-)8anD+^4(AcAdbqa{NXun5U-7 zAEV?%{@FqeKl7_{NAllKel@?4(m+f z^A)CPu3u-pdOe-jFgpC`=>WQtD&*PuoF%tg2yXf3*#QO5m>KCc?_h@rQH>Q?!`NFY zLr9Ard$t*MFlJgLaH9063kbs!Ax*%ZUt~fs0Hm%jtD#Thl6f9CdyDl=FH*vHeNmT2 zhy`1L>+}$Fag!U`CP$f4QvyiaoPpJ%#6nMsmZ#fQ2HoWAN(3`$-pu$q*O0FhCx!Ex zR3^LvTa(wsAy7M}s+&U6frxfXWn*7)dHeVbPW@-ZI?_{8fk&`$qsDc<+H5}dbV{8VF+PJ{`xhL04y4_k-&R(a%bdf;{qHndBEb^!?9DMg#^O|Oaln3fC$HKSV;8YiMmjR zY{Z)1Gje}WwX%wm-~Ca5!R*;B7k^!bhY!2WHiqCJWcrA)!y;~z@mDM4oSS4%#Ju~n zot6l52#pUR%q>|3M8?OoP{nh$R`V 
zz@Q^5`@jVD^4`@D;gB7JWcZF6f*te{?vL50sO_P+v01b^$Swmmx+DRiw$Zq6mXo0d~<}8JF*koygL4AZL=gVN3Nutm*MUu)d>73V8tq=3< zM_00}Vm$xCh&n7E6kO1j<&k@HnEM6Tv$BD24+-+BNkU*$*Ym~K(=9NRe8KnSs^#|p zj@QH;5Cr$^UUhyEFez`94B{Hm!C~lQzN8zY$+o-*AcyQ!;0iOoK6sx3+qAQ7_x?RQ zcJvPqD`R%;+<6)V_Art1ovs^g2uqmmaE?;UQ^woR{N%v|RrTRgO6EsNz%%pG0ei3S0>b28&4xa%2xbrz1qIt$ln_D*W6)Pv6txr|W|5;=~C+T9z} zYBTZQ^^n#Eq^I_ifyp$mTPAEoTg-ee;f<#eE36|&C-v0uSn3tlnWQJm< zrbYxyj>O`@K*)Q*dZVKpH5&_a#XptsS2th= z`nEAurV-S}CjyJnL z1-4GP-`mW&G!rSiuSBD|qBsCIYoagAh9B z18aeau@*qnW*-0XzDT`H3$A+|_Kt~(ADH5YynfwO6QT+BB8m`3q`ac4Yijm!I(E8F z^Ii|2yfa@k0ZJ3lorMnhg$)Ju=a2raTi zoEVm3(tD4E(}6zQN6MSl0N8`2j4bZw(~NHD9c-MUHw&RV}T z7kCysxwtx85=(lnzvJE54w+>R#ITrCF*{P~r6X(Jbsob%ne)-Y1-`>>r@^WDCsR@Z zg?dmwh2pei2|;>lFH@1h92F>_qd383H~FE=9oo4?-N2`+wtw#MpS=M)$O67R>y2g@ zwU{sD05__jt^_yK0gYV8At3kAf#?=3vb43ZQ%MppF8};d>}BhAF#_39fOst))Al#7 z323KuP}c)?|Ce4s*jf2hs9fDitYa@*<;A5v#mI4%3<(g<@B;n5X(1lu#AC^z_isY( z*S&ODA=A}Y0IF=9uY-TVkjL~(~dS-Uy#On?v1 zrULzT#0^q+Dcj`R7jTe?X;;mai|3Vh#X{zpQSo$@jqM5OIrY{rfI48Tm>qe;&aSa< zqS-0&FAWzZS}P>h=yR{gem^dE29Ff1!FA>=w|W)->g)4I={3#O)hVAoy>Gpo7sdr} zY_wT=O!Gfl*RcKCc*Ffj{6!1Lw**h0f8bUIE3r{K7MAjo>ksAKZFKE@0#)7fFyw{7 zt3_M(Q|lP#*#gCiA^t;;0-j5MM1)=Z@(4u7^A$urjH32Wkqbt5=?$smSWknvt(? zIF~XQ4>?*2aY#p9y{TMrX|=!Ij-DXvi#ACoXkW24fLI13I-Qj7zIKU(U@(iOoylrH z`Unw6G5R1YY3+L;Z98QH_;oKkabl>H>dwUOgId{qE5u;*OU)j5F^A=IPbFrph^s3)J zv{pK|A0O$tSu)^(yTIw+-)Sk+*)2f>q98@_3mUD8){Sy{)D9barq}ER zbPw;dvcbW@PBXvtd$h*WKv3N>o}lruF+pRi&AO-f{dfY1HAIqV!fxxR>zc5zTP|5X zprwmu?i4mwc66Kt_O)1k(EbXTTvu9J-JzL!L(0jC0#R#=LcOlBue~}v zKk%j&7^PL=!j9d%REpp4IR4-@>7Z=D`}gl3l7D&@Sm@(+HrJIgXKnb&yn#FOGu`8v zxrvFies?k9($r|XxLM05&;}knc<_nz8xSMHa|Lsf1P#A{fJQRMtAXJ?NfmtHt@Q;FQ0Wc@3Nw5Db>Arx&K4T z%|5vwk7-dx)+erRe9?ODEK{~)3q2#lj$f;>Cb~+hhtHsB)qUgw=DqlPE_%95ZFg`c z#$sQe4fMC9bMwZ60@H^tslO>dboC9~05J5-_Hr?4-@+MKM#@h=9c#GlVlX=xKb9YQ z8wb8&6XWB?w@xj82GTm@I_eO=xIMjnTv06kaVMD%Y6O9!MSblj&*9k$3Bqmnt~kt!+=J

J4R9%w6-Oxx6KgZ9G5FuGAd|!DLr7zfx{itSzAW=Hra5@*)OGsSR`dH(w^ar- z20vK6up);AAB1E^j-({#5=-aXW_~|fry=L>>*c1@F%%s=$nhfZXS6>Ha{FDEh6QW9 zx%1C7rb*Zizi2qIdoo|s=zB$MzbJiJdT!m>ms|^LqY3+tgX;~(_I96{-w=BTDRa6( zhMYTV5WAJNad=|Z1*BCl?Cp-+xb#Lp$)6(6S>4T>uqj1qeRM;6%B1TFk~q#>|J5*Q zcEU?HeB8^7`9m&M^_r5>){U=!vMj72YPm+uX&2Y-czg*aqpxGEW&*y91@@EK48~&5 zoSs>Q?rx!PjHlFuT_qY?AL$&KyM{C-z()9bOa#V$YtizO89xrAX>f%l%zg`gY2` z=AFKTvzzqUiLM9pmV@<5`}X|q4z1dTy{k4$NGx2&JOyZm$7X7I*P+3_ub+#jwFqBD zPn@9TQ_W91j$h$T+jIvPD3E9U<4XwiM4wpq9XG7vJZaIfiO;&gLeN3o=clh$xn;ot zOyU`2#Z?R4z&$bFHk;WWMN}MB)+P`2nNz3ofK(9_f0Ro?OthLlHAK{zKd04NC*gF! zb28u1S0X*Js{Ii`J=3V}!_%j0;Z68%J9qA$%Q;bG3GL?+a7`O;yxu1#akFO)>%E~I zmyq}GWiOmzo-zI9^BKbzHq-Rgn?uyJNkLgzSwllZO%13*EiEmNH-59&e8>~s_w6YJ zz0mn~aWWmx(;hiv!LDCHcbjT<8N%n&j0(TJj9@iy*8XHvcj#^bh%=(1qEu8=z;hLr z-WQLbu%6wKKm?1Z`wSMm(KsaCGzpf$#U?oUP$!wPFHb~SS~}mn$tm$A4D|R4cF5ZQ zEPWGnqHbHq^aDdK{yaDBZ+x>5O|>#idHMP4dE8;}T`J)K<~htqVb;U#Y`f?+Pf7WZ zTcsmCaofXXylA-jLAG7qC&MbzYf$Oz{!J!MUzW-K>4_QE(LVF-JC*b3YucbKw`^mQ z;Owel!KIb^cn|cHxbid1hg6Er;)bvz5}!pAtbVkY$RuUQ-)cV6PPH0C7NZ_+pWS03 zh$Njb_D}p-&R(S(Tx01#GMo!X{4GdBMzX(?Pg3vwI(3emWK7 z`I5>hIE(u-4vCl7^ih6(XgcJl*Z|5$By5Q6`7O;QtSbK|!EMvU15(cQl2@*$ zb{-_GT)DC}!>skAA!`SF3Y_&PpCMcAm~DY4{bpxUYsq2G75X_lxX5p@Q+qpzq?g!S z!Ef?>^5)#jpR*#@zT*&QaTycI#Nz{}v+9P=Z<>$XzEt;bt*aA?SY=z#qv ztSG@Ua5>xgIW161%TN}s&TRMoX}{vHMZrZA$%Knw*^N+Eq{gM~%}77+{4HO_F(<6X za~;bZ4#u-B$Yq)e{J7s|cM9EdRRs$*Rm^(et7UZtH^-}rAFfy2(1=< zH8eG|VKLvoe}9h2id~?bm8JQysf@_skYeh~IVLoQ(H>=57ubTyp^6F{8yh?rKJOh% z7lA|#0>+QZWq>4kc&xe`4D8IFR*Epu+Q0;fA%d(Uwhk8m0wE{ zZ}2>nLX#M8^*`F6m8;>Cn_0^{|I>hRqo}1DeRRFEycMP)3?;KVd~~5lAC;Ts5iGwv zqs<%4=Rf8^=KaaDk`_85nMzo*o2nz21U9_j#A;z|Yz%(SCMHFg&1WxKr_#u#|6WX*xLq!F2O;=_B0w0rq$XcUqij&ZCT|0Z_JkF4+Fl@2BB>#8wn|JTtff!{CVZHmU z$p^UoE{}iRpbj-J{1vU9H*i}|O|A4w6E$x;+#R?~!OSeNs_%dM+?NJ?2d5a(<~YU3 z*tqb2`hGJ!!TSoOsd8lAZR1;Mse~r%oL?Q_lmeUG-fs2!SQ5}bg;(1Dh~{la?wMWj zZ2pFngdtlaemlg;wz?BZ?=7R~V~6;sh$r0umymGQJ&L3Ku;xWI% 
z*wJe(@do8?r}fUo9j;E)8gEK1vKub-&K2Iv3_SidyAF@jqXfh+od+zNjQCUo>f`^htZVDz)_iDLp`x%fS2iLISMCrZlKP`~kr3-jgL4wY@@rq_s1sl}af znb~Vp=fl}9AXpXNIsYp}P(JwNem6+nCPdms_liuZ?b#;ZjBQo?GUH-nDR^7A`1|dP zt#pqZ?68GPbEk(#qGsTsC**F=sIS#B7Jc>t-^yRUWS^`q%e4Ob%y~}a@Xp_J6BG9j zeIBS2K7JwH!RY`G)_f`{|B}@x5IbL*l{KlE;&*FBQ+e{PhMlpR9nYea6LqO0Z_nfJ zOq}#$$K05EpL>u+P^G^Ub5M|U+ROIjs11!0)v_&gj#k7Ku!Gff;@55?{!dx=jOxCw z2vt&h6F+#g)J;i=DidX%pG6oJO?hw~rxS5S`;q)tTcv29#tTB`ZF@A?mh+T9aJx!< zzxv>oE7zow=H~jyd9PiiUNrf5eNebGMQ>0edCp~>)A9=m@qcM$28*bXO-YSMDCd0$ ziJ0s=_WXT0sd>V*5MWS&wtV!pYmV(`D|*M1A^| z%a&t(q_qKg;9tJegGbbAcwdvBO^IEL9uo4-`{ z=AGW&d(6dd9A_L-E*5TmLi2y7kO9 zUoU8Nqz~B-3>XEgmiUqa%x@0Vo^H_+xBDqmK3*x#k#0n4{>Y<|5J_LQbA#;7_rfgW z%Zzd-JJ0o=u-a~9En-+X9D2g=>6@qBE!dAoFSqnl{XMP(Jb)jP`A$B;|NeG=#bxcl zi~IOY0-85r$ByY7p5-eyFo>_m9$V*h@}kn=Iy}kNN=z%L=0)I*TpJGK>&K*Qzo*HL zmz;8Y-kW6|Efq_t&EM&)!7(NJ^BadE3r7ED5dI!j(Vpe0$K`~NW80qNCoN)!IL#I6 zy(2m=9H>b1|NGg(&U~*Fu>Qph|4f0@yA;{HkPJ>~=Vs2@_%G2kl{XJ=OGM_$)y29@ zJg51`JXUGAt#t8D&-bU>F$2-$v8adVHILoAV_>tzCgi+da_oTbmZKjt8dwMH)5EnJ z^1Pni)6|OZ86;XqZp=Qrb0>bN-NP=d^zb#@P=zQ~)wC|bhr;sHB_>H1L>Y0KJP&Tt zPMqx=`pQ*4dD_;CNI#OXm)ic3M;<5p*8`hPBz4fro)@hm`0ivraA9TMv)UQ9k8h zE-^fJpRK}bj*;9AZGqY13MVhRS?p|GYujG5;c51W%rk+#h^oZNZ(PUDG|p_8FyqO8rGWo7AKo4MEMc6sl4 zY5kI>xV+Ei0bS&X$+LU)$s5?Eb07|5U9OuLOhH+5_cpV6PjImpSIwxO-{*Kc+Fk8g z9vv2qYOlv1)@&J{Hn81O@lH=JD@i@_5%gT(k3CT-f*X zgqWGv8t&ioZ)~5u^Y{bZHYO^{820|@Evdt`y|3FTw{_MM*1h2mWB)o_n`3PB>OH2P z-&z0QH_0$~2&;$MY(y*`jFvmu>wUMDI$4yMBX(GImn2`##%BK3JI*)XgsVPGuX*x1 z?-Q~-lBSN_7U8~)&kq_Q{y4qEF?v8b-?5Y~7b5c2qS9jg=LpA!P+|PO5=gIr~30=0(pWSsxVJfb<93#ScW9+R)4KBv* zVPSt}c%i%dh0=Lv<4tLD>WZCOGD=*x<+=*0l@F!~vv8@~+0K8Mdacif4UYU&{}~~~I^Y^X0GsLg><_Mf z*K?Sr&!1V;O1nn$Bg@OTy6XU^={+GgPp6liLOP9n{Df;cfm2vcF$Nz#Nu(J zcMl(*F#e;x9xWmGJJl31((lQEo!Bw`kYMk8oYNuUsZluAz`?-?PD|) z1Uqo-lq_FdMitQ^Qsg)5FmV6N{UhJuipzE3;R@IE6URyVtZ!_0Y~BUaHj070`+WPC z#SlYmWQoPO;kR6Z)@)IEop6w+3O6rxs(@#ws^61AU5Gx;@-V)nw*B6h*|+z+el$>% z`_`NtEP(VK`npPjv)H67L@#SD#=+x|B|BycyED9^B)QD|tPn@+Z<$*m7G>+h^=>i- 
zc0!V14{^O*OG)^7*HdEeg+6tsuaOVxtUU1z0|}FZn>evA0lpbX#sC^`u~O|5s{5T| zhjs4}{ynr$1-o70r9q)uxA9Xd>ijoE^WEVv?cCdsUec>I9HMP_SQsUT+tpR|^l0z& zI_u^gV_a9ahfPX}Tfyv~J*ibhp?ujXM{x$#oY*fao5?In-1e2=ASPBJI(O=1poS4x zm{k`45#jYuD0>qs$}xSi8FtjOS%M{el@X}^TnH07Y4v)g^;|yy58e;N*;H-Du-Gmx;Z!@Pf?r_FE$eYaI=)R+QvZ zB3MSd923NN+nLF+Fhj%3H!_}pV<#-gIy3df^6vNqJG;A+Ry|^Vw$YpaG5T;@L}!UcuzCQUR|P)SX;(07^2)2LD{tz3C+Y}SaJV*h zjD=lJ@tasDu8$cVC<_+Z@ zeBi`leNBM6YQc0lXo<;Bl%rYJkol|U3m=pe@!50uFuRpxE61G4;UQSF9JJ-EyVW0pC;9g^ z?;u5}YFDL^x~Xuyl(CxKGAB^o>Snl{^`;*`bQd$hXsO*?KH%s$k$nr3b)-|)M=zy9 z7W3|`4iD9)*MlFYfgHMX2U8bY#x4GkRtQcd-Mn_Lg~yVokSTi-37Z2}80DB%uK{FL z6wiUWTGp%P9~Au=KkIJxqa}z`+`XV~2T&O_P9%WpAd1{e5^BZg*?bTm1=2XWfVF&U zY~M+@T<79+4zC}Q0r2<8Q}FlT?UhFGMllW|CdNuTAS7bEzKhj@ dyrBHQmOr407Q9u@@75cK4+KX9{fan#?q7-@6(a@nkI=EnH39EzB8AeHg6W-JOLvIGpWG9Nj$}?AhI%JY3Bz zES+4fEZ7|_+=Z;PXz}naqJ1@Ay8r7LJUs8zUTw4^>e-Eenl|z9KiA|4Uq2WizZYv5 zUCo@W1`Eh%u`9d69X(!!4Gu0i_4{G`ov-b=LU6`vnpywqQ#YLqTGmRB*2jyJ>hCzE z{0b#v#Hhn);^=<-WPQriLKr?G zf^jspL|FD70%pe7`E5N0m9bP#E}6x1Xjuju9mP;tkQ5aH*=l%=`L-i)goz_WKh`2IWMTMut|Gr+lvX zmE9|aMP$NA#+wdXExLT@YnEZqBZ&6%=+|P}aT7Yj%t)O|)P1LiYDd`-yTge#HrnIn zbW#OB2nA`Gj>eZCqR|Esb(yM<1Ot3>>}y$j3+{W41gS5Gv%GH;ZXoZxE9V@9V4!}u zNStFN`!RIEXJmZ#fzB9zNNsCfTWb|bIiFE6D+ z&OZrgZ&UL56(w~+EG^^hJCb?-!tlv2J8VKtIJ*>uTM*A;M4PCq z?Q-6_vNXy0{nWn3xDEdvbAH8ECtBi%y!ADbc^b`v-7cMz9XkKUKi^&I)LDH)`UTiH zyKGb}PoK^btk$&|3^v?iRQppMHcR_!OoA%*K|sP}ZukJ0br(q?S)Lg{Ne zJ!*L&t)Z$xA)g^hT{Vk?2!+H1Drd zZ0fVyn8@gFEoLTl^?OH&Ce}$?#@E?eTdL`7S=Ch)>)PHl9PRC~i@!A`*WJsPq5c(8 z`}Ka9qIJ=yUm8ieGKwXzvY#JJ<({pbX$+@L1KnQ`!3%x5*f?FKY<8aB|!4g{o#> zAr~Z1U|cc2i$gj11kc^|{kNY(ACbgKD0}bP5L2-j!+9+UX6tgBI6XXcbjH8Exe`e) zZfIZ-ur~4Zd`>DBya?*&?v}&N^Esn#Zf;>=VS8(nti^r~4s0RGGrX6HDPDCHx&1m? 
z2*?ZL$DHk6lNL}4oaUBTa)3Bv_M3(+JG~EL4SqjTKI%C0x7c3j9UmVbs;3w6{!(9$ zN+9A}e457vWI$Cg+NPZ{-MM}i_WgC4?A;`dqphtG<)yQ+@_BV%naSu%v&fb&W*cqC z84+yasHP_MKmF6H{;^`VhmUKTgVjFVyDpt&k);FzNE!A8stjDy$_c%qb7A8H6Nn zrw-T7shCCQ9{lr;O4!y9ON9zHcbz4wYs)bvw3ayqLTt3t`194v^`#ng^w7uHuB z=a0iL31P;EgHPwu>I%#~LTrxuI|8aUy;h$kdzL;vf0l}6))2QC@MPX%;Az#IN>eSO zz+}yo9#i3a5b(pdXtbgzc}{wGZF}+QHyfoyHdbmV5#nOn4RAIT*VC>2mVPG1>zV%e z`X1@C!ljw)8AEByA=KaN|J_fUdPhL=ZkpSPd|LSj-5u-IBma<7QuVtz12`9p5?* zYN_EgJ&p>sT6qAW736AJA1j@jGocWuh&F-0f_X}iPx9t?4!YLpIp&`d-S40-&CIt^ z^*=tjt++QZV5peTr!HMlwwaZek$&sWod@^uw@SHBe|LWGj9I@R?> zv_bTiySikV$n|-!2oYIAL?D03>NlgPl8`99>MGPl^dx0+$M*&+9@+)jdZH2n11=Ya?@rUlw%*IQ~wjFFWZ(HMqpR9JpJi3Nl$+Bg9G*?(~`kA=l}XlE6Z#9mKARTz3)Y~()vvd0VNo3CF_ zhAYN9>@2zMz9&P+&0eGIVsAO--iu##j#({+YjhQ@)7o~QZmAJw=9Mr(XtCDglBycr zIQ?)XK^`8cp+{5*tD?|Neg;ZC5zn9BwrJ|2Q^o9rUbj$5a*gB-d~;cvoV;j;VW=`? zR%BF^&7*j5DrwzZ-bK*RE8@1I7n6H;Y@r=*;xjMY83s&2T6C)$XOhd-9zYr#FLLa* z13x+c6cJQ^*X>uECx%aR*EI~Qpq?S7y|~akIH7;9b!*fx>v5UgD2>k5D^azm-DW=eky(ImQi+?eo_rK^k_JM=&X-Cvo1BiRzpCarOf{ z?)WI--*R@MU0(87CzlEL;_yR}5+WRdA2w$6pu>6Ky?IHG@_EmPs^(^MvV??X;MB4$ zbnD&YeN{C&`ctX%CT^sPo!qnej&y7}IWi)a%V1hnymu)5K{X-!D%)*f zYRI70sRqx~{G|!+Q56e|bWP7yPhwu|4R*)P1F^$cfh0@|D`6cqnvPX;FNVnE0~KJ%kc@;fvl; zv6*{kwj^46v-batx%W88-my&VR-2vs<4vD)1_I?Hr6 zUX7AnklW2WmT8GiSF3q|!G#Pe5w0^UGx=Sm;Wf;8_#8v4&YfsjRpoG;4N~3~TJ3h? 
zz<M>oTKA|_sepAz0sb*z|SoWG75rF zRa8W75&q#=PeVh*7QqG_*JY9qk2$7|zpzg-&kq9?*YSS#OdCZEmMo#Xafjub8+^tH zD&#mpca2hv4L|I*ve&6=81WMWp~5N;Ba_*#cki6h4>{Q#d#W8uyPp9kF{HrE_#?bd zdBn8pe9Wa`dRzPBcOPDz>k$>pYgq6!wJqe6H0lAhwPT5MK9|NqG^F&MwN=f_`a*3Z z1flzAj&KffH!tEMfwFK&@>j^Yn>vSk^QVcKsHd7u(>+t*@tdzc}`o_U6s>q=V#YjgTT%W}D<=EtzE% zc?XGsN)0$+3Ct8u#j>9U^K>jSJo&XTt48j6E;?tLn7E5Lkh3k>)$+EwlI(mdGrSL3 zFM$ZMEPnENhsc`$eEqXdP=yyn#1yqaZ3spogx_U}<<&Zy+mUiln?b-l{%Z{T7D6ZHDkR3CEibaQ+Q*%RB z(qk!#!9k9d;?|Jc*6#+Z{y-&NN*Z)6keG>T%q{}4n3Z)!Y;R(&zHoS9owJWNvw^8P z<-xF=q@bx(B`UGlpaF9Ex05CUkA~+W&XV*cnK(5$6JP}-ZxH*o4kg0n1~fc;G_>O7 zFY9S5)A}k)RroD!X>*A|h&4h-f$95kUX6$kssN5=7m8{*R8J1k*`)5bCY7_0WXED(y(&@Pl8tQM{rcJ+y+hB=XIXFayi!BKjmb#rwivK~ zTk&atpKKy6c+3qwXsvs$X4s&Z5#+YSRcx~d#1tBNr@t!t%PowN1qaI{7ZMciLy5$! z;mtQul?;xQnlwD;s-C#WJJO5&9G+mr7;vbcmXVSBQK!)xdW?6C1jD4kuFGpc9ZzrB zMF3Kdtr(fWZR?|c?!Lqe1Zd_tLMOkh0JeF&XE!6V;R|qrWtq(DhAN_`gf+7VG z^#Rru3)CkRC}1d(@Yk;ndo;G5I5xhSH7}l!NAvML&!e4T4FqAE`Uke5G-OZB!tdZ~@n_)MV*lBA z0sb%GFKtmoen6BI zbCncW8B<_MhjKqU6@8sHM*^k=O0f?)HHrB0%6y2cWFu!jd{Dmck1`CnPQu6Ts3aj_ zuQ7aA?d$&jQExm9HE{kPQ#SU^|ezCymOkW>u|EBZFi zWjED$rxZj!!1>r*h#8y!hnHoA6+In)$WPXIul!vE39vQ|cvF2IInD#4`qG%v@*TlK z|F!4%2mY<+g9q_wT3PP->sC6T4I@9$Ff9IlcXMznORa_s)(Ql*9|9Y48LA9;K!}4N zAvBU0h~zGKI$tAbbKGj2`-?5oe|M045;=|r^%qV}Ar((odiis$g<;(!t9pe!f3;yv zT1!ZYS04shyrgE%=z0&+U)8>g{i5t(8C_|Oy6#`!Z)FCK_ZGox*sZtuBuM`Cv9U67 zBXC^QpIXd1CUV1xuU{8(f4n^uN$qdd7w$IoTO{MQGD#U4Wa61B47s%A<{-;h_}wY- z=KOF8o*Tmn@?0L#0(i;GSNO)4g!}2!z_BkY8U9p2_*TsTP z`(NmAEcB})a;1mq-ot{$X>9fS{L)wMUnQeqxiq}@eZMPp6Xtd5&xz4f^&rv8ELbK5 z$eNySIli`*Yc1_KT|VsL=7`?*ea0gsf_%jh>NT$bK*n%Hl3xHtz~|*X{}z>behu$^ zEd>^j-6WR$mh6()>7C`UJG~J|jnb_uXL+qYbF@>ZOQ47%#n4{O zk&g0}Dt>Xt6J4pBEF=?{e)-Ck5GXd9dHzZvcWO+9e$ulYl@FDO>o1)Cc)t8DI$f&4 zqO;4A??6t`#8zPjr?N#~t8#=5u+5m_G0I5UaV*oOZokla-E*$S|D(N@8J0c&%*nC<_f;<_Z7KB?bV!)*3&LD!j?y3F3bKf zVR{eq^ws4p{TT;^Nn9DZ*b(W0USY`ZyI-N#d%tBS$DD=IIJS_o!K+k*BTGgGE9~o@ z(nN%+Hp<2f73Efrj%zd}XMS|&7MUp3AojqZLJ|V~4x=)5ZXCElp!RxKk z&IM+xO1{|dGg4z 
zeBzPa!`>^812Feg#9Xr*ctcBVW4mgan=2zO_4AwvA+1o2CD$I!dO!>G9@}55ErFqg z7tz*m2l1MK+_S1RX#N^p?}AX#qX-gYZFCzBLn;L%7}dPV-t1*eD#|pCL4<>H*YeNF zhEZu*((w0nYJUBwBqCieo9iT6DAe1xW4XDaL#b^)rq}&Vw*6cB7SsLoeNLUghJjVS zka@_Y+~5pTs)O31*i+w*PR`e`k=I|+x2$REc3MxZDJF8i7S_~oyq1O*8$Z+aYW-bE zL0&_tO2yNSIeMV>Jk)q|M?X z_{geG^OE`n8rp&;-=WW$o-gHn^I?ZFO!?{3uB$JK644#2PSurjAHPGs>3gY92v*Gh zXgXG^*&`?rmulEqX@hWSYE0hUZ6X`OKPgp4_O^$WD<7o53Jj*D)TcqbM7OwJ!B7)Z z^dJ-KbX!>hNQZ}wc2$jRN(0j@SH8;|!<*eD4zsV_6dThS{!=3sCY#B{T7eNUdJ1dkf(a(Og9zr|%H=^^aL2qgkV z{=eA)0^%`jGp-1=-pE_*PIX+daC=jYHmFz#juwj zx2;DNs$<nr{|oEoBA zGG{gBkWHlZI@ntsr=lKL#W3eI&{rCbGrUhkLv2$Ni6t+o3RvF#!d1HByZZX%bBRPj zc5LiZbw)Ym2jZX2%w!8955m$FCJPW>)Jbipr`gU}B40k8ZchFN2~ZXGM?yqBvnTY( zq06n%f)o`+0qtb|xkQIQ|2itRtfo6AmpfbBgNiQ^xK}OzEd4*Thg>hZWZ$slQP!Fh z)aQr;_55a;jz)?Y^A~it+MBX-Av}yitB(2To9Z=@Fidqm?K;+rE71#t+PEi?+^gn6 zs-Y(W^L7e3#=@`dtz0(O5dZ`P6leCZ#*O}r5)dA4UUr0G0tZe!6&B>izaMOqA(=f$ z->$`89)(Q)NwVAUNDYG3VSMZ+Rz_;A3aOJl1%wutA0AasvSB@Ik*oDqAJLS34mF;K zJE4dPh9UE=7DL|xWBBQ|U{$G@!C(K2^?gEf22bX^*Rfp8Qy)NF{4l0V*MUOn`K?=~ zY3q*peqmBxa}G7V`#&;crlq|M{hYd7a$EAAQ^d!|-}!gSR0RLyfZ>rrVPZ`R6R5kK z@O1K98;beSqDyy&Ky6wvhP&JTtiM#2tlyJ(0hgudMVGHNzBn>GAGArJjy zz_uK?(EJ27M#iIYKighIQf2gy9?b5y%Doqx)&7(TQ~Ya6L8^1mI*^xFI)EHVR+iL% zt?ow}uV&c|3sT`mXaU+}*jD^cps?S%e~svkzQz@O;f{#(y30mC*d)2Nb%~ive9UTA+CXawgUMY0CzuTd4NfIL{xvQii$^FO_K2Qc- z_K%dtFC500SqqQD)oR{(Q`|1mJW>-HpBzeuEn~mgXTgM4IhF<>lMNMF!Y-hmyBzJ* za&j4C5Xkiu{O}+xHEQ_@g9%m8>vL+A-0s+bsBP06i zP|OiwugZoz-oj9h7!a33LJX+y2F78RXi9~(I5pi2C7GtLL5VEuy4S&~JiI4rg^Sj% zu3)(;)9m-wJO&5x%}gC*FL&gu#bejD(2k85g$TVDk6Oa2Te87fI`AUoW7eYSm}nV# z4Q-dVL1j5hET8)3&5{YCgcj~1vDMPP!f>xvh{N6+MXv*(r{L#bEnW4<^T>_-(yCW; zSyZp-JH(?U*i##gW>cZDZJ3^~LFpch-=^UE4F7`xPQm z@^^ULuy^~1ItQKFRux7n7Vk1xJ;F?lPRys=d4J=vLn+Oq7_>S@{yapPId`mh)*vuf z^BXp+S@|4w(vI0tr_rcpDelrCMk&nh`~U=lq$|H;@eHLnu2?Sr{CWIfmXoM6gF%|* zT*q1cT77dl!2xS)eRnGN{N2!gob-VbulheF;8sVgtlYgi$xLFa9xCdV<|Fx8$i7s# zm?z+Egx@61p8YP7B=Up^W#HgcBWMF)$Vaqjj(rV3sAZ(Gb>(f5shsYfZ%t29E5oLG 
zxp*D>U3jIQ14`8F_9zFGv_Q4U@wf8M0h&F9zbUiV#`dFl#&7BS`ZTR+$X_;bnk_MO9&AP+ zn%KNLAO^jwpkyX)D4D|uvZ{};Td5s=sU7u`hAAnm;oBm$dt1Jf>1oxo&a)0&ntRm~ zFj#%mIR1GCYz>lmg1oY8&}Gj4H+~$YUPBN%_6aRXHje2VbfRC_^wxmh|HKV%M9Z=y z@=qPt9z@OumRvvMXQsMW(i*$hooCtOVnzM<0T$HoQ>d$%2b#!lnF`WAVH%zxpBrx@ zsX-BDxRy*6x1r19w3fo zK@}0y|CZ=#G^`+PJ2?2sWl^l0>g?+sP~CH`3t_;W&)cB1j)F%z7B-klFGkD~BzIk)C!_1CYCt~*sg|J@WmrGLj?0yVPFT-z)El4q|?Pt?{B?k-lq?!y0ybh+1& z1t7I!6i$5U|9Sx&08T>0aqf2)6c(T3DxkP39?+j4YTN;2{ObTtCLrj|r#9(s?LbG) zA6Na)<4KQ|65oTO0tnu6|9iUczrP&~041%R3bBa=hO6{`uOdOIa?++Kg+W5VC-_^Q zdFG!1B_KTSdakU}`~L_Cl*K*h=?0b4XMTeRtoQzT04Ko#!eohFP~XohbKDc2{7tA5 zsPOQEXTKC!aVQOskm9Yss=Fa7S@plKKR1)Hk`LbC0(A;Env`w!7Kbf@In_?7L6@Y* zI3dneP5#wMj293unbe<$s+^~>O5}72|Lp9~lTNz^LVCONa@yUzhd=o*89Kq;<9}C8 zz6G}x|5Y?pu#z`KEAPMjr(j~92@ckZB(5)dVd;tde?)xI>tN~cn2p`4ptRyw3X2vX zk-QYN$}IiQ2&&BIHIUtBFNh+2c+Oo9!EY+(U7}eY%Bj%R+XI2rU2J7Mk*lVY zCha)*uOGWPdVb+?QIbh8LU@~(UvG-n43t?E7N&eL0hf}X4aYMrP_W~_q9tp#f~Um^ z^o~IyWQAG`Lya!rRw+A3_`MWd&Gg#X_ca{jzSolUs5RVWKl|0rlX@Kd4;klgRZO@p zFIIjl_vx@kmuEK53LUul&~Z{9;yqaDp=^VEizLPga{XU|6j8ynmig|6wT8+@IS_$* zkfN^+Sj<0V)&TC@JZW4LLI~;b~p5lVqgAgX}v-uYu`zJwR<1t@;=FkSa$3M7A z-vAC0lnH?;^ucz2c~v!tiwXMq3@Q&#os9}>O0w;@kOpn%5&5OD{COg8CbR$Y{D4sn zDszCjWFGN$G~|vTF@=6Z_TlXQ)1b!9O^2qncnzx+3a?SEJM&%zd!F$RtWkCQQZOuL zIB{bcC>FDd14lQ8=X9=)Rn>fTKV!p1uRd~&?5sRPz;bE|ch)~Z-L2V|MqpIn*kLh{ ziAY1+$!BRd;_RRcS{G)4d^_5PA^}9I+V&vh7q%*6V`NLxTkDUuwJmlVS%uw|w#T>i z9q)nCD}$0ZjU6*9YjBMjU-f>|2B zLo5#b_SWJk<`)(mFO*7LPr4)c)Zg`not+(*CTIN|M_l*#n~Y(jA@tnO!qRdw`#E#0 zF5x<60`(E90DIVY$UCBLV8B)==)GaAqoc$1pV_I0NqA5lAEUEW2XfC+B{}Kp`nc$h>J6XmE>erG z`#W{&&El)@&uE@{l4)`tht?t97%xW$ug8cP{5&oY)r$Qq%v8atXDN;t z*xERByZ+%=qA-6McsiVAEKZx@g8aj_M7>JJ%6&z-^5}S?KjWuU0-Bgr@Nf$D?bEj)%(d(FW1NTCxwuX?&e5YtYpM(+6*0r`^SRrS$^qx%sQN zS;d6iy~F0Oyq|7yOj*s8M4$d)OVdNs5%KMR#WDKu5|f98v5yl}3bz&Jo#qRQyIQQe zNGsfm9yia0=6P7m6h>ScZ(8?gIAXxVGlt_Cw~eHGG5a1y(od5-45E1Aw8NI0F-wFSQc$HKahJE}-LV_r%om(lfuop@E;$ z*5~V!YNdQMPKbw*1GXI^pd{KjL+5$voQ`RH8|YOG8GrQ_rg#-lHHj(kzCKA6-S_ca 
z_v?%yIwx)>zT=q>tub`*GH6%(`|U2op03M(DEk8)O&?YV%)1cHa6Bz(h@6I znyuu0iLRG*jgdOQ^07~ZFW})xUZP9xPv@xjL7h@}4`_f}b=+qaf0lq@j!~TRLqT3O zILx8F_=OdGgBwrj+vvFPAb7SHzbO|Fk5dy5{MrtL_dxy<`1Qh9i=X^R7ewspv~T<5 z;zzAA*7Jw(Dj)~gk>GEnd>NwsC4h_y=IW7XFEwPN7R}0mMHyTG+d34ZEKtEH>njau z;B8lc6_fTTSO7_ai<`chM|NBHO~sAU`%3UQ#eww$GhWzx00g$&&6^?Ga&jmu*G&qf z4yvBP@z4&sRx+`qmJeWD+(+(W#gr{*kLij(%$1DhDd0$fhO{MZwbJ(+bOyfi%%0=M zdM!ybfv15A*V@4iPN~VXM3W<)y8f2)_THy;YMjMyXaaIuynB8|*NKtYTK#8^V^d9j zt>3>_+mHXkKWfZ@aXqqM1;eHW{vF5?PRU{Df4G$QCP?b?ZzCklwEwp9X8WsGcosdt zeM?A4Jh|PS6)A=q7%*l^m=4%8@b_=@r=g*#w;$*BXe2&9-D_D#*Eks)&kVpM#ZULm z{Gf-kq4e0L;l($kVgrHZ4MS0kvbGdhM83xC?Cdf&@xyZCP~*F?Yg&1N_!n^J{ z(XoTyPvD;0n+`fVTqz}`wdF9+MS5s44<{i#crM$ndReFxzt8D3?&vybNe4wW44uto7;Q_JWgJE zvL?KJIGqzeVz4LL5Y)k~A_2}6nNkb!VBxqQABy}esaCGMWGGohlTVY4^k!|)ayqe7 z*7PdFcIZV+H2S!$Pc?OSb*1Vo(Vy-A`1wGW?sg-et`coX5sp^0e)+<=E0lxe)5Mqu9D=NN`qc7^5}@trd@h6*cw5QI zy*u`(RX6!F0OM#mc9oK91448<6*UT~jgGAZs!=cs5#oA}4jQO~7Kn)Zu1(ZIq0q6h zmw-lx>NE`4MG}%de0=?|ozkf7#}7T_huujcKCZ2iN=e-9B$5(lSq4HCY&kjFc1s z0fCgPY+=(9veW!2AtB-G*RSv0yVo7h#^bH~)w}g0o$&3ucbokWT;IeI0m5`w1kh(N z_@rQzp(f}4Jt4W*s$263Mju{y{$Y8jn%?NK^1_o3XBZfH;MO{$uZqd2tE)>&woad& zdSQky(mj`vf%4CipF459!y^4UZeCvA$B!S2i5d2CBG+C%iEj=#T0_@F(p|^-w^x*s zvaTAN2U|O4-BV`gE&wkuBD#!T={s-)u1DdeY@%&{-3Xu-Tnh?l5d^MxCyoHDQaGvp zL)LC6>o_SOtm9%cwymY-eEq@^PO2lnNxeS{mBt!epkjH}?7}yH#)^5$y#L=c$QV zi>OX($t#t&_8PdczN_ks1UzimYcjH0HwW+IM#%k~1KR}9bqwdEa9-y8Q#`x@{=Qgx zM-WC%{!;Q2Lz>=)b|9b2=JP~~%gf7jZ^ z8aIV890RRnMVAKs1|e>Y!kFdf*t+y;pz|64k!AG22O~VqoPL?w7>wb>g68#i;a({h zULn;k$2n+(=jW1N)&PM3e1&uMA=S>uy==0#i%rgD?=Y{fXg5ou;S)d(LN-o+bwET_ zUTS-Q#sB96W@|A&F>nR|dYWjTHx;~sC5M0*yh^~y_K(NKZ5bCvHl&kn`?m;gf<=Rc z-!Ir!2y;yDaFaUU@~ih_Tq3hLTe||}`;EDet`J1(XR;dFF1$7U`v9YWcn2`ktK{gD zAxq~K5S6C5;rtz^L>n(y2ht}87%S0F{&@_7zNCkc7VVD}X}Aa;_|k1O zvJk+zrM-G(<2H~I{++})U$EQr`JQKtgiX(5se^KlIU6nljA!24Y9jAYlijiuNba|ZuVZG#%Tw*;b(cC*ax|$ zp`8#HMGmJcnHFc--SLZOM@=C&=ORT! 
z8nE;8#VJE)Wft8zJK^9V;Al{@tj}UrCUD%Eo`dyw0Ei%LZ>d~@hzdy4epOyxGWB{O&%H6_nN{T+kk8+^?62;3iDoQtJfF_ z%og^pf5GR{G+QRs4oqu`IiDoxH^>+F`d@Ygeg)Sabs1;td%B{2sCKcl9i?SO=7Z6X zZlzu=PrA(9szz7A@=6DCrUT|x}-mhj0^FE*EN8m{c;8uoftNm?UXde>&+qpnK zsHuhosNgAJHUhHK)_P}5NX(;YI&yPD;#Gdn)qXqM4<3urxDwvId!iPz|8yy^GNvSB z!No2IBOn#C83-vh+@7O$5CB9LoLvH6NhgiJRvO?_hlc9z9d-qO+oLVb^jc^A@L?+c z!p+_!o;mkYXjhA>2$vyWJ76_T0=m83O7p_Phu^=w2bCx5Za?v6;rBELR})I{iGuP5 zwLRQ6Lk8uQfNaHOpeI=+khPYJBMzP$Pk}5Wk8y2u{IsZKQ|#%RW~$Omz!nFrz2)Wm zZ;p1iO7}C@R)6J-`Vy@4A@ll?fFPj_<wI_xav(Q%+U`FSOEC_$zbL*E`(S> zNndY37U;-;&X=ye$?h`mVk~$8Yl8ZM{(wn91mIbAWPzd#p(uZ>zslIl=wBCr$9M%3 z^1be{(Z(+tT7O6fD*PQkri}pKL;6TU!k2~d&6_tW8B-?x5DV^Ip8>$;fd$&~jYc!| zD<$5@`uS&b?kr0Tuw?Ft`)o}D@|KP%xaPDcK~9C&r+k1_&l#Lb_P2Zslg6pLi!Kcf z(-KI<)1GcL4Y&`s__@`8>FR4AWms{xniXgCZl|sNku_tc7QF3 z#n4upw(IX|6xNZChQGU-zzo=gT_0kjA|mLS03n>FojZX&jS0WnZZJ*FOM?sH$%{B_ zjRVGMWNVYdei48?tEV8*gDp-QiyOlox!q#+jSJWZ&3W z6nD>S{zzD0g;_sHaQ+VQ5M;9&-DYsiEu*G$R(7>buWtfz6_Z^z_8wUa>iFZ+7R0}K z^}puUod8QW%f7%~1HcIqqtt96+!n5ki<^xvb`OsO2{uUiOoT_%-gO-R(#cSW5cinv zpFErmAhd94u^cslJ3;_xwEA-a2>m$-n~uQr7b+IVDR3_;&P|d2co<6@Q0s%a9O{dT z{)fb!6lM^L>+XXdk{OlsF=y1jI(Y@FF3rX@qJ5sO)!<;SD$E<#0ethSfS+yUwMJQa zIIA&+pGkQbG6!UHBCh*9^w<^#E{HWe)%$nnZKPz^?%6QeU<&`po;g`;QFy^ z;s%vAXW^6|9Mhx69cowE32zPq4^wcBKe=L2?5{_{QNC~uh`4ef!K@n;7{U$+0tiK1 z^oM=`WH23K52s(h7{f*&95|(>S|)`a9&C}@co7CWQvnLQ`yW0;>X$MAar2XLx@ekx z*9qjlR?pBRfS#C;IhQVZGK@J>OVd_B5&*11x$S}Niyq_=#7E82EC+EEGIugu{chsl z?5`6_vkXqbOI$FafDqUsN3Q>JFet*fT&|pi_*@6owxNVGusB=GHXtvS0AN)PDi^gIZVgf)xmNdEouJKL!W7lPfO ze&#U&RG|YPQev%80FXfVqiq)KoLv``M8>IwLTyDZ1S{f@5miC|kftAYBiKt2`>+uw z8l0q$--MqG430urS+}9Ybt9%rVnKnKLPP0xwvPZmHtDWtFu1Dd=~7VZfd6a(sUXRBsr@gobm156cH6im5Lcy< zAicA_tpWn_k*k0Z;;C@ghRJ^lB7R4F5x{0RC^q1p5?$XyeeY;t zPrMcCEI3+0KU-${meXIxCn{aNyd*o@W=oqoUziE1u-^zXw6&Q8#SHWHE5LHEffVS$ zH?=*GF#L;~zJFtOQ8cIPSD+g}kqAwoCdp|aD?LJXT>My1N?XuAWFV39@&m*12&8V1 z6sM2_Lj#E(<7q&bo-Whb!VMZpD&KF++G_wk6ZI{?+TIqUIw%BX!0UC|RLilPqh2D?r}?gD6<*t99{cMG7)>Y 
zmR1lI*L@+*FKBQz5&Y&&Y*a>IG9TbNg~#F)i#r$G>ppun3N)JFl8*Upi*Vu<_6iWO z3=kla@Ys*-a$6{L-6>phNcj9Qis|Pw$2HnfgHbQQ48C$byO7oxgvEy?@UaI%=qy=h zwL$dd>2_Og3-s3ZaAwvO6gu#tBxr^LlK8_bNr55rBfA;<8XOGJ=S)HUv(wSo*rNET z&CNZImm9I3@jy9)u;a28pBw#K0mo#8e*?#~yUnEdE__v6|5%thQS^w*Z@3V~t`X0m z@-!&$*THtsRPAO9;N|NUtMPB;rrcM}n8L9`0YCfuOv6j%hl;nk0yf(pIT)1#`JX$v zG$(jw3V=ez#Cdrw1&xlpfSn{uPgfYO-{5HiIvsZ{)MjV30j);sA=#pak*Y1lvMjhf zL`qXDISFSGTU#l~y{pmsep)XhCtj zRnQ!IzH7kmU}ek*^zAy_4Krt8WAhoxZm_jeOb@`SS(F3h^rASq6#cbZwu`sIN*oJF zsR-%u6hq-G?g-CW?ncXAMN~TrZJqZfu~l;z(i|0X<83~1)u}U69Y{-nS?z{)XM&D9 zVDM7%pkh&ee9TLIFnyNB3p!M|T>*;%+?BeW9%oqmCQ}WkR2FeD^>g3(+S4;?9)7iu zyUC%gIes_2O20}KA8mFBsXsj;dDr5WHjO99UnJiJbEGRf-_(*%_PNjJ#PxWxOYjQI7Paiz(B^~?Ym-kw`p8V>? z`KmL|r6FnRtRn;aM?7Qp!GGK(i}#U*Ai;%=;LJ%%A1>sPpA+{6BQL2Xfp9oIZUddI z>$Z~YJRfe_GV)exFV=^kr$ZQejN$6KAX}9wz61~JcR@Aew=8kBcH z0!u*&tS@&4B)_1F?3yhKT96n38+jSj7&3Z{q}y?GGC$6!{|qX_cWlq+^#CKFeqjl2yoQy#YD!@rp5$it*w~_mL62~w|C_D*!=5cS3Jr8ke`N!v zzK+3P%JD9VmLyc4wZ3Q97~E!qtTO^$DA*+lPcwz3jknp4)%jbX$xx85;KW; z1+;5zJv=;wYaIMU`F0OL|7gGHUhUd)tk?7L5(Az5x2`_OsrWI+XeCBZVL+I(h1-p? 
z$6h#~rQpNEb-&<}{2mK<0p@*2{)b%907I-5(a{kJj^MJyYB;a34ZI+c1s{0FQO^fZ zg~VP6M^)ur-&2wT4qOd{o}>zZXCk5(?-p@9*8=N$Ms#385O4vW>xPfQC}2v;0fr-) zVi~&HHNzy5TI*XJh9Z()UBFvXvd1!yLYx5ii27i{EwWT+JKB}R1Gr!+788^m99J&_ z2_nK61T9?um`dLk$Ui`yP{~~&td7=T0X>z#1VR3l;H&U{x1()>>c#-y zv(DjS=mRYb^L!(WMKs4;u}unD2tzTqKc5f#`CT@mUK-PAWMsq=&6aX+y8sQa-$PzR zB4PWPTbzsiID=PyUUcJq>;8+)~4{c#W4J=MQPGy@K^qFlXl4j&|5OKw$vl7%cce^Gz?_8bOlGVhWO%Vli^ z;VF(#YF#Fxo{95I3i4=;t{}aAsHwVIYA-s^mr&z0H;1WPhGo5BXhFVeGLT8N;ECty zvD9REK`Ydt)R3tv0{1@!cmgWKF6ciIBLLo%u30~Ac#ZBr`RK@S@=KH79yBCGzG8F< zz{;3-_B14_UNxNU{{4yNVbH;|EY}XPP`FtOjCc8q(DVPsnd)gZ7t~aBWl%8)HKAd%XnH-SRPxj{`F%Dy)^nqh}su`qW`_GP|Fbc9C@8lPk ztELH2+`U`ks47fTux0Bm-;e@Lvx|kA_bR7tr07EH346$fD-u56rggtErI* zb&~Y3h6C@VZC6;G0t3x7rtmaJI7rYYn@6Prt^yN#|NjX4>aZyHt!)bf1XK`2Kva|t z0cnA)BO*O?gES1?4Hk@uh)M|vIFxjUw1N^tNeoEm01l<3^tT50IcLA$d#>->f9&hp z?3sCfxz@ezweIzt#;uoU<>%+;)ojVpMIGM!ny192ff38wiQS(w+Rr>4u)R9;dO!uv~iQslJBjV`Mqi(`nmoGoqUSH6hT*z_#{d1g-j*kB)WFX`u_J%!1 zy&8}{t0#$A@=f$RhNUV@!`D#Rf5rwYLN|aZ0z?>&3r41;NsfY~Fmt&Vx8Pcj#8vKu z&~|)_UoM}yWK=YL9e4ko1+a*0liGW&r3Crk8s(pX%zEcy?To%JMfVMNyD??a=y#&ZIG|bS7-Go8SZN} z5OMDE)I&%xtwxZ7u&OJ=kQ}F^V`M~>5f|QzQ80-Aj?(@fS=gYGY~Sg=(g?xO0qcW;3E|a;I+|wlVzj}3MbIvif z4$_TA{+sw`&z>#S?$^9-)DXIvujS$|NS4?x7;myPYIP{ZQElt5G`5w*xPGzOU*tSsoLLf@C zE99*z37n=wdK3*o821W{f6cAo>b^wP1M|lZfC%SZu$y?ju+Ia!l*B|a#Yp;9Y@H5Y z_@XL>by$7)@Zs#NRmn!WSPT~{B)4wdK>Ba+>Xo*REx!E~#3=60b?cVz<}&^`=lQ~- zqDXo@pxb{J5hGF!$Z!$rOZ$kXL+t(L{3f`VRYKmuaT6TCWWBxRwG0V23Db1D^7ytK z$0LXZt3sdA!NvaM-TVUsc-ZZ{44K1QLa1IsL`2WGZ$CF`_YHhJvYDHjn!d4U|9Xq5 z*y#0P`uXO+IF<1-7Y8Hj896{i(9Wb;GV4V%RxbF(JOHP#mzUCOwy`LlI!f|2pat45 z?Y!GML)0GN?|OGa62yBlD(`=!)*!9$@6=kyN)Vx}5BZf$O?CRF;DWW?u$E!X72s24 ze8m^Kh?-3(#w^J=dhHw)E9lu;gr;VQ3gxLgpu_e1{_?=&Q2h4w+S;Q_(O*AyJP5Tz zx}BXJ0hlS`n5x_n6@0hXia$GZ3CcWjjUCP|bT0{c5IkdTG;pmKSLUFtfn##5CKg+Q zwhhppYHwEM&4q0(KpP-WYK{OAyjGBla}+1lvysq?Zt@Qv2xmpdim6L%szQWb5q%3M z|8`lJn_EXn)hjU4{X@Tf?XShi3qteW|J0Nb!!&~EU1e!i(c*ZU^vQ|<0bwgv` 
zX^&ZC+JvuwWkW_{W>M4p`cwwUl{|A!KjX@5V2O@Q*pCmCqj7xUXf0EO8vdC{2Y zmyXVxN1YFiil?JCD|llq{Y<)2YWF7D-vMn$0n|1?eayiK8+cDDD!pV9n%`ng^nIUV zR?-{Zqhsctr2}#B-O`8;9FbWs{)cEx0--^}iD^x*yqU%BHlw<{v+q!`l*^ML`@cL2 zhmroqxWOQ|)9*bEzMc{rT8<^MZ-%y-xxyjKFE5-81v5=#nPx;vsXLDEJSGJhHD!D5N~d81GJE;mO3Q59d_!W*uk zZhMYfUg@Ql%;HEj52)**!_I(DZ~KMJ#V&@6nVyaZy55A?*a;B!#&5hK7V9fU{=dz* z)}@~YmG0yJ)r=Df4(Pq0s}nm2tK-q5>N8Z!fA5Ty5DXlvS8^$6KIXT>CWi`Y+%`IxCeU&qS`7ew=eAoaGNV3eR z_t;+*{Qksl2dpRb1=UkOM9g!(R$4=)yJI0!QQ)hStdN$gWGl!uFQ+pJ>ItjLO%Ow7 zLV=9tq9l>=l(bJzkmUlH#4EYy@1y*F3-Nas1UqU)z**-Zp}hm-Ql=m=wTCECYMyGy zS~yUB^{LdHBc^?3bd+Ti1;Z6cXfl8P3|y};2~KE6|wGwnsHS>_QNPCq|YcEihP zPilCNX>|TO;?KudpjZ;J-BTX{cJ&^s!+> z#8&w4UR|hCOB+g_GuCu4T+l~o2-TbXxO86d0jMF`xibBP=@C#76%X0}WD6p)-`#vL zm3KD|Mw6L$5d{))5JV{|`ar>KB`sMq%SH9??dGW%o#I@<)?#fe z;y3b@_|(@limzrkR10?0qc0%reovS|;S@>;7H}{Ts*>V?hjdzSMMNI=8L2YTU1?HYW(Zw(GB6jKfIJx*+s=#8g}7qPL(6htFyDFFSE4ti~A8B zi+`!4@-kX0{dbdIGds99)SFE~a%Oe*ELWwpoBp6(6dyotUL(alY`&0umhCvP6G=pc z_-zllh4rQgLW^z*C(b@7oc5tx?Q8oe9UoCEyN-K4VQi`kc#A3ZOSzjoS(%1Vp7H%s4&y7{fB zKzqWkS*5gXKQ}Eg<2x^`VwT%gg?W~ORa-8-{o5=`yl*poZ;*CcuUb^|X&$gQptj-9 zcm2T?W5A;6k+&ce#<=Z*-^Pyenc53@zJ9x?bi*J2ws5y}DX3)lv}LY7!Jl~sS{T{h z^xTYD+#r&HP&prm>af_TiGG4QhtM!$$jej(cwO9&6}+)*nkErYa{RnyWz@`NY%oDPoA!<-McjvoF%HV!uGL!xCQDHa*BSEV%}pe z9TJNvnCI;rTGts35xC)M&B5%Kua~kiQ?gYUGpc88DydvQxLT!`#*L>yi6Ap8IqtV@ z&n275C1^Kv2o%Y^Ou1YacFvr@6$f3sIAGJ4EBHL%`^q(R_bf#SO;2C+MeX?dQxWNz zTlJY5D-a-G2PN{O)$d9c9Y{N^0e6H(GT2g$N$7Az6iXFY!oSS_67t;Lk_P>($42Q_ zq@BlO1W{{pd2iS-2qm6fk-TV~M)6&o9wfQz+eDM@Zl{MR9pGd_pe7hc|4L&|$?EL>%4>izkRoMo4;AlQMWvD=wO5aVKnrpeA)A3{R*f2Xz zd{UEflUxYl^Ni7*znfxaii;9ZjFJG9;(G`U{Ob%r>lPZs_1k_vUm=c9XB<5B#A9Sx zDZ_pIQ?|PEI?=q>Sf?9e$P6(ukb9Q7x6aA!M|%RF^C96;)h>~S>A0BL;rT&hO#5jE zeP+ZQWn%}%A50*zXB3oL-htwizmuWQ|NM2t6(})_D^Pd{FkjsXexj7JadZgM2hW}m zOoIq-Tq{uN_6oefuO=rQ&(`KE<0e~5I`JkH6Z;5$d=q7QSc3ta;sOFH$EN_Ihz}rf zeGhc6OFR3Fv#|%EN|XG|I%Acm^OA^9Eb(Kzu!Gzkm!p6fVC_rr0#UlJriJ}WSJxS$ z5A2*T3iX_Lupw^%jnkm>O>qSa6|~-SgNup_yyxShp*c+s{KnTVKj0D2(cI)Ti!TIq 
zt2ge)9m9rl_ZdXHhjAO;Vipl$1Kw3r6AScSi4r$WFN^{L1G+6{))SUPH?I64K+_%o`zZ{ys<#E7U?rOAbPftNRLW6K~Xo22l6_h3Qgn8mfiL4)v>prgItY8sF zR}pDsgc8*H)AfXH`NQ@lrQz~-H`UQ(9ynx6i|}0;O52tw5ROl3d0gZQ|5G#p-oC^> zxG}V{ex0Y*{W0{N0G1y79xO4j72jy0I&?dj?yAW(xRnh$(9ilxBY2OGj?t&xIub&G zO27@jHXDN%97g zNViWP+Bo9fORLK{I9Gs&fF?oR0~k9I{g2@isS$bvxDUgF%R(13Kl6jG>>!hHe)IB4 zBY~bEUM1C*gX}E_Xb)JfLPeID%wggw?r|pzNv+fvyLT*zz)`vAQwqg+&nXU4AE%BW z1j!(q+8CGmcd~DEzd7sfsnaE)?*^drsQ2;njBrpP^=8fc!&W{?jr9X9P^i$p=Riik zWKIT$+FcB6pCx#RzC?&h2Uq~>?7-D~$reQ51CQZ?iuA%R1SY3k$n5{_zsn4fTE-ka z%`FjX=cAMqCZ5H9LAtGYU_U|Ei(>5xN;+pds3wSQQ{Rhxy)wh9+jbuM4k{;Rt*mrs z^W4P#uM}%TNZ0VKCxPNI7c{n=*VOZ{5NEHL|nHL?xv2tfi6Uk z8(iIr4k%7r2o8v0MmfW1_lOCY_Nud)e~$11|KvYf_2^D_wWKoe4B_VDA#r2dP4clA z53o5q6$GaTY+8ly6Z1d$f#(4ADNbv6QKDR2q@hU|9s5zjeK#zmdHA^Tk?PA@8-g2z zKv~GnO7}y{tFo`i;X!9~P-cykrg(O{eZImUMxRgp^xDJ7Tfs`@#_7Nyx-aOOidXio z5a#4_G(qbwrvEW3NEd=iK=FE|RiSxOg4!&9?i0iTTWYII0gAUjlNJ<%Jj!X3Cjc6v zLN_9_b)c7X&^X+J`jQRnKm5{YYUK_E?cy5@1X+9l%?H%5)!Yurn19}mU3O>ZD8OZO z3=C%G=@WK9&Jy`DAGzjQe$^~jB7nz*_m||BKlrSMU9hFg1>nvd9>9FHPC&p&Ks)09 zah4Ypprp%XQTx114{-8A*TMP{vynfKAW9Z~DnUZFnY<)peI8^mYQqPp-6lPO(#a(2 zuQvd(uR~EM3-0E@X(jT_xi;xg|Doh#CD;r*SK08#P+DA}>-NT!7)% z?WI#U-JHEY{~HY3?evyj?!S;J+7DH|M7SMFMD1{;phswEnx<^pkfDeikW-^bs3cq8 zTy{m>0#G}QDP8>rwP_+rEPO=f8mg&)Pw7ZZOqbLC15@ED)8AxEf^<$~QoldS%}>R{ zefWI8%V#L~`qOX9qSBGoONU}-ha(bUwX=UV+W7n;Y^e5gb=FR=@9a0p4? 
zeOk5~YhyDfIoNt=ohG;Pza3uGwyKs>F{HbCJYnff(n0M9#RYsE#NvW)H9Kn6g^-ZI zt>c%ECl16xch%C@iHR&1SzY?|JoQfF#)-qmU^qrwP}=ds|FVHlB9=)OqzmR)dHaP_ ziSVQFe?v7LLXBe2zm&Y+ngi$Tz34|>QU5sqxWLMtf7)PTUy|(%Pi+XkpPYJE)iMgX zY(Uj~O~gYzl|kmwjQn?-2fX8Wh2F?L#6NFE=IGx^7U7&?TyNAft>6k+7)y&9i&XuJ zL>}P$oW%!6j#Y3Lh7vhL8Q=`9r8Qw|{-ppG*Z;T}Ro00lkAhYo#LO5_mCH)(f1g%~ zi!~xTfCQMKdgPA>I6`FV;4tWsnB0Z>sT7d2$V6u+cM-vaZzBjyb5yDlp<|eM$dKeD zq@h%o4$L^}M9o@{>4?Ia8e?MCC?$7-ZSB&*@iu{M8Hxt`f#)|MiG#n5eIkRr9;p5K z$;JV{J~j+JK}>5GvZms9dPa^ZS!3^<2$wsf*pjNZ^W=8d?cfNBO1(*T&h3d6|ZBl-6 z=X;)&n1nxW=3o}NAl4yPX@Iw+&-~X27EIC`nB$G|>hu1B7Hc-?s*Rd6kq1je;8$I9 zxvOZBPHZwzBRBF}4ee0o5Dm>DDr~F%BeOhqn^^f1aI>e?zU*~w$F=_u5qi$fRll;P zLZ#e(yZa2EF9&n?zqFUvhy^>Jn8*crudd(0u@fN}F5vI=JBIN#v^Z5Usd{lzoHeozTmJmQXMQMOQ4k5pP`EmBg+5gMz`V}e8t(h zRLxSx)HO7`RAi-~5V6rkOLY<+MK}U%$;;aEz$XLj4A5Tw<3|w?S`G}a+2{M>eY)QW zjZ#}KHCTCnm-w;Ukcal@OGAgA)!8>L+iA&Pz>EXAO;q^mnXl-BVNp4x%MwLK4 zze#ucQ*!z4s;w+pzSnhErSFj{K|MBYGD*24eT)W0x1!NWdgvx}ydb!EJt&M)BCeuC9Ik^1P`1?;G$w!i&`b$g=J z>+)-D^0hJ4RDWg&On|qFY^~>(f?~7(fm1Q~i_k`SZ|^xwP+dz)UeD*}*yqB+!in{< zu-d`5Uj6Zdw0S$9r7OZb_pzN{rNy7`#wC&KmFl}G-}{uK$Fu(fI}w%7SF;-#=@7f= zz^Avr^dw+lPbP8GP*ou%|NiUOk6l0rv!7U9b~r1ubz^Tg>>MR!3_`S+*F?lB_7`su zUkl%dN#pFa0`+^R8u?oMS`|)Y6klLyq)Ib=dzA$x7PCpiF->;#>1m2L{CA!?;L|s0 zVlijMYLL(GC(LNsB&=l@^$pn6=0Bvr)b1wp3CxS7{h<60|;2;yo*o z=;w<}U&97CnhZ-zOEulgptHYBTUAv({eL` z*P+rvbp4Kmxw*4vz#_P(8)?NihrF1SS#zrqHDIot3{Dlr)=nTt_eb|f9LJsMnWC6i zda+2`y-rh0QY_EBcp-jdbkWZ#S3&e=W7&>p24!l_EAycZV1)PfBxDj54C;;_IrI>* z#+Tb?(C@es@H2T!Y(i1PCsU*cTVQSHt3a^fZ4!yt6WeT9YuMkb6)wK7Q{KDONS0gW zQntlAn93!?zBwG^zvs_4c=FI83S1*hPU4zmLHTPwi8vHzXH08&UhzMq z$G(vejXE4np~Xi%CM14bDU(4s#L3EdZ~dD~N$NHoNK`OlE#=Ec;URdBHGjg(cp4vX z3wb)kVa0J+RPK$v_1_brr=`i5F1!*W4wC0GdhM1Ofr@%aLLp9s^pjmpu2?t>HaR{j zrDpwzL&EVx44Ez49R6B~zf&(9+el~cPrep$WZ|vt&UpP0-@$&WdNxt-B*4Nq6{XI= z&tLF>nPzSj{z6l3JH^{BJ@@GGktMF9Fg|Zy>5VWkE&qp!G&_+`1fu---A=&@ zz!Dh2n;2w`Z1-Lf{}0xw$kUba5OKHe+5PZOsQV?9n;=of?att!xpDG*?+@56QwI1R 
znsiJuR4ecNuoPkJh56|~88$Dox@bMl3n#1$utEE;T|ml-AQ>GA|21EJqTGA^-CO^L z&+o{#UpgXtvTgTqAyAk3;^?6<1J;ObcoVq|i8E3Ae_nAf&eEHpi83Oo0Dh3Zqo_c+XmXg7e;eE;Po!yC{ zpO3UU5=_}{wRe)taW=M@z?jj%F!95Q3I{CPh5Vr(WJG#m*m^C5M(JL5VMVax~)mPq1>$|ReQxq(J2?0V(c^|>!8Z93bK zoJ!S}KRD;juf1%f5~`jVmEkkQ-F0gddne+LPmFzjVr*=@GSjiKcUCf@^s!r|sOIrr zc%Kpjs>lUprxNHUR%=hTvbp^gR$xG1X>CA27S-s!FkoJw-J7=&x=l@)Qq#r>^vTJ> zs3Wj)aUm}F6Qy5YPWIW!@>~*0Fr|ARP!=&>vroWfO*V#6?pR$Y`1pKKgC1 zVd8J=tVJ5howd(}$2_}Q~(4<9}Z6;wOK z8i$2#kNNLsK?rC!D6}B671PkrfNJ{;a>Ba0x}Lq_YgH`OUl}-T*oli(-_vKPxLDreb2s8{TNK*Q@!xP&V2gFJ9)cG!n$v($Yc|@L=-vX-f{8D`H~0 zIpJ^LvS}u31Xa|D#ov-uocj50!?xj72`CKF19#qWN`1;3eYiqG8|{`vD~ht$44J5lx-FXmR*zacMf)y z9=T%&i*kM^)WFnY(bsu7bS`wAA+tW1P}6Tw5$b^C%|vbd3ThjOvlnn`ixrF)_u%2= zbg@Va*qt+i;)|3ACAzd++TV1O4IDRabh!#LI&BWx zTc(+<{kem{MD#M@8S>ilan7!~lKhe9PZ2FPE5#ZdSvUZXMqxwKN4P)aarsO@mK-#{ zjbFZ`cd%#SQsO(4WlkJB2Im*F%8gTE*c`OSS0|PJ>-PKP2&@ zuTu<^`ef_rB$?LaBfNIAsN!zeyn zXPKY?)$%GL~$kC(BSFgG+jkM)aW~c7gQN|Z`{2Op z66%|37H-`PF=RZLU~7viK^K zvZ1SE1AhZXr(eKW2~NiSArdgOc06B{@Y=_J()&CGLqY@_X71 zWADH9&hQoqD?19fOKGR6+Ftj;k2CzWkcLhPg(2U{0=SN#td53N{>!+5w?!!H;a{c%m zhC~H$i|B$6tb5Cp_VH&y%ktvNFaWRDPF1**f8pmc1qMwTokl}0iS8NosuD)?B#lzc->`*xapSf(Qnj}0kt1P9qu|mt27_7nv(beoSo~kXwrq*i!G8^hUIhK)=(N@!7M6*M@oG9WH zJVXj^e(*KpfR9D_1Pn%XY#s89;w{sh3ZmSB<3O+Pu0yD566rk*&#gcxHYeox-COa= zdR2GmQxSul-e)@j;;;05H1xpl?ysGRdGN_&{AT2ansGeOiTZ<^h9iL^UF|t0A|?9! 
zD&UQlg42IrQwO+^{2qpn!|HPU?Y{wTS8TrY_~!?!kJ^oCaxNsxEONi?TfQm}9!XjGN()k!b#bd0xLsZ|0@JW$U7^oUFqx z>51*~dF(neXKF4bTBJ-O52H!_n;!L)$ub2Z?4D3)xGxCX&tE@j6vkVYQ;#_U7}ilM ze$Va) zxmTs#GugLv;#T@CU$rxm5447U7wIhd`q&90rtlPQYfOelMHX!kkbg}9!GB&wMJzWX zoQ-h(?;h7`2a!s(hbbaO%1xa1!U8ldi2fXNVChv#?0Z|XUfY*fTi`ytS=?H$yzg(b z@o^`=QOTmc_7}G)zM8@fHxEZ=P3oQMF+``xBeL>5XFeYA~-%@!-d(UpKD4}EHV|ggt@YqO@74!1&NP4fb z5vRNR6mT`5^=|j5=#cG)h_4BYo;|wD;l^t%nz7&2uIJ`q2Ri44%g=)oVM<{A@de9$z|j&4VSc*r8xQ zax<~ks8s~ql%KOyZOi1AkMdx@jE^#Ek1gj#CN_Z zyR(yEr`X$R%+I?sWlKpDULYdB4@l&0?Rx0_X5gSWk0yOEjchhke5Eyz^_@mssZ_Dy zFq)t`AM!GH*@EZWH$Bo-Et_5$=~roU zTp>YgZHjyB8LM=fus_>Sl)L!SANB%%?0OStC_G@x-OOV)xeA2AXm8U65BlttBC5oJ z0@M6*1UcDuXNHW4d&P?0(v0edvTNkm-_0f zaDc_=5Ga9N)5Se8=KOn&UPF-@m*zi!YjM?qacUGxj z3wnAxSC^dc(V8!K?L{UfeyL^pzF{PqTuC_~=$)kCu=qjl6l@STxF0)??qd`eZtsE6X)>p^8BUylQTP=pwNh7`jItrNzOnfE}+x*febgoZKn5;G26cL%7@6SPPYtB93D}0WK^ui>H zsKW(IPYZ|^*lHd$6Xf@k=agZnzY zii$yszpJZo?s9S>csUhC4^d0k(bv~0!|GC!^1*U9E+MKto()pC&FxF2R6G!@3-muy ztb*3Pv@ag*a&LoCN67ZpupE=Jm$7w+f=irZKzx(GgmH6s9o}b;|h=fel`w~<C4QE%qMo`;YEzt6Est zmfPG|lD`cjs6cR6SU!_&w?b2_ReG$gU9go5>Ha`AImtvpN_zU_?2QMXh;sk7mDrv> zE$y5qM~^y?l2>c&vrsRbGVobM8X03FED?9?Ukk~+xZt?CJc^4IuvCKTs6uV~mbUoB zsP)IHnj>84>i3lsuMNNAm5a{R-)u_D3JRI_JwZxJb5F)2Joxo%Q9cu3Xg9{Z?F=r$i8Vg4 zzSN;~-&6gxP1vQ^+))`MfnPBbkO4cfpA=-p%$zf8hc1BN9HsqHXbg!cme>BSQtinXRHSDjxc>-r|U zU%0MPIi}}qN~+H=W(q7JxY@M7m&`FP4DMgPT36^eZiZ`E7)=~5ni)>;V0@6v@^pksi$!c6=sRyc^z?J zU(e055^+kLsO4&SEhlt~Sznah?W_I(2uHr`_SB1e#)o6;FW(p7pgD?3$!WX}>Fc9`z z=yrEFrKN&9?Dqp)?s<6m_RR(pA27%TK|tgK2HjnN4v~KySh(v*?QbrZ*X&L;U_{$7 zm!uLrIwU-fA0N5tM!0~^fcN}#bs5fn=lgC_ZdV`VYX1EB8=2&Q{S!xyh(YC}j$sYf z%rHn0-`(0uZLP1Urf_DKbqwj0evC8(VaG|a7RYlcz<`HK zpPG&?ZKI?fEmdmWt)BgNks()cRW_++3`pI2hRO{z9E)X zEt=t7!3!rV3iL&4YPNf7q~zq0JfiA&y>kR>rG>u8Dtl5CNxc_cZ$x3wZN8JlBUl=% z#A5Tfs%r!h=(Yu%{fj4BaBG4m#CZ+5N;WGb(3yG7+&RZ6&VTw2JLr{Kjm7wv-U@U2 znVGe-6+9u>k+_enc9-&Y)%Ww0R#Ys&iG82u*l&%YD%RRu6NH(4@q=;}t7DH~w;W+v$>4;Aefku4t(Z6JDopSY 
zTJC-Rw_Zk+xY5z!%iokg5se5njR+W}S=NVP;_5G#;6LG;u#CmA2*I1lDFrM@ZL22~ zf>*{>uAhwvXNu|Z$e?)Sp9Ic&tyNSV%tqbgebJsv5?7Vu2PXQrrrnZ~D%7=TF;`|; zi6`8RC6BUdCP5s0O{|#P3;8@s1$iHgY2$+sd?`77mtBI^s{rsx+ZzFw!Emm#YaMKe zvVeeEij$z-rjVW&|_X!QpbHx)kb zeyD=A?GSh7RZ=S0ZX=bn`~CTCK|yjQ8ObvHUC_WwRY1b;jiR3)8G+jRHfeeJN~K5G zZ2!t)wNGCrCnWX2k8yw=5_A7(r)ZLdqvbN zO1@Y_KfxZMRFYp2&IyU@CIFp)J)C%=muRQHLU81QwU?H^syI+^ObZ=`}wMd zmF`h*>~D&1hl06hql)GYume+zDooGMX9^x*!+lxF_yMo>+ht?5Oc;oe_1mRQeZM;~ z+vz04eV@MKaas8J4OLiPG9~6a<4?1jS)`q&TMii*ctdN%fKiv1yI5n^tX?muFR4mUc=x*p zK&47tp_b9Mbn44eCw5*vA;rP$uI3m4R*@nuAzX1NoaaMYn{5o4o^2~LQtElbVV4-S zjEq#&)Uq$lW@&9oqe}kV$iGJfn*5L#O^5TfyOiSX+dDcUf*opNxVBiRU?V!g6#+Om zV3AyD{i`7OgOXiva5<;=HhIj|AdQT?u#X?L@yYCJ2^vZP&pp%!g-?|zmXIub`TZLW zdlb&E@*>aMxz9{z6<5qn1=oy8tY@-uB`LWQS_|BlEvQ&ciSSQ4ndRJUhy`toi3EPM zsp*Ef@Ow%2>nn0fm#zmy1_y!Z_YH^D{+UGH>#mwgjcmYORHyZ zw_mOePA9+geEbE;UjZh^QfsR9Mu@_C$unUftd9T)2k!@BZu}l()T61{LPA28n(Zs? zTf|V&QhU`KOI>f?sU`432=4#ye1zSwcEu`A(l1GcjhY5cddVqW?wMPhm>V$vr(N9? zqj;Y)_uR9Z&B>{BDzD}`a71pb)N6e;+Ry(o9=u4q&S?G7F~;d<1Y)>WO7CnGEUl8< z#a#Wijn1DR%&&l(j6N_$+z8{tJBT*qeK<3a`!d9{BDD{N8R1#~n5;8wTw{N*4huT= z)z9=dC$Y_83WQ2^^Z}+u4Bn^jM3j`Z71$h@J)XOOkzkX4+4`-BL3ojw6>=kx_!IH^ zQU>Qjqtq|#cf-e7hJ|*Ta0aIiydVmKM8EBZW5-;fwL~gZDw|J8?G<3amd3CV&69&~ z;Q0nEPoS4qe+qII$5m_!&!(WSi&h zgV~lOAV+-#`c29ei|;(GyLZ#J)Q?`#1!2r%$QAl5pN+|H*DdB|mhy|4Iq&qulD2ah z<|SnVkH-xW%^sH#(3FEEj--zl@XWV+jG6bJG(AcWqka=Ev$nMVFiGV9wmjzDY2~~% zB6mpf_$(8!JwsFOe{4^aioesZ#MB#36Kd;(EKDgo_p|$o@nX|}Df0|>p&m>-PE?N7 zXYIL|kGqX#?;OOL_Qa{An5)!-?XmgDdjl4MEq{hs$AMN7K-lPJ`YMTCzTW$ zPnjk6j8m)J7r3qur#Ce|J9{)DxFxzajisL|i(}$y;}vu6b*G8EI9@Kdd7ar_y1 z`$^QrM(X_JD7{xonM>L#|FKYkc85Jp16l>E}jhFFB45wr&@q8UU9cn>Hm-t)v`>4b8ARHDI88lbn9Ta>dJp-6zjpkh^n zz3H&Wy#`L~mtT`qcV$vu#l-NNHSt1&kEu^u8Bq~8eVgB81jW7cP3egd`ISM0vqI*1 zHR~aAjCPZ%hNZT#r8=qM5O+CRfFZS}zk<|e$V?RN;nb3hg|pds$p=|}TdlHs&~`g0 zb~>}}*(O!$IZ7?n!k)XmzFL%w?YegQqk1dVL7qtSnIxs2f2|yaBc83az^UwC7EaxM z%6lR>AUc>xb_}dhT#s*tYfWuOoU%fzfpZF{+FkF6t*vJvAsUjg{CFKan@;(!rUNg8 
zqba6>s4{BO%{=L3b=&RI@xs|GYdwGp2ScSFWzAc{amqDoeK=1ZXJ?dWZO`_?SAru` z9_SMm`e({>nDK0Ludhzn&!24aLYIa%(b&6dY9$Xo_MNfGo!9qXeHKAeytX!s0MMC> z$jRk77jqI~qL=y*oLXC@M+E;ddQ4}&-B$PC-?L4%Fdn}ea}{-+^r|=OIp($Vex2wo z6}ZzAPM}BxO!}=a+P&E0g>`LuPIeR-DUF=Y=JWZdmwe`LZ_2v|ZUX5yfO($M z6lGs96qO(;8v&qI}aRz~FJYOnC9LR2u^ z^WA3FIlEZgzgGTK=i>~u#-?AnS#eRmDzcYyf=(jm>=*2L$sWPub|)E7@*E0Yo0)xv z^h-?MkmD@`vgXXqRjT)Ej07<0&tG6W?NqxQ@u-@xASrG#K+7Z!F6lv}%mDxvHtKBn z?PrbQsy4o4%&gLT1x8S}A5;3oec_u7b!iDer~8opc=1GrfkQP&BsuYrzh`9PWu*2P z1IKLu2O{c60!e=Ips~{D!TbWV^X%zU8DYEFxz@YF1kZ7kSl7V?%z>eOL+92^#Qefm z$lCYsKLKXl-5QRL6CX8ZwikJ@u(k+zE;*^<*zpl+DiY@K)nC%cDa;2ud@~>`q+X1>UoSeUDP=H-NmPKEXr$wo7&4#9dT*s{Z9Q*j7zU#h z;rjlP)Y&XMA`T(8DyrPsX?^udujiKX#*oX0@8<0%fH~i3!_;hA6n=G%V+d%$lv^b_ zU*<8tF7FI)d`-tN6rLa<$xM_h&m*JEmDml=D>Rc_!Uf0z7_A-C#^|HSMT?GOy)X(+NpB;d75ru|LOED|8BFpnKDl}{14Tzhf zm9nu~U>;5@du4vOfp@U5cz!is$VFM+=eCgErWM3d;RzSo4TRh?G9siemCUF)r9-4C z<&>W2Um?no8L~dy^v85-Vc{yDVO=F;ncDeIr75}?`8qvSH0Lw?gAX-c{*Za;tIwxu zlJF%<_KK*N`u-jz8|$Tr{$SE9*-hI!(^=|KymGq}6H@ZV6JI{`c=eN&NOe6D#$7@q~+u-U4qy{9f&n5 z<()8p9@t&%3r)Vt;Qm`{{2bU ziMWLCuCl*sfL`94WTKB?cfQNLORI8jQ?<)#9OA`8{>~I`z(=!QyXC#NgC+FA!QB}Ug7Blz7BG{KRG1ov`K7&0#IsyWetLVM;QtgudckjHI@14%? 
zqYLMiyS*)az56|JUM}Cs|C^1k+d6~p(tOvi+8|P#i_plWU~lxvtq^;y*ctlbg_eoQ zqW32L1G(jgw1mQ#N`AFd_|?qngxSh#ZeNC>Lt?U655C1H{P9lc%S9q5e&lx;B68BF zQ(V+n%l?}JAY0cTSy-8YLg#jPgN{*8Z&cOqiIe6PrFWc{(*e+0>CrT6cD#W$&gvQL z(+=P~CdTFDq6P*{a2BjezJ#2zG&ZWnFy&Xgu?~b~z{9#oED&%md1YHDAybpC6Fxj!$vDU!FXd#SvXX;Mx^PrAX-|G>UkB!(`ek+rC3}o@Vv_fYdxEoWHPMYQpT=+{P=xmN1kJr{RyX&Wl^+IRfV!tkNmNheh9M?-t$w78CbOTV~)=y}+ zmuga&O22GSLMZ}K{lmm}2)o`hGeGRTwM?K(muyXWSAdjMeIt`~B`DkNYh7B|G?7GH zQ#ei<0$xmS7_%kAWw4dW@J*=GY696wq`;i!M-y}2w*P_s#F#bG#lN6nWAGj6TF{O8 z94~6xeXsfN7%=K=T;SBcV!W?WpYM{4%u{g6V@x>(F}DVKfMSONf)B{}oMhnmOHT@o z4ZL9@CnVxVVw=^#m*o`%(qdkMSLJCx=k@|8c1oX{`o=S zv_gJ)zZDy>dH0#7@?vGl`hm3HnPPx%sc0Wc6hRXtC`$0=E>lg7M4U<;#TAoTW4i`X zIRu;$%g&ss4{ehY54Fu9WlckSlJdutDKv!4RKkA#9K`jVi>n%FMoP3g%=h|dwvAt- z;J;d@3R|v_2*Al&$LAUs8jBUZA2*aT!WCC|moTYG;5-67S=76Wuf{=4=pm}!rA@`g z<+&*e5)Up7w$mhS$VUBYEN_~cn?GDPhFR_%sl!!0m^Tfve;I!kXGHn?r~Q_~)3|7`4c+G>q!1bBz|1QU6Hzb@ zq@3rp)|pO9Al2yb&SYG%=x@b(S6g;e;x-n?Vf!9Nxu>TT+(W5o|D;D>yy!i*zR>dS z>`fpz@i#NSH$spJbezQ`nH>WGj*jBcH(doGc!TNl3-${w3FX)@0LI0L>!E_GA$6rR zmZcGlkcubT8u$4N`Z_V!1NEZy4Eis-d_yEl;B)^8M&a(a$A_nWlT*78+14UF-P}0!w`3W)2OJ|}gU0Gby zjr?}nO@Z4=T9!#D@p(4+#UACXYh7u@Tnkx+Y9Nqf(Wp63M;8SFhHy)o@wqegWr|l| z8~zdyQ{O+?rJbBFqE6d`F%S=MOXp?^4Xw=S{fuQyuSvbu#6o0SVOpeQ$Du$KDant+ z#Pv)_pG*GH`uWKTczeWjN8)HNLuL}aO<#Lq%rjdcau3Y_$<~YRAh8c&oJpP$$GwF2 z_qLc6G=(ij&mhlJrID9<96+yg5aZ)MVZdLz?fm}rd6bz1>g=i>d_M9kWMQyOD-B*5 zaQ)h~Yd3B_x+|!$zo1L7T zqH7=%du2IKcQeK^K_z~)*t$Dqpxk99*Qi#~qV;V`%C$HlHC5HAIaQ;9;IObhF?GJ^ z<*)E^NOvgyhF(!Z^V)`n2_LoT_faBhe1)Bzov8RPU+(gK@pp`5Vq9Tm1b`2utJ99> z$ot#7j~^d73PkaohmrlFFIsmxsI`CUMr*6WDYEU?%N7pi*dX#BaCVOzb$A|JF0UAZ z;$69deOV_IA4C?-J1;)*4MSK0H4GQ4a)LTm^&~mD24{$%dM!Ql(%PI!218!LiIOrirAY9b};Ha)?5pxTAxr`GF1QwtEu3KG|U0XX3N2aAk3+n8I z&{5WUEm&`ZIDWlgKGUF{9%^|eU=@jR42A4h%;Ofl!cI~3xZunu#WD7i_Bi={T~^fh z?CKvvRDIbx-LTe$h5VKlTYa^AptF)A)y#7H{Fp@X*2a7wSSvT%e_AV+t6smpo^9Km zP-WnN*#8`b#_Cn>g}|4LPq?E7dyaKuSBVMQOa7JL1>LZ~NkanX4#FWaR>0yK2ObcS 
z97%x(wxCk6#s@t>4f)x15}z%PW%hjwi?sW!k*ha!gL^iNyv7r6iUVpz@GU@x^fj&w z$>+~$TKWtIs()-hND%FvR@+4?$1G>B8rdY{@Z9y1myWqy8l|zXyKe*co^%Bsd87BZN&tXd}yPwRQrr}RoHIeTszBC7A}P` z1-scN+S6ZTbnX#VV_KxZbq>y=o?u8c#hQ%Wxnr-=f(rbnt?YF2-==PkI!+8+nftlVUo)x1t=>@LsH~xE=d2t@ z&bf4@8#(#BQ_-$|xrKrDe9k~IEWptrf@}K#tUzb@SS6oMlo}_6;E_SN3qHHeu%gZ`02kHjL`?9RyJ^5^4M)y!Y#rq1J1XO|ye6d@Djp}L9Sf8G~k&}op z*IMc?JnK{zX<(!&*ynhfk)ou$P}mPPCZAZ&Yr+}lMgx7{=!NZVHPOUIIG^8KSA!(O zgWo?JSM|~nPAgX!!#(G6*ukKH02{}ySdJZSa61i{<1iWuNF2@0qn?ErH=umX`YO20WfB+KP&2Gqy49#><8YQ?5UM3VF0vxDb^ zA=`AaW-HEbyE`=BMJE>)zpB@W{`%Dt++&2_8ag9|`@_}dsVh(;GZ zQd(O-N&4SGp=q4RgWW(eD#B3YyM~f}|s(_;56%>K&XLXO#~^3M&Z#X-6a)VD`b5hw%izr2ao3kkQ$J-@a>Fm;;9s z55GLsX7~emay7qv2={}h5pR^kBM}<|9}JU7Eu`JRUg$Z^Ujt0+-&hA!wnlK@{zxvm1qjdEqh zE&ORPl*`%6n}QP#>lXnDpYG?oEA5!fqzupkAS6Kn5<(tO=`k_GDKs(Y%xiMKJMzb4 zfH|86CbEO5BsaJZtVW{owC~C}2K%l`lJASZ5~bsCTs%}9H6{M^ye-U~PkqdPa!hfV zGMk;Msj+EqZ!cJ%F|9Hz7X6qr(mJJt>wn#RWuI;|)sBpO+}X+}^_7OKfgBZg zO5B;m$W|@oPqnoMnpy5ZCK|kb9kcxGsF`>Xm;%>Nb_PX~C=+ z1gf!cem;GwTeH5E(`&LL*Db-m_TzHOwNf!p_vhN;Yeyka;xvR}w@A6CKjN`MliLcC zzOZ+5PObS>zk5%Ukd+U_N7~%*wm@r~Ga3(-zhA{lX)QV*tFxogE^%ls2Rf}n2>{jsQS^Y|$!5FVa?tSi)>Jw;`f?ol28d2FkY{|9(eDb_%2VM`+$9l_3HnwyV_|& z^@{Suz_c_Xh<7kmd&ljlEXR~uCw^M_U{-Y^Yw-j-Ke{m8F`>dDbzD=;8YuaPrZ%74 zE*GemgwIFJs(}YFSm_Z_8M&cQ_rwFjjwtH14T%~w=5eQgV1*du5z!VV(qG+I%*FA%6r>zs*Usq>U}c+Fi>n;W7N|&d#5Yi5@7RN847|Crsn|JNcr3z)A}rjiP{> zj|g`cjY(LYX6*?dKhPw&JrPDy>Ooe}lp2PB*-1!+_1j&%+RB^1b?JygOXpr%^5^1Z2pV^ZqubO2J=P%wtb zCr|&qIq!N0){A-tcl6cCIJkW*bLdHCkB#khR}AIwoR#Rj2;;p1vVZ21j@iiXF z;ZG#?$Ok6M>XXtBW-MeahpJ&1GWr4Pa%8YNIX=SZgK@l*F4p!k`}J8uQl$Dt)<6pmAV6fEtB+;fasT12c4z;Po=>vts`q!r`T^z}P_7MGQ~ zt3z6y(*tnV_ZUS{t?cq(;7b)M&`Oxlr)5lEi5w#!>=sj~{FGyDKkoX-(dRTWn@F>) z2*8yMRUihI!Fwx4NcypxD(y@dl@7B_vJND5Ht+tZ^mtTYBM{11;WnIJ{#mUFJIQC< zl)pUaHeaIZv7jq2Z;Jr3KC|9$BL&@A=ig0Grt%-E#5EgQ3hODK7d{ zmC3jM?g~XxckC&#YKNVbT>{l=fRwcx7V0V(fOO#Img%Gy);NV1=nu|J`Uc!`PS!LLQXP?E>( zp3;?Uq1{flLM07rDo>^__2ne_qIkBi@k0(F4b|-*L;(wprDNv-$62>z`MnFZZOV1L 
z>k`*x%(XHf7SCDm>9~;=}PBKg&7XrL;4f0RIFmh|)31b5V0Kx&tAm1*`H zCcL$$sH*=N9zAthC8dI(+RjCRX{A<8(m6kq%5ubxP+Pb{_6j_ux#7K|0&Od=Q5DX) zU8+S4$b<#Ue6DP!uK9(gl)yhK?-K8q*mH`t@^!x8_Ejms?XjwmHp-`@!k1=N0O8VCZJakW)ut zu?d?5WhLq*Z!4i(2WKv;`d3*)6@?|Z!YC^r{EfP{7-pmSyi}6|R~Q=;38gVj6Sp>) zjVDTd*UFln@^$qzN)|fR$y7CD=}qu$Uz6yCTY1j=ockVe=m5sik3X5*Rbj`g~xAik-;=3|XOazHB>LJy{y-&gZ zEdGpm%*1GnQE`Je4{k?UjC1>%ifWuD8xcSO(N9xcm9=40iv6IJAh<@-#{s|DyT8&g zpC{uXmqm>gO>5Vh)TH6c)hcPeg7JmLYg|?WP)`HCS;lcFEgeexq`%Y>ehHb=Bl&gj z@H8XiiY(Ct=|$k1D55r}?1MYrP3~0m9f72{4q>+{Wfj$1P-2Y0J|6E@fmReb0-p0Z~ON!myNPn&1a-wa07RPv(Lqkz+dm zjQU_Ulk!VeyfDp^vv0&_6^wvNo&9 zj$Ofy|G>7@=0r|TN-zHaSDgGUw`~Pd)Na^Y_}j}?0YKQUIyXYgW3VfcUB`K7ao0a1=uPJC=M8 zH?nukp7LJ#@#aH%W0`Hr$3fmbXh;fpQ6<;~35MzEVC)Pt1*KE-B+aEu>6IC3v(xl( z8Ja^cF%Ym}PAq|cd_{FC^8+7$SD0|aH;6gFZ;~_5XuWI|tMrqOBi999cZoCVWktPi z=pCyp;dR&f0F(od3m+jV3ABc{K^r=vQ6HAa8jYKi`h*C>50ra}Y7^F57*X!J z0`D~R&88O@e~gOf{nh9{=-h+yct<-SR)dNQr+UA}T99VQNEY6=Ru#ztjpE$%Wn%@8 zE*XDS>}9Zt7r~`(jqVAX#OK#MHCA4M(k-u!r$`FuoDATVW3XiB$lWNy55`Q_7>vu? 
zrp)68;!zGa_6JIlttSNC;>bV=2nkgbMEq9n$xv7?@DC*3beX5tOWTCM;)YRmPsSE#@Nm~g#Lg67R9biReMgz8bPeHF1nPgD3FC=LZe z0Nq|EWGA4VN3e!Ik=5^tP$gE^ zXqLUqd{WB~j4$+tcuUp6A|tEdb?f#Xiho)OXWT7B5v!$yx2ACWgHV@-n8!146}#_u z;-mwuBx-NP=NK3o`t+p`bh6FB7AXv!c>Dob4!(}~NP6S?_+!pZ@d{AX)|amnYVy`d zw#bQU{)f6JjHJ8O3sQo!10{DR*$5fI1ZC+iC@pl6h%2$ZYSw3D4v#_)t0w_274yKz zO>-kgxa#5c2G|-ddJ6nYQl~OMK8$Bk(8l1=fkim+vL<~y({mGa1{IUd`I}1j7wvsY zjT*Zk;Z1c4QLzMZyZqZ-sjQrmE|w{fD(^SF&e{%3c@C<<>!*Qrqdjfu@fyW1EV3oc_n2G2*KyqAsuwR=E z5-v&_jQ)O(3mt#VL0CDX)WL!L4JzG(?(;bf2~UviLWL|O&{!Yvm-sd(`os{Q>wgOU z*bLDB$?qI`kX}0CQ(ANq3k9Nm$>M8xfZ;8 ze?7ZvQ1Qnf^lpP~vl{?0eQi(iFCEvlL>(hJlgoGRT!8gFJbTn&3 z93Y_4eWyBd3o{S^&IV3TB!5kY)7tV?3;mHXBR>-le#>&krg^<~YNoPMi-K^LvTpm^ zpL2#9y`%O%k5+>S`x2_h&8ZghbX;DUqc{L&vZ2>tnHtXS+=O@!_H!^}EN(Q%Rx`8$ z9BimBlAkU?8CQ)mXgQ`&A(IXUt`^W%kEo!m9TbSoi{Ipe-k!&tV^HYug`bV-+uknk z(2e0Sic6M%#w=Fqbye2<{JH_X>!QaWm~kNv9i+5_F^znKk-QyxY{4^ds)OSxg69eP zn6nS(ioVY^(0g1X+<^Jj+?MRz<5N=~<6eEcgv5B|NQRq;&8Iv!6rm}k4}T&H({GvS z{dcYPrj%Up&O({^SJF=UO42}gZ0w)$vicSyD($-X>()>;IZ{N}-EVgv4+;YTm(?kL zVy8-BV{&5a8cEMM^p=N0$LILSD?(CRAc2QO?`;U5AY~4@iN2H%yQ)#BCpFoWZ4I#x z3T^gYKq7Nxg$oxbv@w6%YL~RbmY$Mtwxq`CG*C)hLkK)qd!WF_q~)Gk6U4`Ir-)yj z6e$@!UL~Mg!_Qq8kC7cmEZ%j|>*AP8!jw!wlo$Ug-pDNmY0aEeBIx{tPqX_Iic%8y zS=Zl+#=mY%USr~q@RAH{YRYJ6$VrL|`#zcD__NrcJ?4V#zhzfCih;$HaeKM}SPpIx zwt~8o3z~|RxN7amVhvflbOE)Rt{*a@7kT`B11%%G%(I^d3uH|E)(=sTKKtmWv#P{^ z!ML}G6T_IfpQM%6PU-&h`k?Kl9qD`rflC{^amVP>y?N;qW0T*;4#W;|xMMjZ_n&tC z%0U8@WCx_mdYn;yAPE0p z@R`~`q#|pcX%C8|AvXlsZs@ezQTNQm0gL^y2FdcjDDM5KTrH}S?oK`$kMBJi5lw{-;I5Ayu0r$Adb z9T4E@3l9DP_|1R;-wnkVlSlds#vWxq9}QT|g<9+P81PXud*H3h2FE7Sf?M;m2w~%0 zh$ROSha8Ft?#u|Poj}$MoYoJe zrFD*6C(iX_-GyD0t)@NeeqYs+Syz6M^=6DW)#A!|vY;+p||kF-SQ zcmmqfDw{N5O*H{tZTaBeo|UGKjjq&3YOQIhjPt{L|3EDpR8@zCXXhz%?82M>A5n zo3z5)Yfum4G0tZ8F-S^AE^S{(Mun72+M>@>QISx%rZ>zYaW>-cH$H^)@H6Ons)$yJ z;%yDR1Lo{HYE$?+T^uHT#0W!b?<458CJX6AJiJSicfAD!_EFBs-@lI0(Gix}P23a-u#kYKV}hpApjAhh$XF3eir>_@e85LyeV*$@}N z>~d|FS=|Ep>f!}02XdwesneM9y@M=%&>?LjFZV;$SegBkBSi&Z;MCO26M)(a)l-BV 
z1=%?vGAKq_v5MmnR5)y{8T;jPtFdZQn?nI41lZAs4>X`&8NL1;su0SW{@qP;-%D9; zFSVg(V}&MCZLvY!6IH?;m+czrHQ91jfepGv3d#7g_#Qhs=PY4)ysPsyH*{pbgJYeQ z(Q@%Lv=H*Bv4(T!d@^)^HZYttqYoV5a$myk1bM_INI;2r3bPBA>fFA4I}+|3eYOavIteW7GZA;QLPnPFq5<=90AOJ+OZYar}~ zeGqw`R15xn?LNt4U`C-WJ&#gJf;EJEHT48;hzq*ET+Tn@3uFFP*`xN&57HS1%{389 z%Z4k~B!Am+z}AqEeawXNi+lGe_g$X--yHPnAcpAGNp{B+*05UNp98M@#)o+k7d+3Deg=cmVL)Sq#9_R=7IAo-~}b2y33v|G=i%pzfm64 zJ3z3Yi2i%!_nKdwjuf*V0K8;XjtKk*t>U%3XVc_9$NCU_8g2z^$W%Ym6jEaas+p3K z63C2d9UU<M9JZ9-6|@qS8T#0dc-5 zJv7_L&hxiVZ1R_T_f3~JmWPI8IZ!fPeGF19?)CA&PN2y93^J1b5%mJ1U?v3$HX;sh?(09tt-oE#^GojY!!@*-P+A#P->I$R*$aTpuRV3MTH?;!k zqnIR)iOa5$yb4q`;K~7MgYn#Ok&Qm^8*)6GddDNv!>m@ol(miW=5 z$;h>p;cav3x1!&tE6PmM3vvx3$mL{Iw6M&sVE!|tx6K6TNS^5nb`=uKhF1c=689RR z&p9CW0&b2%Pbdt&8Sjxx@(z zhw_K+DBw<~d!*Dz_`k|%M0(SYJ!?nknSiGz;DrbX@-E52Z*ATUTm1$yk$@Mm|F;lt zIcNJPn9bbTOecGJb+&$4{os2o=|I2jTeIx#t!rIr8qRZ;HjV8*vNeK%G8 zy4b9eoet6?Z{G1}^_Xa-r>F4?o`eBA(?W9^aGi(%D;jWfkzTrFGgPkSILBP!-R%{P z=1thc5E7;JKIxSp&^`OmFQX=pu6dKi0%~`7bogL{zEC23FMW$*@MbCz*QZcwlBe3< z+N!%iP+sA#TRT2n@?$T1z4S|*m*$!6U2g1+_RQ0_Bzp|Bf{i`G(4~ciVVRV$Vk3bM zgr5i_y7g9aX{om36kM;x3_sRZqJMTQCjzGH-H7etc59MO#}1tr_`O3T_MZBW(va6~ zF8LP{Egxg~Fq~Eo7U-dJ9gZU`G4CNx;2qee*=M++Yj^`_na0|0ggj9v)Wc$hRutH@ zcKDKKvez?O`p*CGs3;SS`2%tUCcO*zsbW|35aO!kWc&Qu_~o%%z>C9lSbD1@g-KW1m6!A}j{E~jmd;)|^Sy$ay4dv=$ z5t#xSGay0YtxZKs8rST>Tj=c;m6jV$D11cDz)=htP|gSep_1+59e5c~7D1Ba;BQ%AK4cv0U+LC=k;pfFA#(z_{R2*ArL1a zpWgFeu&M(V8sH5==yYs897E>##i=V~ma)`}1PF(svxP$*T?U#P&V9zAlg}2GtNi)Q z5egquNrdg^*ZRSC_Hv|vYzjJq{eguB2sUYb&K*qt$Zj3b*ATV4ytOqYg|<|Lph~B- zs^n%F^(O*ku#4iAKer8FqhS>BNk;8JZ>7$cTIc^%zcb>+X(G5=ur*FBZkP}VhK_s> z;MS{HYn`U3|C%1~(aF&g4hNaP@y;f0`~HCplflK~w z+zm)cPtP}7!JsWP($z%?2nb@hOx_6Uo?9qiQIL?3fS{;7Rzz&~&m%YL{@h1W&dz@d zt@;aSrcEd)Dbvw8;JBT|!&BWl5Ig`CBBP_JE?sh(cJs!+e0?u1G|px6a`%RO6t8?5 z@DCVEWKJ5-6zXZZah3Uhwwu@q&Uj znT;r$%IU7u;6PNUW96SQyhN6hnJD%4WWZ7rPH8>=DbjvNu40b&z1>V(#(luqe-o4ta{HIL`yiz132w?+aj3v%ZjS7+5O zE?ZeyStUn16OW8#B>d$5Su5&%GP+!M{2LUBlle=ED4N+5s_U={3vZ`qS8|%Ex)G{V 
zJnfEWs^q$PQ({*zI`wz1rs9xf= z`Xf;m^dnRsVz!jf-7pd!)9+X7UR)7!Y%xb;)Pfzy#>ZzW;TpKF1U^ilt{vtFT;M~K zT%^3#0}R93xrQgj_>hzJ#X}*Jl?j-)#=j2$i{QI`+0G08zbl(K^FV+9|5@6E&_o^? z5rr1)eIwf13K>uOu*m{z8ii>7YlV4QCKqUEs-V!z(;7Kzw^c6yfa!J9*$+`b;C4}F z7q;ckL8&FocW--24DE!+eR`%Rv$OIEoNhR?^ROyC9VZj;%-O6T*tGZuq%-z)YE%C& zQ{AnJe=*gu6Yd|gBfuV%G5PxR+Z#hq_aUnN$FYIYeEP=t3!gx^o2u;o9hL{_PeBYbQpk0=IFiY`9bI0|tOqNRb= z7SS*_5zad#dwnpEMKYkbW8R*=cR0Nf)`8-o`>J_9pivL?CNiZj#EZW~_b2zIjGxnH zZdY@Xi||0dW{+U|>?yteEP}LOhJ8USjb@HTOG^d2@vVc;n4SgbYMc-#r3SR$g9`%u zr-uqr``D7+&2UcfS+`L|DTI9YP_>d7w!akEcR7EK1B;u^R=^}T5S775wBG=^88FHQ z*ycz<-AX+~tiNS&qPRAO&P#*Np7WCvVjAfF?Nz){wXaFoGs^{sV$U{hdqz=p?|S@pM$<7=Y@_TsjhN@ZXvTnq!?^c~)ccA)YihnwIqoaXO38cWII^B_ zI3T??I@Gzo`{2Cjmx%miUK5p~r!*}hsfdw4mb-gJaehh)Kc{59AjV_&n@DQskCg@Y z>fX5d@K@E|5dXkdK|uM5|A3yzs_m`dv=VJ$eXZuD3~SMGn$yS43A$`}_E4O-wZ(5~o3gA6GuRgnOuz!lVJKd3MYQYr^HiX^;?hqkAz3?AMkv2vFl6+p(-kwEM(Wef-aDE zRQ)?T>;}WzJ>5n{Z{KNdm-LX<8{5cfq~skEx+GO{`8H*f#4H_%7uZ35wK!im`*Y4+ zOmt|24JJ660keB%3m#w6q0-;$N>J3+5PhKamwMs%`n*@YT8LJXUMnT=_$xQ3zh=xU zPW|}HH-m-p_JtqCmSLu50T0jcoJ^EgK4w#!WA^ac9p1{m+c#d4OIT#0L#&%rnjUv% zTp0LJ-!;5YCgRcF*47duv_3g3zq}zl>|?^Y-V$Ly6>DEb^LW1Hf{0bIeTn6(F9slu zhd5bo#q%w#A0`w561%+pla)Qu2u@aZL{ytoExSS4_Y+QPkI**7dW}o!ut9 zOZQ5OISt>GJNZWXu2+Uu%>U@{sF`T<`1?`XgDWdeeu20ikx01C68Y8sgRVZn$4&M7 z)2b8Kn)(yT`f;csf3NORI%~*%8t#lEXS#hq7PZs1VnvXB0X`CE<8|fWi*f{^!|@ll z2@jrgWdD)Kg;IxaLB{y^#pJh9)Lf786;KCvr+oQNo|YMKid${!*crBi`!SBZ@PG%= z9^9k>WN7iRVqQj@tCST(Dcjqmu8Cs3++++)+LXk^)HGt1X=(Q}I~wAwtZ>uQ`NFXc zW}QKik!ymp&4mf&+S-xyp;8h7lKIH2Z>c@&FiycE8`y2(N;fiCnWS2uQZhD}>7jRZ zYwzlrmy?mnakY9pxsAnI<$eG8Cod&gTfl4AD(_NfrKN0Bx_|(YqeD!6?%}hPO;SYT zG$mBYTwS%FbE=_3Uy8Px4Ghc#{zz7HINI;g7Zzg7_~IdQ89hvu-2A*ZxK!o&`S~3M zC1NOa*f07_uCB}I6kH7?VZoh=PE6W>c?R7^XB<U$S4+XOKKClM$B6|6arTY-wlp`N!zwV&G(CSFiVdn@DJtT7?b;M4EqyjLR42em zrm@n_KD@cgJkd%TKNNNoHig{5Td+V`qtL7^nq(X9ok8wc`l93`T0X>ZcKk)+nZI(e zW7Ez?P4-axV3bII)Zt(D@5MX#){8(|*?El>vFVfH6UVdg1|932i0?5RA!b1=u+4I@wZ|(@#q!jp%5`q 
z18ba{^6Qr`4mTY9%3&s~Ey^iFLlw@UB$XH81S5%BHeWPJw?!O5zaHAUD#kV4B&$RW5 ztf&3it<{({Gqv+9G%IrxY>6#?Yg#eep6OFsxOXXRVz4ZZcaEFS$;DvQe7ta`SFHTe zH`OF31J=m_JHE1>P#ql$H%kYrv5(hN>qGzjUK*EQqPbakm);~MDP(30e41yz6obVI z?!XbwDw|g{H30))JVdqM=lmC ze+9ra1&tOGiM_XU%k~C`!29I&;p|8*dRj)t_)lYfKYv<;NTLfXMa_N?z#X%nKA~L` z_4r+%`mi`T@%10 zsMFGg@54(TZbkIo;j619+&|l2xv>~7SA@$14KB!r4e07tw6->#Z#ut! z+cc=KTU=dm6o9zCq-95(bpDZCRM^we>fFkZ%PlL>G%~NHUhb;KiADtUK4b`JH>~ax7 z{EzD8P1@&);?ae5a^FsD)a*>c+;3%Er{Gl{L5P$J5jCyqeV{4I@Z&_t;rC zG8imPbk*OzQzT`6J~>49LCNxDrN?kv!-q@S7MXS1)WnAhb#smiE3)ox+!$0QB={y> zOtLO7Z@!3arcZ+SVkP+n4vkDcvJ?=0n&5=T3ncG#%l|0gfODihHE7w!YOW1VM{BG{ zzF6G%?kj=?JMljAOTFYi1jb*B<0GwjUiN7FmzbS8w5=YTU0GS3Iz^%!VA-&zBQ1K2 z!a}W7*e{3E%KZC?tIPzR1j})WEQ`0N+;gX9lV$qRTak}4e%o`HQ1;EA-<2lY>7Sx! zdRRS{>H^It($r;*P$n1N6R2OV_!#E2;3-%D?~QnK_VB$sHYw(J|3q*FM@6nREsxbX zfl9POi{5wy`KVO3wT2t(zF6JjCoDV;=Ef6bss_r^4q+Y6)jUE%pNrPzq#M00vUi@R z$?!sDTd`5&AfagddYSF`XY6Wn@#n0s*CalwUw+h#YdWuO!TMN`?(pKZ)%kJeVl}7mmR^NKK0%(BB!Y{KtQFc9csEp;)GVN7 zFp8J8aV8K-o>^zyT(#9iz^1kp{hHY+{P#Ies!yq>^b)ri_+H+G5n$M5TUX&a)5rsGc&z^9reGa>< z$RL}eQ&nQ3%faD~T+>r4;H9I})`J+{(bRWt zXa=hn6lCtQHX9W#xTk7V4--sFK);G#KlW0?= z&)xd_{2^~9Yx*PxJP{GruiCm&-!yV5i^XvOIl!bt%nWGw@KvUk~2P zmqm3OrjaGdNf%_*!bNe276nJ!;YR}Hn=6CYVl~wYG?*jjnkw}t^b3B!38r7ahrS*H z!!TmsTu-H@{)W1x<`G(z^8{_xI502~=WZtxgNhO0(#m{LwL1!SRbxuYq^0;QTsgPD z6N{X{fmQhJ+gnZIVGGH8U9)mkCR*b>5Qp$^(yka4!wNIAA_<8A(6mm2xQTp|^>p0H zlgyGTxEPgr0`);gTGk81-6p6&+B)%xzgSC(|1|;OvuGOQ0-E7VdB~OD-~b=DPC>a% zZH=6v@rIk2;jX-6_>?H>M_*PKJAV8q|MO?b+zxJ&9s?U*K9rJ>Z~~X%*6F=#q`(_@ zYjiU*QUyH;9;y9QQj<)DP=AGNZePC5n)CGueXSleDQRY)uWy)!wA(R?1Xs1R0s%2E zNhNxXz|&s~q*PykQ{LHWA}tiCcrj-%+e*4qobJ#tswL0N=DiDEc?esNGkIFDFzCb_ zYL=-Le7UQyHZ1Jik~X^hi9w`uRAh3O!NU5$3Mr3C=r*KiMxSy(t>&rxr4 z{_@QZIQ_L$>XCXx#7cU4QRAIQ!lAfjc0BDapHQS^mJ^za(o(vbBDA7kwK zp>i!NEIiz}eA&9R$tL6Gi2CkFe6)Vnaz7^DI;X+a(ebX6lf90Phepp%Qu_9*SLuP8 z8=16!wE{lRQ`8`uS&V~YXxvv$AvVQRkrBvbRCLP=NuMt-w_Z=}=n(yURj>TMkcom{ zrD~I!?~(!|W72$xxs`_qH%i27_nu6XQEF{U!`C53V_h@LCvI*pQj$ym$;h+whmZEz 
zSkK!5-c$eq7jm~da`$TsBTouMOwz(p;~;GN@f`NMPcJ2{fqM?g->B6*MW!KuoX`XJdo*U@fVMC&6%H3O|wIh?nixae_@l~reqY{1DFSL(LMzMO6L!20rx*A z*Nd>HjeT6xK^-wN21D3eVy~*B&D7QO=FPadjLJq)Ma9=rQn+<=elipnlYtX}q@MAy8?m-oz25zw)#>_4g6t zbQNNnP3>LHWnLSan)275f$xgS8J!QjIOO z+bAmFW*7A=k7ViknE);??DBF@`N{G8dx;nb)Z0`T6fzbvg7`c~t2uDR8d`)hotaio zJS#J%)yI|9EqJZn)hOr|?hN^x1wZ>1aTw2B^QNGlb3=^bZE#Hl8?=5yV>aFO&?>yP@2dU$ zOh7A7H-++TS*P^y7IUk4NvRj<5BWP(cfWhf^&d^A{t1o4>6R^nRxy6uR%EX6V4#?o z-Z7ndybwb_+11zgQ|h1JSiwy8G9kGqSYg^!N)+O7K`;%f2p3uTV4{f4!l^*yf$P^{ zWe47_N*a?xH0L2XITkD>yS^ui3_^$7a&iS>@rVRzX%rTyy%xAkU(82#KD)IIn6a(N z=9-&Ihby>Qw4Z)!40cO+d?7C=h#;75@L{Gf1FBnd0}oHLKMXwU9|_6WGcO&JL#!x& z=~9W5qK4FzJG4)u&?0F^vzL7wqtAlz2GwrxIHk;Sf}XD4P5AvMYpltei(q8Bd5pE# z*oRpJNvY?!c8fdjf2^nw|0O+Wp{`X@w*$7x?*Fw-I?O~(O5HIk%X;99&dlJFf_k>T zSjO>AOrCP@=Y`f;5fDHsHWTFic~`5MSf!;SEGtx*Mr;wi3^H~1b^MzK2IhWl69pNX zxE>>6gMiV$m>h-SYk3*+xNiwPFExk^eGU&ca((*2sXx50^~k7A$~C#RKK9_x8XDHV z{*=juoNUB9o?mLFz7V#Bj;TdX4zO(|p}3RSoqcQNPxIoG0Kb_npGrZn<`B zjJ~Wa%FV5vn`f@I;X`tdraGyAY~}jUQQYl5>0s-3Jgzz$s;vw&Lp9#DO1SOx>#6LO z#kTL-tNv=T$B=wJkK6ap?(2?%83b5mc1fbC%6yC#hlNPGi7qeR(=u6`<^qRf^~)Ix zbD87kBy@BtT+gLeE_epe$jwCv@u|y`VRQ5!ail(|mAyn(CnpzR10h@BZ*m%bV|TSQ z!ai9De^%r2SS(fxCnln%TFbfveHxn%lms||=yC1gjJaS375HQ`?8r zKeHNs-^^&RgQK;`w)d<$SlOCxC7iHalGL~KS8nqgevuRRc0+>*RcWS8%~HQZ?d3hW z$CK_n;Fk)b8{u1ok&05GE-?x#n$xfY`{ash0w6cx_Z5QKnTD- zWv06|{!wCj^vI}>TPO95K7>-wfQSr11dZ$&XBTw;Z1MlP@twxU_88O5DA-=%wmkNk zf&8Aw#RB$bTW4|T1tR5bacF=Jd215mwTIu!qHw+885M0CJFvN6mI56A8fjwzYfmi( zw1XpW={9xR3)a5*UTvVE2>W9UQv^-wv8`nSZi)v(vEcdR@rAO`VPF^&7gdEHT!|Oe8O|!nL2>+{#Gkd1X=|y z95OE9P~)hB<({9PEG*p2$k@`QNSkeYTG!`T)nf?Y*@bl(*|Le9_4GTpx+MMQ-%%vo z>G*u0Z9Xo9$jHbj6`>ld@-nh`D2vo*!2O*mJA-Aa!YvBwPuN%wg+~i34>L}D$M5(G z+Uy#68dcF}DW`ktl9CqG_}%*Yc9l&w9{&}c?m@0AibjY@?{y7ti56hQ_E32^y)S3N zUS(EQp__5jXlmy3zb_%RwY^i{K{7W-U`RS9XlMiQ=>rO2%|idUGTw?R+(MHDpBK?B zzQvjn)rmWLAv>rw@lIU3Td%IZC?Jk(`8LBv$#T*m6uSufAGEX4i!7DV9g{#Iwb93xdfIsP<7hx;d;Q$$4{IB+10+ 
zen>4q10};Z>F5LnTnyn<@H6Dc?z;*oM?!~>Cp*Z`5Fh$1D6T3Ooudxf<3X_+$n+M6$W)rjK6Ttyk&C3!S z$yoa951VtnPf{)iG(O;D23PD>l)#`1bH5f}=7pT)N^)~kRwC1sO>#_|6wQ!&e; z?RYj@1@2Q#FF4gYVZqSlZ@nr0^>mm~TdDQ(t^UjHRwfF33cd55dd?OnG6p7MR0=y= zeR`HfDa^lUjUzS_0E2y$ZGY$g3%Fg~Cg`3C{wlROl9dE~(KBhm+nsv?jN@&Nss`{SYfmYB#~acz_S$e~ib7 zR;iMKL)JAD7%93_zcTgqcXC8iTpp*G*DJI1*ROMl=^p5-s+tRSFnATByOWdKmz}RC z%gdUZV`d(NX5tB|%`bmVsc%e_P}%yi{EX->E6W3F>N$UZN=_*L8FU%3}kEu~=DlNrK4S7HDt%y^4;CT+qR zUwutQb6v%9md91Acj12?#zSPTsOaB5rOs?Dp4~+YW~;8CD@1KMdyad3vo?Y4p-w2z zF^!>1D1yw1qXg>hG!_qfqJ+An|E{Wu{PImf(&zE@b%8>*hD8qcpQk{5Mml1DC>__h zk`d3#Gbb+}g9#5?x7}3r5&XMjxVIbt`AWT=KQGG+DXD7p-47prL$uc?%+B8Y+W^9M zoWtC|b>WNBCYqdO2zW|d^zD`7!$Ao+qWbi-vx-(p;xo;(M8!^m( zb%te%xU`9WtQlyD5kQj?k0o~F}uR~Z!rt{-$b5vH6YL6_PPka2V91S zXO4$45~tp|A=%$*<@BrlB%s6!AD(ysJczanZ@!t71v5xXml)Q}tkEYCx1 z3e)daziD*DE406GATYj34~y0F3#_=yKC?-Uz4jXn;+M_sS1J*gLLn6y31}nie-8;}m@#xeWymd~0}-!tXo}8( z&;;A{RU%0w-L^H&FOE_@3?O1xE`x{emy!ElSvsy^-4E(2?Z2aIidOk6O%UE5jv4N8n&cXvVrm7e;lBL` z1fp`bcbJlewKcdYBI?jqdH+z_X=PN`t&k_9pwuVx$yW&awU}0QjBP5!j<*Z&m)~Gk2@U`>AUBC z&NJLcsq@@hwXND6dLAM^c_2DhT|2j>Yh5fjzig%P5SqXF%y)B0{M`E0Jpty1AqaM z*oN|QmP}L70Ao+CdmW^F<&K$bnTvmwahM*^KiW@y(k(f}Au(!40hV%>kI$)OduEd0 zFd=g*#9PqyQ-)W;?XvARsVk zN>xpU3yHU}>Dlv_U1mKuy!NzuEGqlE1@c8ke0KO+_S#XaVwKC-mwW1!6R(tULy1$e zd*RC8F_@;JqX>L6S}n9tuBxx^wKyU!-92%`cVXwD*YfeLy>FVU>VtNRHR*c`$GwDQ z9#xg^MT+$oxptbmjAvIeiRf=u1w4lm{3R&W7iB+q&f1z|B>`gj&s<{DExkFt z0lc6B4p_CF*CY2w+r%O@R_9Ld&2}`2m7sRCh&=!JRQ+7^Ax^O;6kBag^If;d&-Tc5 zH>$FoUiq<-xYx|^2ksEx;5|T?-_;Hb4Y6G=MWb;8i^&tGy|QPqbK&W2JF_)>SAHtl z&(wrInreu&AMr{0#3>-^_5v(~+nwcAKbl*$ zhBsH2Z3GUUgwIliaSC}c@)tIv2kLunwO z2JnNp5m06erTSMNMH8LfR9Hx^8edO67@2f3pu*Kn_s{K}FYEflrjtth0~!gV5km(L%MdbDi8_~$W_43R z6)G(0z>mS*3+qut+;Wt7|3BmZpUE5vU=iKR2!F9Zvu6>7-bFgQSb;Y+BP-2$D3dsb zQRuXQnLnHi6zJPqUdK`Sd`&Lf`CLOhU$*J*Pjznpb-VtuQWSgbntMUxx74=Xw{|Bp zY;561V;KJ*cC5AT?#zpC0)(hKWX^i)JPM!pLzP7-HfpnsogaBeE3`~n$SY4DTMT}_ z5ZAQb|8>`K(*7tik?QjoIzRJ?Fa((wklip?RW~$kQS{pEglo$$={XzJx}T5Jof8ZS 
z>F8N3tm&VE@4&X*Jt;z?yCMB`ednx{vq$Lmuy+NnM+ZnKPZLs+8^qPY1}{%f43}OI z7rlfG|6xereIeq_)lNmIZ6Mt_Z6vhcFl_Fe~Q{q#st3m-pic3kzfT zS~F2KaR8YKgj%GzyMx>k61!Rzn-MyZkBf?gbgz?1S&JG}J@VR!^19Zvw?MaNJDcAf zttgA#Sjq9qiZuCf`|{>?e0^};=v%AnQc@q`IrWOuJF^Zzr00~cMxWnajH)2>+8mg_ zSNGc&9W7Fg@iQ-Pc(u%Anz-;qbK1Jxd>pU@w*kj)w1{J4^Ht=lxP^taPl4I@J&gEb z@%Lifd}C;Q1a!g523JS1h|$|4q|DoW6+1B~bgjLks=*uPq-|ya0nJ^aA~l2B0Lr1w zuENmJ(0hAZLrXtCz@Yi@j^n^AU9lkjifGLvOk|fT08Y`ib2G7lPKX) zC#r95E_YkOR_tw6Q0K^#{Eb|(g%8rzLH0x9-xIgu#`aG|0-`JHwRrkFm4KYU%=VDb z*VK%>7kaH?bj6L7l(aQgnAbxxeNfp;@OkRB4Vb$SvJyrh=;`rpJZP1i9#q-v2k00do!)6JJ|2lW2u=9lSt&c*?@YpH=IKAm)jzc{*$7LAh?iX)>QCGRx5ku7Rak8v z@iqMWJH)Vu!X&uF<_{sFzlWDv-JT6>2ST(GCc_<|&G@@aU71iG3Ac#}8ov_{C(UQ0 zA{VL+adDw&5#mur`KD`7_2I<1uJMZVq95@`??VS623Zp&%y)=<4Ij0)FMgo5&)q-1 z@1yWNdnOfE=|D&0-FgIf}#11$)j}!;mEQ7ioYXG z)eS*6g0kseJ@og6kDJw}JKkr!1HD9d1**edQvZb*Xj#8k&(g9r12W$XFp#c^l@P; zjyGzWZE$zmy35miVaR4MP8N`z|MLxHnu?k1PsCao?mF8+0h7}MPy4|awx zM9Lz(11`r)xM3-$z}}jidUi^z*T&LxZO+a&Ap7gP@_!^tR;Y;zIT>=odaG@4UkW!Q zoJdgcZJiQno)Y8f`;@vh6I=RQ?Vqe;8?QGcFy#C8;m#R!aVSU8(pQL!i%dxfvcxK~ z#0Tw6dF#(^ojw$dIW18g)}jcr9kv=Gm*@I(ifunw3s~`w&v5T@?*`;BFPy&nggM{0 z;X6%6%T4WKCXTzY7*ieMC5lslA8|WI$MD+m62IP<_jkoJ$C%R<%D7jiqk_UoZg$b6 z%aNgIpNt5rqlNKy0xR9qV}sN)a}FDAy_5w;q=-KmObo}Zb9TQezq6y|xZnHDjutog zo<>9yj$9ZUia1o~i$Js!h2yY<0N&kF3mBu+)>cwA%&3Ny8}dJH>QFn1N(jRVTh?|3V*`b9~} zmMy(|@mz`I)XW`qPB1oxNcQzn$F@H0CH4aW9@O{b1u$3?-1V$lrO9Y=((%oxJ{*zp zb+itA(X_D-!0LxwcG)M5^}<-WZwX8*t!sN`JT`+hVDX6)E0*E01YK=RvLVD`Cq?O* zTvgaj_NQuHqWa^lV0{bWR$k2e`09#teSFFNB5(DxE9B&o!tSfh7UjkZB_&bDmri1% z`CoXnC4NvjRKiOR=F6++&`NHLe*gG8R4h6ZYjE~CIj)Tw?ri${`*me#O;1-8+8F3m zX08@IylfHnHt|qMr^-W%RP!{EfHs-a#mYU~!-R^6K2@V4bGiPS-TUSq;dF(+`P7YM zk<0lM7xYZ!@z@vIC#j1t>>GMSjyQQLmE`b0c(oDZ2FN zNVk5Gjp7Bb#fEgmVWxn}pT*AAJ)YqMFHvK?-PM+dPzX3cl`;D=INEW`gj^Fvlz~o3 zz;ay4b=oD{20oyZ7q8wW=shJAPi#+66O6)cN8Y zG=?BwK(*rgC41uhd9=2`vRk71`gB7*ODBF0TWBLum0qed5FnI_r{~ZYGL)!DY{Wgs)8s(jib+;DuYAj*D=T}E~)4;I|@Dv7ISl`OOg`=%aW2}z|h-KR-@WbK4#Yz 
zmr)zJhJ%hrI^8(;V2)uASi;2$y@U?4H5rct5e%w6;PZfPn2Qq$t43z0BVz81XnAg|e${u{2mQdloF;LjSr~doU2aO3Q_cJe(h^c4FFe@qQl`^ozRz_?!4iC?+M@GKl%Ydh^g- z+xp49P3KlR1sxkesv0JfR^QOZIhdAk?|<^FU^;);ToyWdp~G6NXE$pW3yO(OQ;O8cBRS#SmyD9B;2#|$CF@n2gUeS8rOYR8*n0fWRXg_P_f~U783#{ zksgj)M;$mW9%TCY5e9FRKL;N;f}9gREg5Dp2MZ66=Vl<(?_olef7a*W{r}`sm^zLp zuG0MY;%gTFhE!P0jsFd)Fe5eGAJTS%mBsk>Mqz`V;#Tej)*QTM8O?_ZFh$ZMByNbv z6Ln53$uoyNj*E+Ysrs^4{&Ktw_uaV_=_z}GlKZm=+>#qkUw1sQUxhLb)LCTaeMr7I z5fSa;xuOq_h_=aYBb_#}`XM}-(jAi>G%i`;uN} zXf#c<>{QS62sUZf)F@|I1X}kDQB4OaTI(a8`9%{@!gU7}ECTuL=K9rziNPP6Kz)lX zrB_o;Ejn7HGtb!@)AHTe1;2J#>|M~*EuRIfLsh>E0+xo3YC9&+d9dt8=OUOCK6jX< z8-fw2txbhl?kxNCE&>{9Yl))%&_I=Xz%T?s;?1l9OxgD{whm_p8 z`5Qd*Gptmtz_+rwIp0tPW+bIf9n^fUlT58oLus5a@5bV|Ak?yT3ug54IprKf`Ay(4 zyu}hiDTb==jNdE8oPKweRxY@gGLBFgzvMR$>4%mx(qEAZ&)$Jz>LlTt!JjN6PMRnN7etP63ZEijYX-;0DpxEv@3S6(oN_XcL z=RH`uuS4$x#g$N7=sX&JM+G5Z&c0AAF98aMqu70|1Fm9i6eH?+LB0So9NRDm@W98! zyD@`Z`jeYh&c+^Fi`ihVqAt`0J1<;r)g}a~LB^m~9v3?C$1ogz06}3kURBtPT(`ig z^A8k^{UJvUwNZx`HU9Jch4XzD!gx3RdD-ZjZ-$*%od9D2Pf!u0i=&w*Lu;3FS;vyW z99~j#wW$C~ZVldV%kt$oGCOM!D;`?^T1E1b*5H@Z8pyyDa=y?&J8;K+4fSlWJx<@Y zm3xY}OYzsWAV?*XN*8!U-Le*7ijQeiON>Am1fQgQrE=yBE83+H7uG;E>jnON?FfSHt4}t2De1XQ7c~H=9gpYhRZTvp`YC4A{O%bUVN*lm5!=fho0&g z?odM5v?IGg>^XjQxziT%F|cCY>X81Q)_jawewKUp(q!6_;I59LRcLs~vM^ZluRh2r zTl_TP^*l$6$%KbBm_p8{@jFsDBXne>$hgW9%Hlx$Mz@EPRb}6yPd{4H(8!_ovc@dDSx~eBeF!;0T9-fjbBRxfnyiye4w!OZLo`$wzQ43Ax%mQ)j0H8|cHCD|>%r zCyu%tW>wLbJ`b?w4IV+~4VL&=RTgv87DY|=?i??dwYk(zxoF@M&k~$8u!MXCKLLYO zQQ{7_-N8S@FH_L+X90U)p!*^uA zjDg&OfdO`%%`d_}*C|RLWt9HXkwWof(MOS53|fs^Rahw9UJ3qm7`YmVIs!26uOyBz zBIgcZVMp+K0ox_y7Qy8VmnBm#`kI~f+4CfUm0r7ut9fF{O)LZ)oiq~Lmn#JxwTX%I zCP_K26aY7&XN~DEPKQE#?SWdu7WRhMFl|2$lf<{~aedH$g#?QK*a(=A;Bz%m1Yo7J z;}wr8avlFc%}(%fJ$Jow@&*t+kaa)=1l||+BGgX=C@gOj%nSn1b?z9LPp4Ue53Nkn zEpqr!F>|k1rvj%p)nSckjaPkZSo5l&z|9<`q+||ek^nMXezD=hroR9~4X$TBFvdkA zSQR=ZL;2S8mZhoiGc~5I?*X_C_cGD$)>KFM6%oLSG&J5;Z=8H=YlxDQeEM_&O!G~f zfz)(R0}psAzo-T^>mIau)!hAJc`TqXHn$&czRTlN?tRTX!!0q{zUhQ{cpC4P4jZo* 
z%|*u?LNVo+QpW8{hdF*~jIOHc1e_4=7gamJmGeC&+g1$(J;t`A;(>^R`!rXWfdENt zWtxOBk&%9YI^ZA!MiTM%f{M&-9A)4h_&a65R5y8)_H%-<61v0*AT3c*RC&fkjW(bU^Yo_Lf zb^=C?Ee}26GIGu?k=Km#hOnUwv;D+|iU|E&om@C=qI-*ih_3xM9d`CzmDd|yO216rcN$hl!#RUF@ft9tascP4REgS~r zq%Nsz$)6(MiWe2^bY*e{!$SAYox0a=lV&xmZ?8W|KyrxqJ%i7zqx`2FuU!t03h_oZ z2@FFjKx3kX-rn$BxzWkXhjI)P3*bynZ4KSl0_Olg60C1u5JJWO?KAzZuikf~v0~^% zyj%NhKjg**&23>9H*d%d)~M__O7?M2=kQX?OxsXOY^L_@&%kmYDpJw`03?o73;C~s zIVvW5Q9%bgDKxjpRuMC1@cR2$gLrzn8l;o$Ij9IL2mWrO&hW zaDgw0*t${4uQQ7Er+GU*PM&ux3rNP9vd}XBgw5%D1x5ld6NnQ>-T~u>SpQoRV7$U~ zUc{7chbdo8w@>?dbAV-8`=p*jUh8_y8-H@kiBEFe64+$Kg(QEwL`~i=T{KzehCbM} z1OJhK6321#xoN*jZVo0klt+7rwR3Rr4EWB$+zA}x*ImtLh-GtU(aGaySi>EwMxmSR z5~C%yLwGli-sH7>uy_i@GwVht9LHPS_WLjVRb)`*#4vG+B$!_)k_d_V0yqdfZ!@ z5WD7W|8Stf{|%4&JsV+f-@*FHTBtg#njM|V$s^yLvTxJ5U1sJwQpFeFI=vv3l00Cy z{{2|VC-xjMUK9CO(cZa`soM4dQD2~RtIPKratVjOg)N&sd8Q~&k4s993sJDIO=f;nLe?BlvDp*7gRZW|x@W&~vNjpAptu+ySJUW^D9XLQnwx zsC58ge;9YARfbZV zk>?mwkI1aoFT}S@SH9?I3rK{i>O2Q4QBdI)eh4n$8eN!`bH39-H5Ta5M0>77=377dj--&_!y2c6>&~+|&0Xk&GugGi z{XH$uI-OCp`N*yk^MD8O5(Qh&0)G-ZM)S?dZZ~6PEy5~GMh1*pw$Jb_B%C-HeAtx0 zea%>)n6YU~PWB-4F`!$T3JdZd?;fRazR;D~Vj_Rw`a5RcM@wfzHIrTa$%l&X z4yc;PakEoTOoy6?zsu~^e55S#7(DA%lDUxBD=t0gxw^v?@4YJ(&_S$C;%_B4`XR4* z8S$%+kLHk;D9#rA_Y8N)0?_>~6uLnUUc3$3aDpS&0neRq=Z8I#x$US$do~0Y-=)r( zP&A$>b$Q!mkgR~BMem=lI=UfXf^&tim)ZYlmrB+O1{2=6mM-$as6R2hoqCf!F~*|r z4u5B0RZ&Fy39Dli&giHpeh`RZlXDS;(yosp1L9d>@f>SUaI}ac$iQG0PqJ{-2cxva zcewjS#Rtn{J;iy+HJ$T^L$77!@AIRaPo10uM4lK`9$fu&aGNwPTXjWiqjwLcZ7UMjKV)DVG1PmfZ_Z+e zjOI?O+sgnt?aIvU1j>vGZo^VbL4EB_HZN#&&=(4-#b$?^xldU-5aDPoAJeqN1l%=9 zgB&azuiWk&+xcmPrL3?xc2qONUupM!Zq`b8lyzi|QF`47xzV3ZUpS!UNqu2KiDeS- zd-;zG2oy^PaeSa&@A(Zy>ZF)3?@b4P#-zNw8)whfZh8!s&TTICyxVeo;kor*9O5qK z%wA)z6&eH}+V9S3T|}IEuhG0LH4draudfvr^K>CFOgWGjKQ zkE*4L6l*)yMuaosPLbaUkFYu!cDl_>s~tBda>>5v>wl;o&n$a(f|L{;EA|#FuQcIP zXMLj*f|5?K&0ncnpJd8Up<_?Q4f3|?%guv=2VX%89)r7hJq&XK{s+;!aEjs4q)MA- zA71nYL!R$Lk8G5Kj8Q~bTQ>T-|7~x(czo5e%&4%$?FI<$If51tm%#$PQ)$C*_HsLu 
zbh($ZN21Iv93o*>Cw;($tKh93xds;!^<%D2ormJX~G+Dm=>a)dNH zYRurm8P_Z5hkS!#)8CqOoibXrdnK||cC>Y($>}8Zn(Gbks9tODeg6EmtK6XX!nG%= zRv`RN+%b+#QyW}=e5?*r%@C;OSK97iN!{Q2-by6Hq8SQ!^3?$wH>^*77b0b7g%ps* zi?&t7rP_v~1$NpJpw=PvGW-4E29zggHN7ElEXEn+CEwR=pH&kGTCSggEYt0>3ospv zS+l$DYIb_Cec4|a

&uk$RP`ZfcgWDKJb@d`z8Fnp0E8G%0_m(6d?r1KG7f_z>OK z)_ZU*y?r%Jq?LxW*Opm2E{jb4(Joi`HRhfEiKS>pu7)Njh7^cQh`lX{ z`8OK*?lP24JWgcl2TY|WE7|u?cU&mB--ION`fI-+dMl@?Df5l~J-)B;)XZg0yiA29 z^++oYt?aTMXHp8lwZl3p4#tj z3p%Ee3%7-BzngXvwpqW}ej;??Snk?{5rHdR$7+^K&JLpOdQdzLzxY$;~>@^}6SMIz~MM8a)9c2aoW4!M{Jp0&w1X8{@wnv`2EZ+mS>(*pmu*5L zF(OPTDoS0mZTw7CasKKZjluMpYd#4J0$%%*doXjGf%At5;U8BUrV94sM$#=xADoWH zjX93`{QS|vX@h*`JhytQBqzcM1KA%!1ilpwPLsf8;gDcMolN5Sw{VQa^Xf1Oq^}rM zcc|R-#JqOvfdrI@?*@bdh_mk_u4>@`>n!{wEMB1jG4 z85gl``LK#@n|r>wUFk8kB27srWZ@~DjuQ>25=|d-XT;^Bu?_yXh-1e3I@4WFK4Tp4 ze}OnBREDw+c~!_~h0tt3V<+fd!fH^;t!_!}jrPt>POd7-TAJbSQhq=o zKB0mL?0XEVO}S^hL>w4>2Py1iRQX|#q`8-9Z4EZ=ly)i*qnH7J4+l_DqI){;x zjwbuc)_ZSHwS~~Q8P(Iub)Jb#_mR7Z>P)QODfFvY;CPJd`G=yN^|6RKy*z$}78KEc z=p-dz8j^F+OmEG92AO`^%(k!KgA&%!JWWTjvkKO{u84SFk-v&s@;{$E^n>=aQz$o`r>UM`DfWEKO*h@_lYVInzE3Y%LfWRD`qb@+5K3<0ZmjXb}LKpXzylqt-K) zH12hzUJMXGMU1S1FG23|UUbLLL#{5J*JNg5Dwk7ze|9P z<*CA8!<6_>erB>wHM?2P^4gLFhGc_H@z8|Is}X;ZD}%;TsyVgo3Y#S6cW=}fle5yngV-2wen@I&Kp*)M~mLT$%OK_)&-?*s%}t-)_=tn zL~!*0Hdd~Wi!kb;1WK0e{1slR<@HL)Wq z_@cr6i7Pa0B&(N%KmXCuf9*qo*zh|Y)bo&7rR#S_lXx46aMQ@P+AcHklUf4+Cd8{TVW*5MR~ytg1MT!-P-xToBM zgnY4EM+pt2?S8LSQb`@<`{zwd%B+}hQAO6xl*;=Nhhh%k`KeijroU1yPV_ijj-x;$ zrD}Z9kT%fA0=?I}e8pNcreJn#c=5_myKTP#dsNg30Q=OB-q|OL{Z8mKZ9PALf;Ur) z|6T!5M^2B^t*v9H5B_A4 zX5oS{-Y-ePb#yV5#5oQBrKKOz(nA4{XGQD<=ZjAxQmU9 z1a6-F_56Sp-SKnBs0AgkE7D5w_eLR`UmiU5Qu zd5+nQ0zE z@_pu|Kzfd6-fVVd*wdL-5Bo65YD|-n3s+<-^Z?+Gm1JowX?z^h%{ZX%*GIfdu{PUy zPfSnnllL?_3l2KIS&Ye1LSTSd-#{A`SRdZ!!7OxeI))IRKZ+>ogg>dzU_KS71Uzbb zf)8xD#kL&Q8?WXkg|azgP0Y+3oCOQ>JHZ}BhcWpGQ#Im1Ow1bSahO8GN5KB3T>S6H z+Je9jY9_b?)Q*OK90rXrNl{?XkKU_M-lzrP@`|Md1exCE>eKryjA;=ig4~q2y4pWJLIgg2#zX%P0VpN)%cGmZ z!p|^4a7a^#GD@s^6iL!RCv+>n25r)o3!%~Xzc{}Xnp>Wo4hO28?E7dp6a?_q8C*}A z;wInoR4f@G=O^(@XuaY8g$e!5#j%}ov2VkOhj&6t%R20FZEe)Jf1k*bUo|j%MX_np zpwV3cByva@OTn^5d>wGifuxH)3Q%W3yl^JE6~I1%X$fCSQzz_XCg^SXki~qry`%j3 z`(?ELidN;g@en_Mawv;2yH~^-6T!jP{RQKHs=aj6PcbPk{`cGYpOAg&>6zrr5du8$ 
zeiIo59tvf1?76B%ap&kAi{2RUYM6`{Tp)q_wn>PNj)IK~N%DMED4t>f78;VciwZDbE|NUbXbx_AYv0iBl#QT7MD6 zt|CKFAa3o;5nKWg`h?ndMoka@nrbN8U@{>aQZEu6L*^$T77#LjxY?2}{x&wt^aTIv z7%YOu%gH~HbxV1HF0DF>?}f#)K{qADuW^#$ntla6ql0}xf7H&V`$Onerz&h8c5-A% z>}MF4Nle&`$wL(P^L&uw%2kOR^T$t}yjo!bXiN#^?iV2ZLcN<~O%!PyEE8qTh3{vO z#CKNlX8=K}u)EB=T`ZEzl*giQRPtBr39>Yc;K|W@JLLqdC<%tDx}Pgpjmq8)pUIg! z%Ta!@Fvni%tG@g}lAewIb?cTl#-q{g&;zPVGd{wxVuk0`iiRB9K1*sM)#ou&rteAl z78=Pl3CbQfxEB$>f72fpL}1&WRc8m{+mK%|()S>}O(DTzMSa0ffEe5a_%P^apgUz+ z(UCfIZVR4)#IaZ6`ta`)63+76`#FIK6_Mkzly*;wu)V6R)<-VSUb1}yAfcnw<7kck z^)V`_AXCQScYD7jIRvU_@G}7C%>L~UkB3!Fu+l9wASA2dLNsGLL;$Qxz*5+Sm|2&<&EC6hpSlidPLh0=V8EooLNF2 zcuNeRoBEzBo#KBC6H{{XSQyXb6%_D@J;1$SZ>dFezJ4!(^N&9c&@)`A^$(z;6rG&~w+SwK_U_g1#f?+T z?^1hWG3jo99DWo)hW|Q`?S;BuFLDmsKBV(LN$19m);}+vF;1#?3Ax5#lnRZ1MvmZy m3`_. diff --git a/doc/source/install/install-obs.rst b/doc/source/install/install-obs.rst deleted file mode 100644 index 6b3a48a..0000000 --- a/doc/source/install/install-obs.rst +++ /dev/null @@ -1,35 +0,0 @@ -.. _install-obs: - - -Install and configure for openSUSE and SUSE Linux Enterprise -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -This section describes how to install and configure the Infrastructure -Optimization service for openSUSE Leap 42.1 and -SUSE Linux Enterprise Server 12 SP1. - -.. include:: common_prerequisites.rst - -Install and configure components --------------------------------- - -#. Install the packages: - - .. code-block:: console - - # zypper --quiet --non-interactive install - -.. include:: common_configure.rst - - -Finalize installation ---------------------- - -Start the Infrastructure Optimization services and configure them to start when -the system boots: - -.. 
code-block:: console - - # systemctl enable openstack-watcher-api.service - - # systemctl start openstack-watcher-api.service diff --git a/doc/source/install/install-rdo.rst b/doc/source/install/install-rdo.rst deleted file mode 100644 index 47105d9..0000000 --- a/doc/source/install/install-rdo.rst +++ /dev/null @@ -1,38 +0,0 @@ -.. _install-rdo: - -Install and configure for Red Hat Enterprise Linux and CentOS -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - -This section describes how to install and configure the Infrastructure -Optimization service for Red Hat Enterprise Linux 7 and CentOS 7. - -.. include:: common_prerequisites.rst - -Install and configure components --------------------------------- - -1. Install the packages: - - .. code-block:: console - - # sudo yum install openstack-watcher-api openstack-watcher-applier \ - openstack-watcher-decision-engine - -.. include:: common_configure.rst - -Finalize installation ---------------------- - -Start the Infrastructure Optimization services and configure them to start when -the system boots: - -.. code-block:: console - - # systemctl enable openstack-watcher-api.service \ - openstack-watcher-decision-engine.service \ - openstack-watcher-applier.service - - # systemctl start openstack-watcher-api.service \ - openstack-watcher-decision-engine.service \ - openstack-watcher-applier.service diff --git a/doc/source/install/install-ubuntu.rst b/doc/source/install/install-ubuntu.rst deleted file mode 100644 index e475a89..0000000 --- a/doc/source/install/install-ubuntu.rst +++ /dev/null @@ -1,34 +0,0 @@ -.. _install-ubuntu: - -Install and configure for Ubuntu -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -This section describes how to install and configure the Infrastructure -Optimization service for Ubuntu 14.04 (LTS). - -.. include:: common_prerequisites.rst - -Install and configure components --------------------------------- - -1. Install the packages: - - .. 
code-block:: console - - # apt install watcher-api watcher-decision-engine \ - watcher-applier - - # apt install python-watcherclient - -.. include:: common_configure.rst - -Finalize installation ---------------------- - -Restart the Infrastructure Optimization services: - -.. code-block:: console - - # service watcher-api restart - # service watcher-decision-engine restart - # service watcher-applier restart diff --git a/doc/source/install/install.rst b/doc/source/install/install.rst deleted file mode 100644 index e6c8883..0000000 --- a/doc/source/install/install.rst +++ /dev/null @@ -1,20 +0,0 @@ -.. _install: - -Install and configure -~~~~~~~~~~~~~~~~~~~~~ - -This section describes how to install and configure the Infrastructure -Optimization service, code-named watcher, on the controller node. - -This section assumes that you already have a working OpenStack -environment with at least the following components installed: -Identity Service, Compute Service, Telemetry data collection service. - -Note that installation and configuration vary by distribution. - -.. toctree:: - :maxdepth: 2 - - install-obs.rst - install-rdo.rst - install-ubuntu.rst diff --git a/doc/source/install/next-steps.rst b/doc/source/install/next-steps.rst deleted file mode 100644 index c07654b..0000000 --- a/doc/source/install/next-steps.rst +++ /dev/null @@ -1,9 +0,0 @@ -.. _next-steps: - -Next steps -~~~~~~~~~~ - -Your OpenStack environment now includes the watcher service. - -To add additional services, see -https://docs.openstack.org/project-install-guide/ocata/. diff --git a/doc/source/install/verify.rst b/doc/source/install/verify.rst deleted file mode 100644 index e901146..0000000 --- a/doc/source/install/verify.rst +++ /dev/null @@ -1,119 +0,0 @@ -.. _verify: - -Verify operation -~~~~~~~~~~~~~~~~ - -Verify operation of the Infrastructure Optimization service. - -.. note:: - - Perform these commands on the controller node. - -1. 
Source the ``admin`` project credentials to gain access to - admin-only CLI commands: - - .. code-block:: console - - $ . admin-openrc - -2. List service components to verify successful launch and registration - of each process: - - .. code-block:: console - - $ openstack optimize service list - +----+-------------------------+------------+--------+ - | ID | Name | Host | Status | - +----+-------------------------+------------+--------+ - | 1 | watcher-decision-engine | controller | ACTIVE | - | 2 | watcher-applier | controller | ACTIVE | - +----+-------------------------+------------+--------+ - -3. List goals and strategies: - - .. code-block:: console - - $ openstack optimize goal list - +--------------------------------------+----------------------+----------------------+ - | UUID | Name | Display name | - +--------------------------------------+----------------------+----------------------+ - | a8cd6d1a-008b-4ff0-8dbc-b30493fcc5b9 | dummy | Dummy goal | - | 03953f2f-02d0-42b5-9a12-7ba500a54395 | workload_balancing | Workload Balancing | - | de0f8714-984b-4d6b-add1-9cad8120fbce | server_consolidation | Server Consolidation | - | f056bc80-c6d1-40dc-b002-938ccade9385 | thermal_optimization | Thermal Optimization | - | e7062856-892e-4f0f-b84d-b828464b3fd0 | airflow_optimization | Airflow Optimization | - | 1f038da9-b36c-449f-9f04-c225bf3eb478 | unclassified | Unclassified | - +--------------------------------------+----------------------+----------------------+ - - $ openstack optimize strategy list - +--------------------------------------+---------------------------+---------------------------------------------+----------------------+ - | UUID | Name | Display name | Goal | - +--------------------------------------+---------------------------+---------------------------------------------+----------------------+ - | 98ae84c8-7c9b-4cbd-8d9c-4bd7c6b106eb | dummy | Dummy strategy | dummy | - | 02a170b6-c72e-479d-95c0-8a4fdd4cc1ef | dummy_with_scorer | Dummy Strategy 
using sample Scoring Engines | dummy | - | 8bf591b8-57e5-4a9e-8c7d-c37bda735a45 | outlet_temperature | Outlet temperature based strategy | thermal_optimization | - | 8a0810fb-9d9a-47b9-ab25-e442878abc54 | vm_workload_consolidation | VM Workload Consolidation Strategy | server_consolidation | - | 1718859c-3eb5-45cb-9220-9cb79fe42fa5 | basic | Basic offline consolidation | server_consolidation | - | b5e7f5f1-4824-42c7-bb52-cf50724f67bf | workload_stabilization | Workload stabilization | workload_balancing | - | f853d71e-9286-4df3-9d3e-8eaf0f598e07 | workload_balance | Workload Balance Migration Strategy | workload_balancing | - | 58bdfa89-95b5-4630-adf6-fd3af5ff1f75 | uniform_airflow | Uniform airflow migration strategy | airflow_optimization | - | 66fde55d-a612-4be9-8cb0-ea63472b420b | dummy_with_resize | Dummy strategy with resize | dummy | - +--------------------------------------+---------------------------+---------------------------------------------+----------------------+ - -4. Run an action plan by creating an audit with dummy goal: - - .. 
code-block:: console - - $ openstack optimize audit create --goal dummy - +--------------+--------------------------------------+ - | Field | Value | - +--------------+--------------------------------------+ - | UUID | e94d4826-ad4e-44df-ad93-dff489fde457 | - | Created At | 2017-05-23T11:46:58.763394+00:00 | - | Updated At | None | - | Deleted At | None | - | State | PENDING | - | Audit Type | ONESHOT | - | Parameters | {} | - | Interval | None | - | Goal | dummy | - | Strategy | auto | - | Audit Scope | [] | - | Auto Trigger | False | - +--------------+--------------------------------------+ - - $ openstack optimize audit list - +--------------------------------------+------------+-----------+-------+----------+--------------+ - | UUID | Audit Type | State | Goal | Strategy | Auto Trigger | - +--------------------------------------+------------+-----------+-------+----------+--------------+ - | e94d4826-ad4e-44df-ad93-dff489fde457 | ONESHOT | SUCCEEDED | dummy | auto | False | - +--------------------------------------+------------+-----------+-------+----------+--------------+ - - $ openstack optimize actionplan list - +--------------------------------------+--------------------------------------+-------------+------------+-----------------+ - | UUID | Audit | State | Updated At | Global efficacy | - +--------------------------------------+--------------------------------------+-------------+------------+-----------------+ - | ba9ce6b3-969c-4b8e-bb61-ae24e8630f81 | e94d4826-ad4e-44df-ad93-dff489fde457 | RECOMMENDED | None | None | - +--------------------------------------+--------------------------------------+-------------+------------+-----------------+ - - $ openstack optimize actionplan start ba9ce6b3-969c-4b8e-bb61-ae24e8630f81 - +---------------------+--------------------------------------+ - | Field | Value | - +---------------------+--------------------------------------+ - | UUID | ba9ce6b3-969c-4b8e-bb61-ae24e8630f81 | - | Created At | 
2017-05-23T11:46:58+00:00 | - | Updated At | 2017-05-23T11:53:12+00:00 | - | Deleted At | None | - | Audit | e94d4826-ad4e-44df-ad93-dff489fde457 | - | Strategy | dummy | - | State | ONGOING | - | Efficacy indicators | [] | - | Global efficacy | {} | - +---------------------+--------------------------------------+ - - $ openstack optimize actionplan list - +--------------------------------------+--------------------------------------+-----------+---------------------------+-----------------+ - | UUID | Audit | State | Updated At | Global efficacy | - +--------------------------------------+--------------------------------------+-----------+---------------------------+-----------------+ - | ba9ce6b3-969c-4b8e-bb61-ae24e8630f81 | e94d4826-ad4e-44df-ad93-dff489fde457 | SUCCEEDED | 2017-05-23T11:53:16+00:00 | None | - +--------------------------------------+--------------------------------------+-----------+---------------------------+-----------------+ diff --git a/doc/source/man/footer.rst b/doc/source/man/footer.rst deleted file mode 100644 index fc8c28e..0000000 --- a/doc/source/man/footer.rst +++ /dev/null @@ -1,5 +0,0 @@ -BUGS -==== - -* Watcher bugs are tracked in Launchpad at `OpenStack Watcher - `__ diff --git a/doc/source/man/general-options.rst b/doc/source/man/general-options.rst deleted file mode 100644 index 71aa842..0000000 --- a/doc/source/man/general-options.rst +++ /dev/null @@ -1,66 +0,0 @@ - **-h, --help** - Show the help message and exit - - **--version** - Print the version number and exit - - **-v, --verbose** - Print more verbose output - - **--noverbose** - Disable verbose output - - **-d, --debug** - Print debugging output (set logging level to DEBUG instead of - default WARNING level) - - **--nodebug** - Disable debugging output - - **--use-syslog** - Use syslog for logging - - **--nouse-syslog** - Disable the use of syslog for logging - - **--syslog-log-facility SYSLOG_LOG_FACILITY** - syslog facility to receive log lines - - **--config-dir 
DIR** - Path to a config directory to pull \*.conf files from. This - file set is sorted, to provide a predictable parse order - if individual options are over-ridden. The set is parsed after - the file(s) specified via previous --config-file, arguments hence - over-ridden options in the directory take precedence. This means - that configuration from files in a specified config-dir will - always take precedence over configuration from files specified - by --config-file, regardless to argument order. - - **--config-file PATH** - Path to a config file to use. Multiple config files can be - specified by using this flag multiple times, for example, - --config-file --config-file . Values in latter - files take precedence. - - **--log-config-append PATH** **--log-config PATH** - The name of logging configuration file. It does not - disable existing loggers, but just appends specified - logging configuration to any other existing logging - options. Please see the Python logging module documentation - for details on logging configuration files. The log-config - name for this option is depcrecated. - - **--log-format FORMAT** - A logging.Formatter log message format string which may use any - of the available logging.LogRecord attributes. Default: None - - **--log-date-format DATE_FORMAT** - Format string for %(asctime)s in log records. Default: None - - **--log-file PATH, --logfile PATH** - (Optional) Name of log file to output to. If not set, logging - will go to stdout. - - **--log-dir LOG_DIR, --logdir LOG_DIR** - (Optional) The directory to keep log files in (will be prepended - to --log-file) diff --git a/doc/source/man/index.rst b/doc/source/man/index.rst deleted file mode 100644 index 74469af..0000000 --- a/doc/source/man/index.rst +++ /dev/null @@ -1,10 +0,0 @@ -.. 
toctree:: - :glob: - :maxdepth: 1 - - footer.rst - general-options - watcher-api - watcher-applier - watcher-db-manage - watcher-decision-engine \ No newline at end of file diff --git a/doc/source/man/watcher-api.rst b/doc/source/man/watcher-api.rst deleted file mode 100644 index d2f7fa8..0000000 --- a/doc/source/man/watcher-api.rst +++ /dev/null @@ -1,39 +0,0 @@ -=========== -watcher-api -=========== - ---------------------------- -Service for the Watcher API ---------------------------- - -:Author: openstack@lists.launchpad.net -:Date: -:Copyright: OpenStack Foundation -:Version: -:Manual section: 1 -:Manual group: cloud computing - -SYNOPSIS -======== - -watcher-api [options] - -DESCRIPTION -=========== - -watcher-api is a server daemon that serves the Watcher API - -OPTIONS -======= - - **General options** - - .. include:: general-options.rst - -FILES -===== - - **/etc/watcher/watcher.conf** - Default configuration file for Watcher API - -.. include:: footer.rst diff --git a/doc/source/man/watcher-applier.rst b/doc/source/man/watcher-applier.rst deleted file mode 100644 index 7f22b76..0000000 --- a/doc/source/man/watcher-applier.rst +++ /dev/null @@ -1,39 +0,0 @@ -=============== -watcher-applier -=============== - -------------------------------- -Service for the Watcher Applier -------------------------------- - -:Author: openstack@lists.launchpad.net -:Date: -:Copyright: OpenStack Foundation -:Version: -:Manual section: 1 -:Manual group: cloud computing - -SYNOPSIS -======== - -watcher-applier [options] - -DESCRIPTION -=========== - -:ref:`Watcher Applier ` - -OPTIONS -======= - - **General options** - - .. include:: general-options.rst - -FILES -===== - - **/etc/watcher/watcher.conf** - Default configuration file for Watcher Applier - -.. 
include:: footer.rst diff --git a/doc/source/man/watcher-db-manage.rst b/doc/source/man/watcher-db-manage.rst deleted file mode 100644 index a7b933d..0000000 --- a/doc/source/man/watcher-db-manage.rst +++ /dev/null @@ -1,260 +0,0 @@ -.. - Except where otherwise noted, this document is licensed under Creative - Commons Attribution 3.0 License. You can view the license at: - - https://creativecommons.org/licenses/by/3.0/ - -.. _watcher-db-manage: - -================= -watcher-db-manage -================= - -The :command:`watcher-db-manage` utility is used to create the database schema -tables that the watcher services will use for storage. It can also be used to -upgrade (or downgrade) existing database tables when migrating between -different versions of watcher. - -The `Alembic library `_ is used to perform -the database migrations. - -Options -======= - -This is a partial list of the most useful options. To see the full list, -run the following:: - - watcher-db-manage --help - -.. program:: watcher-db-manage - -.. option:: -h, --help - - Show help message and exit. - -.. option:: --config-dir

- - Path to a config directory with configuration files. - -.. option:: --config-file - - Path to a configuration file to use. - -.. option:: -d, --debug - - Print debugging output. - -.. option:: -v, --verbose - - Print more verbose output. - -.. option:: --version - - Show the program's version number and exit. - -.. option:: upgrade, downgrade, stamp, revision, version, create_schema, purge - - The :ref:`command ` to run. - -Usage -===== - -Options for the various :ref:`commands ` for -:command:`watcher-db-manage` are listed when the :option:`-h` or -:option:`--help` -option is used after the command. - -For example:: - - watcher-db-manage create_schema --help - -Information about the database is read from the watcher configuration file -used by the API server and conductor services. This file must be specified -with the :option:`--config-file` option:: - - watcher-db-manage --config-file /path/to/watcher.conf create_schema - -The configuration file defines the database backend to use with the -*connection* database option:: - - [database] - connection=mysql://root@localhost/watcher - -If no configuration file is specified with the :option:`--config-file` option, -:command:`watcher-db-manage` assumes an SQLite database. - -.. _db-manage_cmds: - -Command Options -=============== - -:command:`watcher-db-manage` is given a command that tells the utility -what actions to perform. -These commands can take arguments. Several commands are available: - -.. _create_schema: - -create_schema -------------- - -.. program:: create_schema - -.. option:: -h, --help - - Show help for create_schema and exit. - -This command will create database tables based on the most current version. -It assumes that there are no existing tables. - -An example of creating database tables with the most recent version:: - - watcher-db-manage --config-file=/etc/watcher/watcher.conf create_schema - -downgrade ---------- - -.. program:: downgrade - -.. 
option:: -h, --help - - Show help for downgrade and exit. - -.. option:: --revision - - The revision number you want to downgrade to. - -This command will revert existing database tables to a previous version. -The version can be specified with the :option:`--revision` option. - -An example of downgrading to table versions at revision 2581ebaf0cb2:: - - watcher-db-manage --config-file=/etc/watcher/watcher.conf downgrade --revision 2581ebaf0cb2 - -revision --------- - -.. program:: revision - -.. option:: -h, --help - - Show help for revision and exit. - -.. option:: -m , --message - - The message to use with the revision file. - -.. option:: --autogenerate - - Compares table metadata in the application with the status of the database - and generates migrations based on this comparison. - -This command will create a new revision file. You can use the -:option:`--message` option to comment the revision. - -This is really only useful for watcher developers making changes that require -database changes. This revision file is used during database migration and -will specify the changes that need to be made to the database tables. Further -discussion is beyond the scope of this document. - -stamp ------ - -.. program:: stamp - -.. option:: -h, --help - - Show help for stamp and exit. - -.. option:: --revision - - The revision number. - -This command will 'stamp' the revision table with the version specified with -the :option:`--revision` option. It will not run any migrations. - -upgrade -------- - -.. program:: upgrade - -.. option:: -h, --help - - Show help for upgrade and exit. - -.. option:: --revision - - The revision number to upgrade to. - -This command will upgrade existing database tables to the most recent version, -or to the version specified with the :option:`--revision` option. 
- -If there are no existing tables, then new tables are created, beginning -with the oldest known version, and successively upgraded using all of the -database migration files, until they are at the specified version. Note -that this behavior is different from the :ref:`create_schema` command -that creates the tables based on the most recent version. - -An example of upgrading to the most recent table versions:: - - watcher-db-manage --config-file=/etc/watcher/watcher.conf upgrade - -.. note:: - - This command is the default if no command is given to - :command:`watcher-db-manage`. - -.. warning:: - - The upgrade command is not compatible with SQLite databases since it uses - ALTER TABLE commands to upgrade the database tables. SQLite supports only - a limited subset of ALTER TABLE. - -version -------- - -.. program:: version - -.. option:: -h, --help - - Show help for version and exit. - -This command will output the current database version. - -purge ------ - -.. program:: purge - -.. option:: -h, --help - - Show help for purge and exit. - -.. option:: -d, --age-in-days - - The number of days (starting from today) before which we consider soft - deleted objects as expired and should hence be erased. By default, all - objects soft deleted are considered expired. This can be useful as removing - a significant amount of objects may cause a performance issues. - -.. option:: -n, --max-number - - The maximum number of database objects we expect to be deleted. If exceeded, - this will prevent any deletion. - -.. option:: -t, --audit-template - - Either the UUID or name of the soft deleted audit template to purge. This - will also include any related objects with it. - -.. option:: -e, --exclude-orphans - - This is a flag to indicate when we want to exclude orphan objects from - deletion. - -.. option:: --dry-run - - This is a flag to indicate when we want to perform a dry run. This will show - the objects that would be deleted instead of actually deleting them. 
- -This command will purge the current database by removing both its soft deleted -and orphan objects. diff --git a/doc/source/man/watcher-decision-engine.rst b/doc/source/man/watcher-decision-engine.rst deleted file mode 100644 index 3e07c3b..0000000 --- a/doc/source/man/watcher-decision-engine.rst +++ /dev/null @@ -1,39 +0,0 @@ -======================= -watcher-decision-engine -======================= - ---------------------------------------- -Service for the Watcher Decision Engine ---------------------------------------- - -:Author: openstack@lists.launchpad.net -:Date: -:Copyright: OpenStack Foundation -:Version: -:Manual section: 1 -:Manual group: cloud computing - -SYNOPSIS -======== - -watcher-decision-engine [options] - -DESCRIPTION -=========== - -:ref:`Watcher Decision Engine ` - -OPTIONS -======= - - **General options** - - .. include:: general-options.rst - -FILES -===== - - **/etc/watcher/watcher.conf** - Default configuration file for Watcher Decision Engine - -.. include:: footer.rst diff --git a/doc/source/strategies/basic-server-consolidation.rst b/doc/source/strategies/basic-server-consolidation.rst deleted file mode 100644 index ef8bea1..0000000 --- a/doc/source/strategies/basic-server-consolidation.rst +++ /dev/null @@ -1,99 +0,0 @@ -================================== -Basic Offline Server Consolidation -================================== - -Synopsis --------- - -**display name**: ``basic`` - -**goal**: ``server_consolidation`` - - .. watcher-term:: watcher.decision_engine.strategy.strategies.basic_consolidation - -Requirements ------------- - -Metrics -******* - -The *basic* strategy requires the following metrics: - -============================ ============ ======= ======= -metric service name plugins comment -============================ ============ ======= ======= -``compute.node.cpu.percent`` ceilometer_ none -``cpu_util`` ceilometer_ none -============================ ============ ======= ======= - -.. 
_ceilometer: http://docs.openstack.org/admin-guide/telemetry-measurements.html#openstack-compute - -Cluster data model -****************** - -Default Watcher's Compute cluster data model: - - .. watcher-term:: watcher.decision_engine.model.collector.nova.NovaClusterDataModelCollector - -Actions -******* - -Default Watcher's actions: - - - .. list-table:: - :widths: 30 30 - :header-rows: 1 - - * - action - - description - * - ``migration`` - - .. watcher-term:: watcher.applier.actions.migration.Migrate - * - ``change_nova_service_state`` - - .. watcher-term:: watcher.applier.actions.change_nova_service_state.ChangeNovaServiceState - -Planner -******* - -Default Watcher's planner: - - .. watcher-term:: watcher.decision_engine.planner.weight.WeightPlanner - -Configuration -------------- - -Strategy parameter is: - -====================== ====== ============= =================================== -parameter type default Value description -====================== ====== ============= =================================== -``migration_attempts`` Number 0 Maximum number of combinations to - be tried by the strategy while - searching for potential candidates. - To remove the limit, set it to 0 -``period`` Number 7200 The time interval in seconds - for getting statistic aggregation - from metric data source -====================== ====== ============= =================================== - -Efficacy Indicator ------------------- - -.. watcher-func:: - :format: literal_block - - watcher.decision_engine.goal.efficacy.specs.ServerConsolidation.get_global_efficacy_indicator - -How to use it ? ---------------- - -.. code-block:: shell - - $ openstack optimize audittemplate create \ - at1 server_consolidation --strategy basic - - $ openstack optimize audit create -a at1 -p migration_attempts=4 - -External Links --------------- -None. 
diff --git a/doc/source/strategies/index.rst b/doc/source/strategies/index.rst deleted file mode 100644 index 9af2667..0000000 --- a/doc/source/strategies/index.rst +++ /dev/null @@ -1,8 +0,0 @@ -Strategies -========== - -.. toctree:: - :glob: - :maxdepth: 1 - - ./* diff --git a/doc/source/strategies/outlet_temp_control.rst b/doc/source/strategies/outlet_temp_control.rst deleted file mode 100644 index 5a4294c..0000000 --- a/doc/source/strategies/outlet_temp_control.rst +++ /dev/null @@ -1,104 +0,0 @@ -================================= -Outlet Temperature Based Strategy -================================= - -Synopsis --------- - -**display name**: ``outlet_temperature`` - -**goal**: ``thermal_optimization`` - -Outlet (Exhaust Air) temperature is a new thermal telemetry which can be -used to measure the host's thermal/workload status. This strategy makes -decisions to migrate workloads to the hosts with good thermal condition -(lowest outlet temperature) when the outlet temperature of source hosts -reach a configurable threshold. - -Requirements ------------- - -This strategy has a dependency on the host having Intel's Power -Node Manager 3.0 or later enabled. - - -Metrics -******* - -The *outlet_temperature* strategy requires the following metrics: - -========================================= ============ ======= ======= -metric service name plugins comment -========================================= ============ ======= ======= -``hardware.ipmi.node.outlet_temperature`` ceilometer_ IPMI -========================================= ============ ======= ======= - -.. _ceilometer: http://docs.openstack.org/admin-guide/telemetry-measurements.html#ipmi-based-meters - -Cluster data model -****************** - -Default Watcher's Compute cluster data model: - - .. watcher-term:: watcher.decision_engine.model.collector.nova.NovaClusterDataModelCollector - -Actions -******* - -Default Watcher's actions: - - .. 
list-table:: - :widths: 30 30 - :header-rows: 1 - - * - action - - description - * - ``migration`` - - .. watcher-term:: watcher.applier.actions.migration.Migrate - -Planner -******* - -Default Watcher's planner: - - .. watcher-term:: watcher.decision_engine.planner.weight.WeightPlanner - -Configuration -------------- - -Strategy parameter is: - -============== ====== ============= ==================================== -parameter type default Value description -============== ====== ============= ==================================== -``threshold`` Number 35.0 Temperature threshold for migration -``period`` Number 30 The time interval in seconds for - getting statistic aggregation from - metric data source -============== ====== ============= ==================================== - -Efficacy Indicator ------------------- - -None - -Algorithm ---------- - -For more information on the Outlet Temperature Based Strategy please refer to: -https://specs.openstack.org/openstack/watcher-specs/specs/mitaka/implemented/outlet-temperature-based-strategy.html - -How to use it ? ---------------- - -.. code-block:: shell - - $ openstack optimize audittemplate create \ - at1 thermal_optimization --strategy outlet_temperature - - $ openstack optimize audit create -a at1 -p threshold=31.0 - -External Links --------------- - -- `Intel Power Node Manager 3.0 `_ diff --git a/doc/source/strategies/strategy-template.rst b/doc/source/strategies/strategy-template.rst deleted file mode 100644 index 4c07edd..0000000 --- a/doc/source/strategies/strategy-template.rst +++ /dev/null @@ -1,115 +0,0 @@ -============= -Strategy name -============= - -Synopsis --------- - -**display name**: - -**goal**: - -Add here a complete description of your strategy - -Requirements ------------- - -Metrics -******* - -Write here the list of metrics required by your strategy algorithm (in the form - of a table). 
If these metrics requires specific Telemetry plugin or other - additional software, please explain here how to deploy them (and add link to - dedicated installation guide). - -Example: - -======================= ============ ======= ======= -metric service name plugins comment -======================= ============ ======= ======= -compute.node.* ceilometer_ none one point every 60s -vm.cpu.utilization_perc monasca_ none -power ceilometer_ kwapi_ one point every 60s -======================= ============ ======= ======= - - -.. _ceilometer: http://docs.openstack.org/admin-guide/telemetry-measurements.html#openstack-compute -.. _monasca: https://github.com/openstack/monasca-agent/blob/master/docs/Libvirt.md -.. _kwapi: https://kwapi.readthedocs.io/en/latest/index.html - - -Cluster data model -****************** - -Default Watcher's cluster data model. - -or - -If your strategy implementation requires a new cluster data model, please - describe it in this section, with a link to model plugin's installation guide. - -Actions -******* - -Default Watcher's actions. - -or - -If your strategy implementation requires new actions, add the list of Action - plugins here (in the form of a table) with a link to the plugin's installation - procedure. - -======== ================= -action description -======== ================= -action1_ This action1 ... -action2_ This action2 ... -======== ================= - -.. _action1 : https://github.com/myrepo/watcher/plugins/action1 -.. _action2 : https://github.com/myrepo/watcher/plugins/action2 - -Planner -******* - -Default Watcher's planner. - -or - -If your strategy requires also a new planner to schedule built actions in time, - please describe it in this section, with a link to planner plugin's - installation guide. - -Configuration -------------- - -If your strategy use configurable parameters, explain here how to tune them. - - -Efficacy Indicator ------------------- - -Add here the Efficacy indicator computed by your strategy. 
- -Algorithm ---------- - -Add here either the description of your algorithm or -link to the existing description. - -How to use it ? ---------------- - -.. code-block:: shell - - $ Write the command line to create an audit with your strategy. - -External Links --------------- - -If you have written papers, blog articles .... about your strategy into Watcher, - or if your strategy is based from external publication(s), please add HTTP - links and references in this section. - -- `link1 `_ -- `link2 `_ diff --git a/doc/source/strategies/uniform_airflow.rst b/doc/source/strategies/uniform_airflow.rst deleted file mode 100644 index d31f631..0000000 --- a/doc/source/strategies/uniform_airflow.rst +++ /dev/null @@ -1,107 +0,0 @@ -================================== -Uniform Airflow Migration Strategy -================================== - -Synopsis --------- - -**display name**: ``uniform_airflow`` - -**goal**: ``airflow_optimization`` - -.. watcher-term:: watcher.decision_engine.strategy.strategies.uniform_airflow - -Requirements ------------- - -This strategy has a dependency on the server having Intel's Power -Node Manager 3.0 or later enabled. - -Metrics -******* - -The *uniform_airflow* strategy requires the following metrics: - -================================== ============ ======= ======= -metric service name plugins comment -================================== ============ ======= ======= -``hardware.ipmi.node.airflow`` ceilometer_ IPMI -``hardware.ipmi.node.temperature`` ceilometer_ IPMI -``hardware.ipmi.node.power`` ceilometer_ IPMI -================================== ============ ======= ======= - -.. _ceilometer: http://docs.openstack.org/admin-guide/telemetry-measurements.html#ipmi-based-meters - -Cluster data model -****************** - -Default Watcher's Compute cluster data model: - - .. watcher-term:: watcher.decision_engine.model.collector.nova.NovaClusterDataModelCollector - -Actions -******* - -Default Watcher's actions: - - - .. 
list-table:: - :widths: 30 30 - :header-rows: 1 - - * - action - - description - * - ``migration`` - - .. watcher-term:: watcher.applier.actions.migration.Migrate - -Planner -******* - -Default Watcher's planner: - - .. watcher-term:: watcher.decision_engine.planner.weight.WeightPlanner - -Configuration -------------- - -Strategy parameters are: - -====================== ====== ============= =========================== -parameter type default Value description -====================== ====== ============= =========================== -``threshold_airflow`` Number 400.0 Airflow threshold for - migration Unit is 0.1CFM -``threshold_inlet_t`` Number 28.0 Inlet temperature threshold - for migration decision -``threshold_power`` Number 350.0 System power threshold for - migration decision -``period`` Number 300 Aggregate time period of - ceilometer -====================== ====== ============= =========================== - -Efficacy Indicator ------------------- - -None - -Algorithm ---------- - -For more information on the Uniform Airflow Migration Strategy please refer to: -https://specs.openstack.org/openstack/watcher-specs/specs/newton/implemented/uniform-airflow-migration-strategy.html - -How to use it ? ---------------- - -.. 
code-block:: shell - - $ openstack optimize audittemplate create \ - at1 airflow_optimization --strategy uniform_airflow - - $ openstack optimize audit create -a at1 -p threshold_airflow=410 \ - -p threshold_inlet_t=29.0 -p threshold_power=355.0 -p period=310 - -External Links --------------- - -- `Intel Power Node Manager 3.0 `_ diff --git a/doc/source/strategies/vm_workload_consolidation.rst b/doc/source/strategies/vm_workload_consolidation.rst deleted file mode 100644 index 5d30f9d..0000000 --- a/doc/source/strategies/vm_workload_consolidation.rst +++ /dev/null @@ -1,114 +0,0 @@ -================================== -VM Workload Consolidation Strategy -================================== - -Synopsis --------- - -**display name**: ``vm_workload_consolidation`` - -**goal**: ``vm_consolidation`` - - .. watcher-term:: watcher.decision_engine.strategy.strategies.vm_workload_consolidation - -Requirements ------------- - -Metrics -******* - -The *vm_workload_consolidation* strategy requires the following metrics: - -============================ ============ ======= ======= -metric service name plugins comment -============================ ============ ======= ======= -``memory`` ceilometer_ none -``disk.root.size`` ceilometer_ none -============================ ============ ======= ======= - -The following metrics are not required but increase the accuracy of -the strategy if available: - -============================ ============ ======= ======= -metric service name plugins comment -============================ ============ ======= ======= -``memory.usage`` ceilometer_ none -``cpu_util`` ceilometer_ none -============================ ============ ======= ======= - -.. _ceilometer: http://docs.openstack.org/admin-guide/telemetry-measurements.html#openstack-compute - -Cluster data model -****************** - -Default Watcher's Compute cluster data model: - - .. 
watcher-term:: watcher.decision_engine.model.collector.nova.NovaClusterDataModelCollector - -Actions -******* - -Default Watcher's actions: - - - .. list-table:: - :widths: 30 30 - :header-rows: 1 - - * - action - - description - * - ``migration`` - - .. watcher-term:: watcher.applier.actions.migration.Migrate - * - ``change_nova_service_state`` - - .. watcher-term:: watcher.applier.actions.change_nova_service_state.ChangeNovaServiceState - -Planner -******* - -Default Watcher's planner: - - .. watcher-term:: watcher.decision_engine.planner.weight.WeightPlanner - - -Configuration -------------- - -Strategy parameter is: - -====================== ====== ============= =================================== -parameter type default Value description -====================== ====== ============= =================================== -``period`` Number 3600 The time interval in seconds - for getting statistic aggregation - from metric data source -====================== ====== ============= =================================== - - -Efficacy Indicator ------------------- - -.. watcher-func:: - :format: literal_block - - watcher.decision_engine.goal.efficacy.specs.ServerConsolidation.get_global_efficacy_indicator - -Algorithm ---------- - -For more information on the VM Workload consolidation strategy please refer to: https://specs.openstack.org/openstack/watcher-specs/specs/mitaka/implemented/zhaw-load-consolidation.html - -How to use it ? ---------------- - -.. 
code-block:: shell - - $ openstack optimize audittemplate create \ - at1 server_consolidation --strategy vm_workload_consolidation - - $ openstack optimize audit create -a at1 - -External Links --------------- - -*Spec URL* -https://specs.openstack.org/openstack/watcher-specs/specs/mitaka/implemented/zhaw-load-consolidation.html diff --git a/doc/source/strategies/workload-stabilization.rst b/doc/source/strategies/workload-stabilization.rst deleted file mode 100644 index 567aa1d..0000000 --- a/doc/source/strategies/workload-stabilization.rst +++ /dev/null @@ -1,141 +0,0 @@ -============================================= -Watcher Overload standard deviation algorithm -============================================= - -Synopsis --------- - -**display name**: ``workload_stabilization`` - -**goal**: ``workload_balancing`` - - .. watcher-term:: watcher.decision_engine.strategy.strategies.workload_stabilization - -Requirements ------------- - -Metrics -******* - -The *workload_stabilization* strategy requires the following metrics: - -============================ ============ ======= ======= -metric service name plugins comment -============================ ============ ======= ======= -``compute.node.cpu.percent`` ceilometer_ none -``hardware.memory.used`` ceilometer_ SNMP_ -``cpu_util`` ceilometer_ none -``memory.resident`` ceilometer_ none -============================ ============ ======= ======= - -.. _ceilometer: http://docs.openstack.org/admin-guide/telemetry-measurements.html#openstack-compute -.. _SNMP: http://docs.openstack.org/admin-guide/telemetry-measurements.html - -Cluster data model -****************** - -Default Watcher's Compute cluster data model: - - .. watcher-term:: watcher.decision_engine.model.collector.nova.NovaClusterDataModelCollector - -Actions -******* - -Default Watcher's actions: - - - .. list-table:: - :widths: 30 30 - :header-rows: 1 - - * - action - - description - * - ``migration`` - - .. 
watcher-term:: watcher.applier.actions.migration.Migrate - -Planner -******* - -Default Watcher's planner: - - .. watcher-term:: watcher.decision_engine.planner.weight.WeightPlanner - -Configuration -------------- - -Strategy parameters are: - -==================== ====== ===================== ============================= -parameter type default Value description -==================== ====== ===================== ============================= -``metrics`` array |metrics| Metrics used as rates of - cluster loads. -``thresholds`` object |thresholds| Dict where key is a metric - and value is a trigger value. - -``weights`` object |weights| These weights used to - calculate common standard - deviation. Name of weight - contains meter name and - _weight suffix. -``instance_metrics`` object |instance_metrics| Mapping to get hardware - statistics using instance - metrics. -``host_choice`` string retry Method of host's choice. - There are cycle, retry and - fullsearch methods. Cycle - will iterate hosts in cycle. - Retry will get some hosts - random (count defined in - retry_count option). - Fullsearch will return each - host from list. -``retry_count`` number 1 Count of random returned - hosts. -``periods`` object |periods| These periods are used to get - statistic aggregation for - instance and host metrics. - The period is simply a - repeating interval of time - into which the samples are - grouped for aggregation. - Watcher uses only the last - period of all recieved ones. -==================== ====== ===================== ============================= - -.. |metrics| replace:: ["cpu_util", "memory.resident"] -.. |thresholds| replace:: {"cpu_util": 0.2, "memory.resident": 0.2} -.. |weights| replace:: {"cpu_util_weight": 1.0, "memory.resident_weight": 1.0} -.. |instance_metrics| replace:: {"cpu_util": "compute.node.cpu.percent", "memory.resident": "hardware.memory.used"} -.. 
|periods| replace:: {"instance": 720, "node": 600} - -Efficacy Indicator ------------------- - -.. watcher-func:: - :format: literal_block - - watcher.decision_engine.goal.efficacy.specs.ServerConsolidation.get_global_efficacy_indicator - -Algorithm ---------- - -You can find description of overload algorithm and role of standard deviation -here: https://specs.openstack.org/openstack/watcher-specs/specs/newton/implemented/sd-strategy.html - -How to use it ? ---------------- - -.. code-block:: shell - - $ openstack optimize audittemplate create \ - at1 workload_balancing --strategy workload_stabilization - - $ openstack optimize audit create -a at1 \ - -p thresholds='{"memory.resident": 0.05}' \ - -p metrics='["memory.resident"]' - -External Links --------------- - -- `Watcher Overload standard deviation algorithm spec `_ diff --git a/doc/source/strategies/workload_balance.rst b/doc/source/strategies/workload_balance.rst deleted file mode 100644 index ea09c6e..0000000 --- a/doc/source/strategies/workload_balance.rst +++ /dev/null @@ -1,98 +0,0 @@ -=================================== -Workload Balance Migration Strategy -=================================== - -Synopsis --------- - -**display name**: ``workload_balance`` - -**goal**: ``workload_balancing`` - -.. watcher-term:: watcher.decision_engine.strategy.strategies.workload_balance - -Requirements ------------- - -None. - -Metrics -******* - -The *workload_balance* strategy requires the following metrics: - -======================= ============ ======= ======= -metric service name plugins comment -======================= ============ ======= ======= -``cpu_util`` ceilometer_ none -======================= ============ ======= ======= - -.. _ceilometer: http://docs.openstack.org/admin-guide/telemetry-measurements.html#openstack-compute - - -Cluster data model -****************** - -Default Watcher's Compute cluster data model: - - .. 
watcher-term:: watcher.decision_engine.model.collector.nova.NovaClusterDataModelCollector - -Actions -******* - -Default Watcher's actions: - - .. list-table:: - :widths: 30 30 - :header-rows: 1 - - * - action - - description - * - ``migration`` - - .. watcher-term:: watcher.applier.actions.migration.Migrate - -Planner -******* - -Default Watcher's planner: - - .. watcher-term:: watcher.decision_engine.planner.weight.WeightPlanner - -Configuration -------------- - -Strategy parameters are: - -============== ====== ============= ==================================== -parameter type default Value description -============== ====== ============= ==================================== -``threshold`` Number 25.0 Workload threshold for migration -``period`` Number 300 Aggregate time period of ceilometer -============== ====== ============= ==================================== - -Efficacy Indicator ------------------- - -None - -Algorithm ---------- - -For more information on the Workload Balance Migration Strategy please refer -to: https://specs.openstack.org/openstack/watcher-specs/specs/mitaka/implemented/workload-balance-migration-strategy.html - -How to use it ? ---------------- - -.. code-block:: shell - - $ openstack optimize audittemplate create \ - at1 workload_balancing --strategy workload_balance - - $ openstack optimize audit create -a at1 -p threshold=26.0 \ - -p period=310 - -External Links --------------- - -None. diff --git a/doc/source/user/index.rst b/doc/source/user/index.rst deleted file mode 100644 index bb1db2c..0000000 --- a/doc/source/user/index.rst +++ /dev/null @@ -1,4 +0,0 @@ -.. toctree:: - :maxdepth: 2 - - user-guide diff --git a/doc/source/user/user-guide.rst b/doc/source/user/user-guide.rst deleted file mode 100644 index e0f2b54..0000000 --- a/doc/source/user/user-guide.rst +++ /dev/null @@ -1,236 +0,0 @@ -.. - Except where otherwise noted, this document is licensed under Creative - Commons Attribution 3.0 License. 
You can view the license at: - - https://creativecommons.org/licenses/by/3.0/ - -.. _user-guide: - -================== -Watcher User Guide -================== - -See the -`architecture page `_ -for an architectural overview of the different components of Watcher and how -they fit together. - -In this guide we're going to take you through the fundamentals of using -Watcher. - -The following diagram shows the main interactions between the -:ref:`Administrator ` and the Watcher system: - -.. image:: ../images/sequence_overview_watcher_usage.png - :width: 100% - - -Getting started with Watcher ----------------------------- -This guide assumes you have a working installation of Watcher. If you get -"*watcher: command not found*" you may have to verify your installation. -Please refer to the `installation guide`_. -In order to use Watcher, you have to configure your credentials suitable for -watcher command-line tools. - -You can interact with Watcher either by using our dedicated `Watcher CLI`_ -named ``watcher``, or by using the `OpenStack CLI`_ ``openstack``. - -If you want to deploy Watcher in Horizon, please refer to the `Watcher Horizon -plugin installation guide`_. - -.. _`installation guide`: http://docs.openstack.org/developer/python-watcherclient -.. _`Watcher Horizon plugin installation guide`: http://docs.openstack.org/developer/watcher-dashboard/deploy/installation.html -.. _`OpenStack CLI`: http://docs.openstack.org/developer/python-openstackclient/man/openstack.html -.. _`Watcher CLI`: http://docs.openstack.org/developer/python-watcherclient/index.html - -Seeing what the Watcher CLI can do ? ------------------------------------- -We can see all of the commands available with Watcher CLI by running the -watcher binary without options. - -.. code:: bash - - $ watcher help - -or:: - - $ openstack help optimize - -How do I run an audit of my cluster ? -------------------------------------- - -First, you need to find the :ref:`goal ` you want to achieve: - -.. 
code:: bash - - $ watcher goal list - -or:: - - $ openstack optimize goal list - -.. note:: - - If you get "*You must provide a username via either --os-username or via - env[OS_USERNAME]*" you may have to verify your credentials. - -Then, you can create an :ref:`audit template `. -An :ref:`audit template ` defines an optimization -:ref:`goal ` to achieve (i.e. the settings of your audit). - -.. code:: bash - - $ watcher audittemplate create my_first_audit_template - -or:: - - $ openstack optimize audittemplate create my_first_audit_template - -Although optional, you may want to actually set a specific strategy for your -audit template. If so, you may can search of its UUID or name using the -following command: - -.. code:: bash - - $ watcher strategy list --goal - -or:: - - $ openstack optimize strategy list --goal - -You can use the following command to check strategy details including which -parameters of which format it supports: - -.. code:: bash - - $ watcher strategy show - -or:: - - $ openstack optimize strategy show - -The command to create your audit template would then be: - -.. code:: bash - - $ watcher audittemplate create my_first_audit_template \ - --strategy - -or:: - - $ openstack optimize audittemplate create my_first_audit_template \ - --strategy - -Then, you can create an audit. An audit is a request for optimizing your -cluster depending on the specified :ref:`goal `. - -You can launch an audit on your cluster by referencing the -:ref:`audit template ` (i.e. the settings of your -audit) that you want to use. - -- Get the :ref:`audit template ` UUID or name: - -.. code:: bash - - $ watcher audittemplate list - -or:: - - $ openstack optimize audittemplate list - -- Start an audit based on this :ref:`audit template - ` settings: - -.. 
code:: bash - - $ watcher audit create -a - -or:: - - $ openstack optimize audit create -a - -If your_audit_template was created by --strategy , and it -defines some parameters (command `watcher strategy show` to check parameters -format), your can append `-p` to input required parameters: - -.. code:: bash - - $ watcher audit create -a \ - -p =5.5 -p =hi - -or:: - - $ openstack optimize audit create -a \ - -p =5.5 -p =hi - -Input parameter could cause audit creation failure, when: - -- no predefined strategy for audit template -- no parameters spec in predefined strategy -- input parameters don't comply with spec - -Watcher service will compute an :ref:`Action Plan ` -composed of a list of potential optimization :ref:`actions ` -(instance migration, disabling of a compute node, ...) according to the -:ref:`goal ` to achieve. You can see all of the goals -available in section ``[watcher_strategies]`` of the Watcher service -configuration file. - -- Wait until the Watcher audit has produced a new :ref:`action plan - `, and get it: - -.. code:: bash - - $ watcher actionplan list --audit - -or:: - - $ openstack optimize actionplan list --audit - -- Have a look on the list of optimization :ref:`actions ` - contained in this new :ref:`action plan `: - -.. code:: bash - - $ watcher action list --action-plan - -or:: - - $ openstack optimize action list --action-plan - -Once you have learned how to create an :ref:`Action Plan -`, it's time to go further by applying it to your -cluster: - -- Execute the :ref:`action plan `: - -.. code:: bash - - $ watcher actionplan start - -or:: - - $ openstack optimize actionplan start - -You can follow the states of the :ref:`actions ` by -periodically calling: - -.. code:: bash - - $ watcher action list - -or:: - - $ openstack optimize action list - -You can also obtain more detailed information about a specific action: - -.. 
code:: bash - - $ watcher action show - -or:: - - $ openstack optimize action show - diff --git a/etc/apache2/watcher b/etc/apache2/watcher deleted file mode 100644 index bdf5562..0000000 --- a/etc/apache2/watcher +++ /dev/null @@ -1,33 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# This is an example Apache2 configuration file for using -# Watcher API through mod_wsgi -Listen 9322 - - - WSGIDaemonProcess watcher-api user=stack group=stack processes=2 threads=2 display-name=%{GROUP} - WSGIScriptAlias / /opt/stack/watcher/watcher/api/app.wsgi - WSGIProcessGroup watcher-api - - ErrorLog /var/log/httpd/watcher_error.log - LogLevel info - CustomLog /var/log/httpd/watcher_access.log combined - - - WSGIProcessGroup watcher-api - WSGIApplicationGroup %{GLOBAL} - AllowOverride All - Require all granted - - - diff --git a/etc/watcher/README-watcher.conf.txt b/etc/watcher/README-watcher.conf.txt deleted file mode 100644 index 59373b8..0000000 --- a/etc/watcher/README-watcher.conf.txt +++ /dev/null @@ -1,4 +0,0 @@ -To generate the sample watcher.conf file, run the following -command from the top level of the watcher directory: - -tox -e genconfig diff --git a/etc/watcher/policy.json b/etc/watcher/policy.json deleted file mode 100644 index 5f94931..0000000 --- a/etc/watcher/policy.json +++ /dev/null @@ -1,45 +0,0 @@ -{ - "admin_api": "role:admin or role:administrator", - "show_password": "!", - "default": "rule:admin_api", - - "action:detail": "rule:default", - 
"action:get": "rule:default", - "action:get_all": "rule:default", - - "action_plan:delete": "rule:default", - "action_plan:detail": "rule:default", - "action_plan:get": "rule:default", - "action_plan:get_all": "rule:default", - "action_plan:update": "rule:default", - - "audit:create": "rule:default", - "audit:delete": "rule:default", - "audit:detail": "rule:default", - "audit:get": "rule:default", - "audit:get_all": "rule:default", - "audit:update": "rule:default", - - "audit_template:create": "rule:default", - "audit_template:delete": "rule:default", - "audit_template:detail": "rule:default", - "audit_template:get": "rule:default", - "audit_template:get_all": "rule:default", - "audit_template:update": "rule:default", - - "goal:detail": "rule:default", - "goal:get": "rule:default", - "goal:get_all": "rule:default", - - "scoring_engine:detail": "rule:default", - "scoring_engine:get": "rule:default", - "scoring_engine:get_all": "rule:default", - - "strategy:detail": "rule:default", - "strategy:get": "rule:default", - "strategy:get_all": "rule:default", - - "service:detail": "rule:default", - "service:get": "rule:default", - "service:get_all": "rule:default" -} diff --git a/etc/watcher/watcher-config-generator.conf b/etc/watcher/watcher-config-generator.conf deleted file mode 100644 index 6e51dea..0000000 --- a/etc/watcher/watcher-config-generator.conf +++ /dev/null @@ -1,16 +0,0 @@ -[DEFAULT] -output_file = etc/watcher/watcher.conf.sample -wrap_width = 79 - -namespace = watcher -namespace = keystonemiddleware.auth_token -namespace = oslo.cache -namespace = oslo.concurrency -namespace = oslo.db -namespace = oslo.log -namespace = oslo.messaging -namespace = oslo.policy -namespace = oslo.reports -namespace = oslo.service.periodic_task -namespace = oslo.service.service -namespace = oslo.service.wsgi diff --git a/rally-jobs/README.rst b/rally-jobs/README.rst deleted file mode 100644 index 37c09ff..0000000 --- a/rally-jobs/README.rst +++ /dev/null @@ -1,42 +0,0 @@ -Rally 
job -========= - -We provide, with Watcher, a Rally plugin you can use to benchmark the optimization service. - -To launch this task with configured Rally you just need to run: - -:: - - rally task start watcher/rally-jobs/watcher-watcher.yaml - -Structure ---------- - -* plugins - directory where you can add rally plugins. Almost everything in - Rally is a plugin. Benchmark context, Benchmark scenario, SLA checks, Generic - cleanup resources, .... - -* extra - all files from this directory will be copy pasted to gates, so you - are able to use absolute paths in rally tasks. - Files will be located in ~/.rally/extra/* - -* watcher.yaml is a task that is run in gates against OpenStack - deployed by DevStack - - -Useful links ------------- - -* How to install: http://docs.openstack.org/developer/rally/install.html - -* How to set Rally up and launch your first scenario: https://rally.readthedocs.io/en/latest/tutorial/step_1_setting_up_env_and_running_benchmark_from_samples.html - -* More about Rally: https://rally.readthedocs.org/en/latest/ - -* Rally release notes: https://rally.readthedocs.org/en/latest/release_notes.html - -* How to add rally-gates: https://rally.readthedocs.org/en/latest/gates.html - -* About plugins: https://rally.readthedocs.org/en/latest/plugins.html - -* Plugin samples: https://github.com/openstack/rally/tree/master/samples/plugins diff --git a/rally-jobs/watcher-watcher.yaml b/rally-jobs/watcher-watcher.yaml deleted file mode 100644 index c01310b..0000000 --- a/rally-jobs/watcher-watcher.yaml +++ /dev/null @@ -1,63 +0,0 @@ ---- - Watcher.create_audit_and_delete: - - - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - audit_templates: - audit_templates_per_admin: 5 - fill_strategy: "round_robin" - params: - - goal: - name: "dummy" - strategy: - name: "dummy" - sla: - failure_rate: - max: 0 - - Watcher.create_audit_template_and_delete: - - - args: - goal: - name: "dummy" - 
strategy: - name: "dummy" - runner: - type: "constant" - times: 10 - concurrency: 2 - sla: - failure_rate: - max: 0 - - Watcher.list_audit_templates: - - - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - audit_templates: - audit_templates_per_admin: 5 - fill_strategy: "random" - params: - - goal: - name: "workload_balancing" - strategy: - name: "workload_stabilization" - - goal: - name: "dummy" - strategy: - name: "dummy" - sla: - failure_rate: - max: 0 diff --git a/releasenotes/notes/.placeholder b/releasenotes/notes/.placeholder deleted file mode 100644 index e69de29..0000000 diff --git a/releasenotes/notes/action-plan-cancel-c54726378019e096.yaml b/releasenotes/notes/action-plan-cancel-c54726378019e096.yaml deleted file mode 100644 index cf4b562..0000000 --- a/releasenotes/notes/action-plan-cancel-c54726378019e096.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - | - Adds feature to cancel an action-plan. diff --git a/releasenotes/notes/action-plan-versioned-notifications-api-e8ca4f5d37aa5b4b.yaml b/releasenotes/notes/action-plan-versioned-notifications-api-e8ca4f5d37aa5b4b.yaml deleted file mode 100644 index 1c73022..0000000 --- a/releasenotes/notes/action-plan-versioned-notifications-api-e8ca4f5d37aa5b4b.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Add notifications related to Action plan object. diff --git a/releasenotes/notes/add-plugins-parameters-376eb6b0b8978b44.yaml b/releasenotes/notes/add-plugins-parameters-376eb6b0b8978b44.yaml deleted file mode 100644 index 0d2b749..0000000 --- a/releasenotes/notes/add-plugins-parameters-376eb6b0b8978b44.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -features: - - Added a standard way to both declare and fetch - configuration options so that whenever the - administrator generates the Watcher - configuration sample file, it contains the - configuration options of the plugins that are - currently available. 
diff --git a/releasenotes/notes/add-power-on-off-a77673d482568a8b.yaml b/releasenotes/notes/add-power-on-off-a77673d482568a8b.yaml deleted file mode 100644 index 0a3f172..0000000 --- a/releasenotes/notes/add-power-on-off-a77673d482568a8b.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Add action for compute node power on/off diff --git a/releasenotes/notes/add-scoring-module-fa00d013ed2d614e.yaml b/releasenotes/notes/add-scoring-module-fa00d013ed2d614e.yaml deleted file mode 100644 index 1255b71..0000000 --- a/releasenotes/notes/add-scoring-module-fa00d013ed2d614e.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -features: - - Added a generic scoring engine module, which - will standardize interactions with scoring engines - through the common API. It is possible to use the - scoring engine by different Strategies, which - improve the code and data model re-use. diff --git a/releasenotes/notes/audit-versioned-notifications-api-bca7738e16954bad.yaml b/releasenotes/notes/audit-versioned-notifications-api-bca7738e16954bad.yaml deleted file mode 100644 index 9eb1cd2..0000000 --- a/releasenotes/notes/audit-versioned-notifications-api-bca7738e16954bad.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Add notifications related to Audit object. diff --git a/releasenotes/notes/automatic-triggering-audit-8a9b0540d547db60.yaml b/releasenotes/notes/automatic-triggering-audit-8a9b0540d547db60.yaml deleted file mode 100644 index b96723c..0000000 --- a/releasenotes/notes/automatic-triggering-audit-8a9b0540d547db60.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - Watcher can continuously optimize the OpenStack cloud for a specific - strategy or goal by triggering an audit periodically which generates - an action plan and run it automatically. 
diff --git a/releasenotes/notes/centralise-config-opts-95670987dfbdb0e7.yaml b/releasenotes/notes/centralise-config-opts-95670987dfbdb0e7.yaml deleted file mode 100644 index b76db57..0000000 --- a/releasenotes/notes/centralise-config-opts-95670987dfbdb0e7.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Centralize all configuration options for Watcher. diff --git a/releasenotes/notes/cinder-model-integration-baa394a72a0a33bf.yaml b/releasenotes/notes/cinder-model-integration-baa394a72a0a33bf.yaml deleted file mode 100644 index b0295cf..0000000 --- a/releasenotes/notes/cinder-model-integration-baa394a72a0a33bf.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - | - Added cinder cluster data model diff --git a/releasenotes/notes/cluster-model-objects-wrapper-9c799ea262c56a5b.yaml b/releasenotes/notes/cluster-model-objects-wrapper-9c799ea262c56a5b.yaml deleted file mode 100644 index 5d9d014..0000000 --- a/releasenotes/notes/cluster-model-objects-wrapper-9c799ea262c56a5b.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -features: - - Added an in-memory cache of the cluster model - built up and kept fresh via notifications from - services of interest in addition to periodic - syncing logic. diff --git a/releasenotes/notes/configurable-weights-default-planner-3746b33160bc7347.yaml b/releasenotes/notes/configurable-weights-default-planner-3746b33160bc7347.yaml deleted file mode 100644 index 53e101d..0000000 --- a/releasenotes/notes/configurable-weights-default-planner-3746b33160bc7347.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - Added a way to add a new action without having to - amend the source code of the default planner. 
diff --git a/releasenotes/notes/continuously-optimization-35364f4d2c0b81fc.yaml b/releasenotes/notes/continuously-optimization-35364f4d2c0b81fc.yaml deleted file mode 100644 index 898887b..0000000 --- a/releasenotes/notes/continuously-optimization-35364f4d2c0b81fc.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - Added a way to create periodic audit to be able to - optimize continuously the cloud infrastructure. diff --git a/releasenotes/notes/db-migration-e1a705a8b54ccdd2.yaml b/releasenotes/notes/db-migration-e1a705a8b54ccdd2.yaml deleted file mode 100644 index 63d5ad6..0000000 --- a/releasenotes/notes/db-migration-e1a705a8b54ccdd2.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Watcher database can now be upgraded thanks to Alembic. diff --git a/releasenotes/notes/define-the-audit-scope-e89edc5051dcf3f2.yaml b/releasenotes/notes/define-the-audit-scope-e89edc5051dcf3f2.yaml deleted file mode 100644 index 5cea918..0000000 --- a/releasenotes/notes/define-the-audit-scope-e89edc5051dcf3f2.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - Provides a generic way to define the scope of an audit. The set of audited - resources will be called "Audit scope" and will be defined in each audit - template (which contains the audit settings). diff --git a/releasenotes/notes/efficacy-indicator-95380ad7b84e3be2.yaml b/releasenotes/notes/efficacy-indicator-95380ad7b84e3be2.yaml deleted file mode 100644 index acaddcc..0000000 --- a/releasenotes/notes/efficacy-indicator-95380ad7b84e3be2.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - Added a way to compare the efficacy of different - strategies for a give optimization goal. 
diff --git a/releasenotes/notes/get-goal-from-strategy-396c9b13a38bb650.yaml b/releasenotes/notes/get-goal-from-strategy-396c9b13a38bb650.yaml deleted file mode 100644 index 9732b15..0000000 --- a/releasenotes/notes/get-goal-from-strategy-396c9b13a38bb650.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - Added a way to return the of available goals depending - on which strategies have been deployed on the node - where the decision engine is running. diff --git a/releasenotes/notes/graph-based-cluster-model-523937a6f5e66537.yaml b/releasenotes/notes/graph-based-cluster-model-523937a6f5e66537.yaml deleted file mode 100644 index d57a2dd..0000000 --- a/releasenotes/notes/graph-based-cluster-model-523937a6f5e66537.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -features: - - The graph model describes how VMs are associated to compute hosts. - This allows for seeing relationships upfront between the entities and hence - can be used to identify hot/cold spots in the data center and influence - a strategy decision. diff --git a/releasenotes/notes/monasca-support-0b0486b8572ac38b.yaml b/releasenotes/notes/monasca-support-0b0486b8572ac38b.yaml deleted file mode 100644 index a98dff8..0000000 --- a/releasenotes/notes/monasca-support-0b0486b8572ac38b.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - Watcher supports multiple metrics backend and relies on Ceilometer and - Monasca. diff --git a/releasenotes/notes/optimization-threshold-21ad38f0470d0e1a.yaml b/releasenotes/notes/optimization-threshold-21ad38f0470d0e1a.yaml deleted file mode 100644 index 8c4fa6c..0000000 --- a/releasenotes/notes/optimization-threshold-21ad38f0470d0e1a.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - Allow decision engine to pass strategy parameters, - like optimization threshold, to selected strategy, - also strategy to provide parameters info to end user. 
diff --git a/releasenotes/notes/persistent-audit-parameters-ae41dd7252ba9672.yaml b/releasenotes/notes/persistent-audit-parameters-ae41dd7252ba9672.yaml deleted file mode 100644 index 0d3eb61..0000000 --- a/releasenotes/notes/persistent-audit-parameters-ae41dd7252ba9672.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -features: - - Copy all audit templates parameters into - audit instead of having a reference to the - audit template. - diff --git a/releasenotes/notes/planner-storage-action-plan-26ef37893c5e8648.yaml b/releasenotes/notes/planner-storage-action-plan-26ef37893c5e8648.yaml deleted file mode 100644 index 0d3407f..0000000 --- a/releasenotes/notes/planner-storage-action-plan-26ef37893c5e8648.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - Watcher can now run specific actions in parallel improving the performances - dramatically when executing an action plan. diff --git a/releasenotes/notes/stale-action-plan-b6a6b08df873c128.yaml b/releasenotes/notes/stale-action-plan-b6a6b08df873c128.yaml deleted file mode 100644 index 9ef9a4c..0000000 --- a/releasenotes/notes/stale-action-plan-b6a6b08df873c128.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - Check the creation time of the action plan, - and set its state to SUPERSEDED if it has expired. diff --git a/releasenotes/notes/standard-deviation-strategy-cd1d0c443fdfde9c.yaml b/releasenotes/notes/standard-deviation-strategy-cd1d0c443fdfde9c.yaml deleted file mode 100644 index 0bdd480..0000000 --- a/releasenotes/notes/standard-deviation-strategy-cd1d0c443fdfde9c.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -features: - - Added a strategy that monitors if there is a higher - load on some hosts compared to other hosts in the - cluster and re-balances the work across hosts to - minimize the standard deviation of the loads in - the cluster. 
diff --git a/releasenotes/notes/suspended-audit-state-07f998c94e9d9a47.yaml b/releasenotes/notes/suspended-audit-state-07f998c94e9d9a47.yaml deleted file mode 100644 index cf66a44..0000000 --- a/releasenotes/notes/suspended-audit-state-07f998c94e9d9a47.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - | - Added SUSPENDED audit state diff --git a/releasenotes/notes/uniform-airflow-strategy-68cdba1419c3f770.yaml b/releasenotes/notes/uniform-airflow-strategy-68cdba1419c3f770.yaml deleted file mode 100644 index 7cbc421..0000000 --- a/releasenotes/notes/uniform-airflow-strategy-68cdba1419c3f770.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - Added a new strategy based on the airflow - of servers. This strategy makes decisions - to migrate VMs to make the airflow uniform. diff --git a/releasenotes/notes/watcher-notifications-ovo-7b44d52ef6400dd0.yaml b/releasenotes/notes/watcher-notifications-ovo-7b44d52ef6400dd0.yaml deleted file mode 100644 index d469c00..0000000 --- a/releasenotes/notes/watcher-notifications-ovo-7b44d52ef6400dd0.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -features: - - Provide a notification mechanism into Watcher that supports versioning. - Whenever a Watcher object is created, updated or deleted, a versioned - notification will, if it's relevant, be automatically sent to notify in order - to allow an event-driven style of architecture within Watcher. Moreover, it - will also give other services and/or 3rd party softwares (e.g. monitoring - solutions or rules engines) the ability to react to such events. diff --git a/releasenotes/notes/watcher-policies-1e86a30f0f11c6fa.yaml b/releasenotes/notes/watcher-policies-1e86a30f0f11c6fa.yaml deleted file mode 100644 index 1c5f813..0000000 --- a/releasenotes/notes/watcher-policies-1e86a30f0f11c6fa.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - Added policies to handle user rights - to access Watcher API. 
diff --git a/releasenotes/notes/watcher-service-list-7b2f4b64f71e9b89.yaml b/releasenotes/notes/watcher-service-list-7b2f4b64f71e9b89.yaml deleted file mode 100644 index 9710b97..0000000 --- a/releasenotes/notes/watcher-service-list-7b2f4b64f71e9b89.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - Add a service supervisor to watch Watcher deamons. diff --git a/releasenotes/notes/watcher-versioned-objects-fc5abf5c81c4590c.yaml b/releasenotes/notes/watcher-versioned-objects-fc5abf5c81c4590c.yaml deleted file mode 100644 index c2a6df8..0000000 --- a/releasenotes/notes/watcher-versioned-objects-fc5abf5c81c4590c.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - all Watcher objects have been refactored to support OVO - (oslo.versionedobjects) which was a prerequisite step in order to implement - versioned notifications. diff --git a/releasenotes/notes/workload-balance-migration-strategy-a0b05148a57815c0.yaml b/releasenotes/notes/workload-balance-migration-strategy-a0b05148a57815c0.yaml deleted file mode 100644 index e607426..0000000 --- a/releasenotes/notes/workload-balance-migration-strategy-a0b05148a57815c0.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -features: - - Added a strategy based on the VM workloads of - hypervisors. This strategy makes decisions to - migrate workloads to make the total VM workloads - of each hypervisor balanced, when the total VM - workloads of hypervisor reaches threshold. diff --git a/releasenotes/source/_static/.placeholder b/releasenotes/source/_static/.placeholder deleted file mode 100644 index e69de29..0000000 diff --git a/releasenotes/source/conf.py b/releasenotes/source/conf.py deleted file mode 100644 index 8b36a78..0000000 --- a/releasenotes/source/conf.py +++ /dev/null @@ -1,258 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# watcher documentation build configuration file, created by -# sphinx-quickstart on Fri Jun 3 11:37:52 2016. -# -# This file is execfile()d with the current directory set to its containing dir -# -# Note that not all possible configuration values are present in this -# autogenerated file. -# -# All configuration values have a default; values that are commented out -# serve to show the default. - -import sys, os -from watcher import version as watcher_version - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -#sys.path.insert(0, os.path.abspath('.')) - -# -- General configuration ---------------------------------------------------- - -# If your documentation needs a minimal Sphinx version, state it here. -#needs_sphinx = '1.0' - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. -extensions = ['reno.sphinxext', - 'openstackdocstheme'] - -# Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] - -# The suffix of source filenames. -source_suffix = '.rst' - -# The encoding of source files. -#source_encoding = 'utf-8-sig' - -# The master toctree document. -master_doc = 'index' - -# General information about the project. 
-project = u'watcher' -copyright = u'2016, Watcher developers' - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# The short X.Y version. -version = watcher_version.version_info.release_string() -# The full version, including alpha/beta/rc tags. -release = watcher_version.version_info.version_string() - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -#language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -#today = '' -# Else, today_fmt is used as the format for a strftime call. -#today_fmt = '%B %d, %Y' - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -exclude_patterns = ['_build'] - -# The reST default role (used for this markup: `text`) to use for all documents -#default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -#add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -#add_module_names = True - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -#show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' - -# A list of ignored prefixes for module index sorting. -#modindex_common_prefix = [] - - -# -- Options for HTML output -------------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -html_theme = 'openstackdocs' - -# Theme options are theme-specific and customize the look and feel of a theme -# further. 
For a list of options available for each theme, see the -# documentation. -#html_theme_options = {} - -# Add any paths that contain custom themes here, relative to this directory. -#html_theme_path = [] - -# The name for this set of Sphinx documents. If None, it defaults to -# " v documentation". -#html_title = None - -# A shorter title for the navigation bar. Default is the same as html_title. -#html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -#html_logo = None - -# The name of an image file (within the static path) to use as favicon of the -# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -#html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] - -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. -#html_last_updated_fmt = '%b %d, %Y' - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -#html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -#html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -#html_additional_pages = {} - -# If false, no module index is generated. -#html_domain_indices = True - -# If false, no index is generated. -#html_use_index = True - -# If true, the index is split into individual pages for each letter. -#html_split_index = False - -# If true, links to the reST sources are added to the pages. -#html_show_sourcelink = True - -# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -#html_show_sphinx = True - -# If true, "(C) Copyright ..." 
is shown in the HTML footer. Default is True. -#html_show_copyright = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -#html_use_opensearch = '' - -# This is the file name suffix for HTML files (e.g. ".xhtml"). -#html_file_suffix = None - -# Output file base name for HTML help builder. -htmlhelp_basename = 'watcherdoc' - - -# -- Options for LaTeX output ------------------------------------------------- - -latex_elements = { -# The paper size ('letterpaper' or 'a4paper'). -#'papersize': 'letterpaper', - -# The font size ('10pt', '11pt' or '12pt'). -#'pointsize': '10pt', - -# Additional stuff for the LaTeX preamble. -#'preamble': '', -} - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, author, documentclass [howto/manual]) -latex_documents = [ - ('index', 'watcher.tex', u'Watcher Documentation', - u'Watcher developers', 'manual'), -] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -#latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -#latex_use_parts = False - -# If true, show page references after internal links. -#latex_show_pagerefs = False - -# If true, show URL addresses after external links. -#latex_show_urls = False - -# Documents to append as an appendix to all manuals. -#latex_appendices = [] - -# If false, no module index is generated. -#latex_domain_indices = True - - -# -- Options for manual page output ------------------------------------------- - -# One entry per manual page. List of tuples -# (source start file, name, description, authors, manual section). -man_pages = [ - ('index', 'watcher', u'Watcher Documentation', - [u'Watcher developers'], 1) -] - -# If true, show URL addresses after external links. 
-#man_show_urls = False - - -# -- Options for Texinfo output ----------------------------------------------- - -# Grouping the document tree into Texinfo files. List of tuples -# (source start file, target name, title, author, -# dir menu entry, description, category) -texinfo_documents = [ - ('index', 'watcher', u'Watcher Documentation', - u'Watcher developers', 'watcher', 'One line description of project.', - 'Miscellaneous'), -] - -# Documents to append as an appendix to all manuals. -#texinfo_appendices = [] - -# If false, no module index is generated. -#texinfo_domain_indices = True - -# How to display URL addresses: 'footnote', 'no', or 'inline'. -#texinfo_show_urls = 'footnote' - -# -- Options for Internationalization output ------------------------------ -locale_dirs = ['locale/'] diff --git a/releasenotes/source/index.rst b/releasenotes/source/index.rst deleted file mode 100644 index 2478fff..0000000 --- a/releasenotes/source/index.rst +++ /dev/null @@ -1,26 +0,0 @@ -.. - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -================================================= -Welcome to watcher's Release Notes documentation! -================================================= - -Contents: - -.. 
toctree:: - :maxdepth: 1 - - unreleased - ocata - newton - diff --git a/releasenotes/source/locale/fr/LC_MESSAGES/releasenotes.po b/releasenotes/source/locale/fr/LC_MESSAGES/releasenotes.po deleted file mode 100644 index d38f86d..0000000 --- a/releasenotes/source/locale/fr/LC_MESSAGES/releasenotes.po +++ /dev/null @@ -1,33 +0,0 @@ -# Gérald LONLAS , 2016. #zanata -msgid "" -msgstr "" -"Project-Id-Version: watcher 1.0.1.dev51\n" -"Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2017-03-21 11:57+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-10-22 06:44+0000\n" -"Last-Translator: Gérald LONLAS \n" -"Language-Team: French\n" -"Language: fr\n" -"X-Generator: Zanata 3.9.6\n" -"Plural-Forms: nplurals=2; plural=(n > 1)\n" - -msgid "0.29.0" -msgstr "0.29.0" - -msgid "Contents:" -msgstr "Contenu :" - -msgid "Current Series Release Notes" -msgstr "Note de la release actuelle" - -msgid "New Features" -msgstr "Nouvelles fonctionnalités" - -msgid "Newton Series Release Notes" -msgstr "Note de release pour Newton" - -msgid "Welcome to watcher's Release Notes documentation!" -msgstr "Bienvenue dans la documentation de la note de Release de Watcher" diff --git a/releasenotes/source/newton.rst b/releasenotes/source/newton.rst deleted file mode 100644 index 97036ed..0000000 --- a/releasenotes/source/newton.rst +++ /dev/null @@ -1,6 +0,0 @@ -=================================== - Newton Series Release Notes -=================================== - -.. release-notes:: - :branch: origin/stable/newton diff --git a/releasenotes/source/ocata.rst b/releasenotes/source/ocata.rst deleted file mode 100644 index ebe62f4..0000000 --- a/releasenotes/source/ocata.rst +++ /dev/null @@ -1,6 +0,0 @@ -=================================== - Ocata Series Release Notes -=================================== - -.. 
release-notes:: - :branch: origin/stable/ocata diff --git a/releasenotes/source/unreleased.rst b/releasenotes/source/unreleased.rst deleted file mode 100644 index cd22aab..0000000 --- a/releasenotes/source/unreleased.rst +++ /dev/null @@ -1,5 +0,0 @@ -============================== - Current Series Release Notes -============================== - -.. release-notes:: diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index 7a18c2f..0000000 --- a/requirements.txt +++ /dev/null @@ -1,49 +0,0 @@ -# The order of packages is significant, because pip processes them in the order -# of appearance. Changing the order has an impact on the overall integration -# process, which may cause wedges in the gate later. - -apscheduler # MIT License -enum34;python_version=='2.7' or python_version=='2.6' or python_version=='3.3' # BSD -jsonpatch>=1.1 # BSD -keystoneauth1>=3.0.1 # Apache-2.0 -jsonschema!=2.5.0,<3.0.0,>=2.0.0 # MIT -keystonemiddleware>=4.12.0 # Apache-2.0 -lxml!=3.7.0,>=2.3 # BSD -croniter>=0.3.4 # MIT License -oslo.concurrency>=3.8.0 # Apache-2.0 -oslo.cache>=1.5.0 # Apache-2.0 -oslo.config!=4.3.0,!=4.4.0,>=4.0.0 # Apache-2.0 -oslo.context>=2.14.0 # Apache-2.0 -oslo.db>=4.24.0 # Apache-2.0 -oslo.i18n!=3.15.2,>=2.1.0 # Apache-2.0 -oslo.log>=3.22.0 # Apache-2.0 -oslo.messaging!=5.25.0,>=5.24.2 # Apache-2.0 -oslo.policy>=1.23.0 # Apache-2.0 -oslo.reports>=0.6.0 # Apache-2.0 -oslo.serialization!=2.19.1,>=1.10.0 # Apache-2.0 -oslo.service>=1.10.0 # Apache-2.0 -oslo.utils>=3.20.0 # Apache-2.0 -oslo.versionedobjects>=1.17.0 # Apache-2.0 -PasteDeploy>=1.5.0 # MIT -pbr!=2.1.0,>=2.0.0 # Apache-2.0 -pecan!=1.0.2,!=1.0.3,!=1.0.4,!=1.2,>=1.0.0 # BSD -PrettyTable<0.8,>=0.7.1 # BSD -voluptuous>=0.8.9 # BSD License -gnocchiclient>=2.7.0 # Apache-2.0 -python-ceilometerclient>=2.5.0 # Apache-2.0 -python-cinderclient>=3.0.0 # Apache-2.0 -python-glanceclient>=2.7.0 # Apache-2.0 -python-keystoneclient>=3.8.0 # Apache-2.0 -python-monascaclient>=1.1.0 # Apache-2.0 
-python-neutronclient>=6.3.0 # Apache-2.0 -python-novaclient>=9.0.0 # Apache-2.0 -python-openstackclient!=3.10.0,>=3.3.0 # Apache-2.0 -python-ironicclient>=1.14.0 # Apache-2.0 -six>=1.9.0 # MIT -SQLAlchemy!=1.1.5,!=1.1.6,!=1.1.7,!=1.1.8,>=1.0.10 # MIT -stevedore>=1.20.0 # Apache-2.0 -taskflow>=2.7.0 # Apache-2.0 -WebOb>=1.7.1 # MIT -WSME>=0.8 # MIT -networkx>=1.10 # BSD - diff --git a/setup.cfg b/setup.cfg deleted file mode 100644 index bbd993f..0000000 --- a/setup.cfg +++ /dev/null @@ -1,129 +0,0 @@ -[metadata] -name = python-watcher -summary = OpenStack Watcher provides a flexible and scalable resource optimization service for multi-tenant OpenStack-based clouds. -description-file = - README.rst -author = OpenStack -author-email = openstack-dev@lists.openstack.org -home-page = https://docs.openstack.org/watcher/latest/ -classifier = - Environment :: OpenStack - Intended Audience :: Information Technology - Intended Audience :: System Administrators - License :: OSI Approved :: Apache Software License - Operating System :: POSIX :: Linux - Programming Language :: Python - Programming Language :: Python :: 2 - Programming Language :: Python :: 2.7 - Programming Language :: Python :: 3 - Programming Language :: Python :: 3.5 - -[files] -packages = - watcher - watcher_tempest_plugin -data_files = - etc/ = etc/* - -[global] -setup-hooks = - pbr.hooks.setup_hook - -[entry_points] -oslo.config.opts = - watcher = watcher.conf.opts:list_opts - -console_scripts = - watcher-api = watcher.cmd.api:main - watcher-db-manage = watcher.cmd.dbmanage:main - watcher-decision-engine = watcher.cmd.decisionengine:main - watcher-applier = watcher.cmd.applier:main - watcher-sync = watcher.cmd.sync:main - -tempest.test_plugins = - watcher_tests = watcher_tempest_plugin.plugin:WatcherTempestPlugin - -watcher.database.migration_backend = - sqlalchemy = watcher.db.sqlalchemy.migration - -watcher_goals = - unclassified = watcher.decision_engine.goal.goals:Unclassified - dummy = 
watcher.decision_engine.goal.goals:Dummy - server_consolidation = watcher.decision_engine.goal.goals:ServerConsolidation - thermal_optimization = watcher.decision_engine.goal.goals:ThermalOptimization - workload_balancing = watcher.decision_engine.goal.goals:WorkloadBalancing - airflow_optimization = watcher.decision_engine.goal.goals:AirflowOptimization - noisy_neighbor = watcher.decision_engine.goal.goals:NoisyNeighborOptimization - -watcher_scoring_engines = - dummy_scorer = watcher.decision_engine.scoring.dummy_scorer:DummyScorer - -watcher_scoring_engine_containers = - dummy_scoring_container = watcher.decision_engine.scoring.dummy_scoring_container:DummyScoringContainer - -watcher_strategies = - dummy = watcher.decision_engine.strategy.strategies.dummy_strategy:DummyStrategy - dummy_with_scorer = watcher.decision_engine.strategy.strategies.dummy_with_scorer:DummyWithScorer - dummy_with_resize = watcher.decision_engine.strategy.strategies.dummy_with_resize:DummyWithResize - basic = watcher.decision_engine.strategy.strategies.basic_consolidation:BasicConsolidation - outlet_temperature = watcher.decision_engine.strategy.strategies.outlet_temp_control:OutletTempControl - vm_workload_consolidation = watcher.decision_engine.strategy.strategies.vm_workload_consolidation:VMWorkloadConsolidation - workload_stabilization = watcher.decision_engine.strategy.strategies.workload_stabilization:WorkloadStabilization - workload_balance = watcher.decision_engine.strategy.strategies.workload_balance:WorkloadBalance - uniform_airflow = watcher.decision_engine.strategy.strategies.uniform_airflow:UniformAirflow - noisy_neighbor = watcher.decision_engine.strategy.strategies.noisy_neighbor:NoisyNeighbor - -watcher_actions = - migrate = watcher.applier.actions.migration:Migrate - nop = watcher.applier.actions.nop:Nop - sleep = watcher.applier.actions.sleep:Sleep - change_nova_service_state = watcher.applier.actions.change_nova_service_state:ChangeNovaServiceState - resize = 
watcher.applier.actions.resize:Resize - change_node_power_state = watcher.applier.actions.change_node_power_state:ChangeNodePowerState - -watcher_workflow_engines = - taskflow = watcher.applier.workflow_engine.default:DefaultWorkFlowEngine - -watcher_planners = - weight = watcher.decision_engine.planner.weight:WeightPlanner - workload_stabilization = watcher.decision_engine.planner.workload_stabilization:WorkloadStabilizationPlanner - -watcher_cluster_data_model_collectors = - compute = watcher.decision_engine.model.collector.nova:NovaClusterDataModelCollector - storage = watcher.decision_engine.model.collector.cinder:CinderClusterDataModelCollector - - -[pbr] -warnerrors = true -autodoc_index_modules = true -autodoc_exclude_modules = - watcher.db.sqlalchemy.alembic.env - watcher.db.sqlalchemy.alembic.versions.* - watcher.tests.* - watcher_tempest_plugin.* - watcher.doc - - -[build_sphinx] -source-dir = doc/source -build-dir = doc/build -fresh_env = 1 -all_files = 1 - -[upload_sphinx] -upload-dir = doc/build/html - - -[compile_catalog] -directory = watcher/locale -domain = watcher - -[update_catalog] -domain = watcher -output_dir = watcher/locale -input_file = watcher/locale/watcher.pot - -[extract_messages] -keywords = _ gettext ngettext l_ lazy_gettext _LI _LW _LE _LC -mapping_file = babel.cfg -output_file = watcher/locale/watcher.pot diff --git a/setup.py b/setup.py deleted file mode 100644 index 566d844..0000000 --- a/setup.py +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT -import setuptools - -# In python < 2.7.4, a lazy loading of package `pbr` will break -# setuptools if some other modules registered functions in `atexit`. -# solution from: http://bugs.python.org/issue15881#msg170215 -try: - import multiprocessing # noqa -except ImportError: - pass - -setuptools.setup( - setup_requires=['pbr>=2.0.0'], - pbr=True) diff --git a/test-requirements.txt b/test-requirements.txt deleted file mode 100644 index 84a7d89..0000000 --- a/test-requirements.txt +++ /dev/null @@ -1,27 +0,0 @@ -# The order of packages is significant, because pip processes them in the order -# of appearance. Changing the order has an impact on the overall integration -# process, which may cause wedges in the gate later. 
- -coverage!=4.4,>=4.0 # Apache-2.0 -doc8 # Apache-2.0 -freezegun>=0.3.6 # Apache-2.0 -hacking!=0.13.0,<0.14,>=0.12.0 # Apache-2.0 -mock>=2.0 # BSD -oslotest>=1.10.0 # Apache-2.0 -os-testr>=0.8.0 # Apache-2.0 -python-subunit>=0.0.18 # Apache-2.0/BSD -testrepository>=0.0.18 # Apache-2.0/BSD -testscenarios>=0.4 # Apache-2.0/BSD -testtools>=1.4.0 # MIT - -# Doc requirements -openstackdocstheme>=1.11.0 # Apache-2.0 -sphinx>=1.6.2 # BSD -sphinxcontrib-pecanwsme>=0.8 # Apache-2.0 - - -# releasenotes -reno!=2.3.1,>=1.8.0 # Apache-2.0 - -# bandit -bandit>=1.1.0 # Apache-2.0 diff --git a/tox.ini b/tox.ini deleted file mode 100644 index 89148b2..0000000 --- a/tox.ini +++ /dev/null @@ -1,73 +0,0 @@ -[tox] -minversion = 1.8 -envlist = py35,py27,pep8 -skipsdist = True - -[testenv] -usedevelop = True -whitelist_externals = find - rm -install_command = pip install -c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt} {opts} {packages} -setenv = - VIRTUAL_ENV={envdir} -deps = -r{toxinidir}/test-requirements.txt -commands = - rm -f .testrepository/times.dbm - find . 
-type f -name "*.py[c|o]" -delete - ostestr --concurrency=6 {posargs} -passenv = http_proxy HTTP_PROXY https_proxy HTTPS_PROXY no_proxy NO_PROXY - -[testenv:pep8] -commands = - doc8 doc/source/ CONTRIBUTING.rst HACKING.rst README.rst - flake8 - bandit -r watcher -x tests -n5 -ll -s B320 - -[testenv:venv] -setenv = PYTHONHASHSEED=0 -commands = {posargs} - -[testenv:cover] -commands = - python setup.py testr --coverage --testr-args='{posargs}' - coverage report - -[testenv:docs] -setenv = PYTHONHASHSEED=0 -commands = - doc8 doc/source/ CONTRIBUTING.rst HACKING.rst README.rst - python setup.py build_sphinx - -[testenv:debug] -commands = oslo_debug_helper -t watcher/tests {posargs} - -[testenv:genconfig] -sitepackages = False -commands = - oslo-config-generator --config-file etc/watcher/watcher-config-generator.conf - -[flake8] -show-source=True -ignore= H105,E123,E226,N320,H202 -builtins= _ -enable-extensions = H106,H203 -exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg,build,*sqlalchemy/alembic/versions/*,demo/,releasenotes - -[testenv:wheel] -commands = python setup.py bdist_wheel - -[hacking] -import_exceptions = watcher._i18n -local-check-factory = watcher.hacking.checks.factory - -[doc8] -extension=.rst -# todo: stop ignoring doc/source/man when https://bugs.launchpad.net/doc8/+bug/1502391 is fixed -ignore-path=doc/source/image_src,doc/source/man,doc/source/api - -[testenv:releasenotes] -commands = sphinx-build -a -W -E -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html - -[testenv:bandit] -deps = -r{toxinidir}/test-requirements.txt -commands = bandit -r watcher -x tests -n5 -ll -s B320 diff --git a/watcher/__init__.py b/watcher/__init__.py deleted file mode 100644 index 403d61b..0000000 --- a/watcher/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import pbr.version - - -__version__ = pbr.version.VersionInfo('python-watcher').version_string() diff --git a/watcher/_i18n.py b/watcher/_i18n.py deleted file mode 100644 index b410850..0000000 --- a/watcher/_i18n.py +++ /dev/null @@ -1,41 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import oslo_i18n -from oslo_i18n import _lazy - -# The domain is the name of the App which is used to generate the folder -# containing the translation files (i.e. 
the .pot file and the various locales) -DOMAIN = "watcher" - -_translators = oslo_i18n.TranslatorFactory(domain=DOMAIN) - -# The primary translation function using the well-known name "_" -_ = _translators.primary - -# The contextual translation function using the name "_C" -_C = _translators.contextual_form - -# The plural translation function using the name "_P" -_P = _translators.plural_form - - -def lazy_translation_enabled(): - return _lazy.USE_LAZY - - -def get_available_languages(): - return oslo_i18n.get_available_languages(DOMAIN) diff --git a/watcher/api/__init__.py b/watcher/api/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/api/acl.py b/watcher/api/acl.py deleted file mode 100644 index 75b8019..0000000 --- a/watcher/api/acl.py +++ /dev/null @@ -1,41 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Copyright © 2012 New Dream Network, LLC (DreamHost) -# Copyright (c) 2016 Intel Corp -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -"""Access Control Lists (ACL's) control access the API server.""" - -from watcher.api.middleware import auth_token -from watcher import conf - -CONF = conf.CONF - - -def install(app, conf, public_routes): - """Install ACL check on application. - - :param app: A WSGI application. - :param conf: Settings. Dict'ified and passed to keystonemiddleware - :param public_routes: The list of the routes which will be allowed to - access without authentication. 
- :return: The same WSGI application with ACL installed. - - """ - if not CONF.get('enable_authentication'): - return app - return auth_token.AuthTokenMiddleware(app, - conf=dict(conf), - public_api_routes=public_routes) diff --git a/watcher/api/app.py b/watcher/api/app.py deleted file mode 100644 index 7926eda..0000000 --- a/watcher/api/app.py +++ /dev/null @@ -1,58 +0,0 @@ -# -*- encoding: utf-8 -*- - -# Copyright © 2012 New Dream Network, LLC (DreamHost) -# All Rights Reserved. -# Copyright (c) 2016 Intel Corp -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- - -import pecan - -from watcher.api import acl -from watcher.api import config as api_config -from watcher.api import middleware -from watcher import conf - -CONF = conf.CONF - - -def get_pecan_config(): - # Set up the pecan configuration - return pecan.configuration.conf_from_dict(api_config.PECAN_CONFIG) - - -def setup_app(config=None): - if not config: - config = get_pecan_config() - - app_conf = dict(config.app) - - app = pecan.make_app( - app_conf.pop('root'), - logging=getattr(config, 'logging', {}), - debug=CONF.debug, - wrap_app=middleware.ParsableErrorMiddleware, - **app_conf - ) - - return acl.install(app, CONF, config.app.acl_public_routes) - - -class VersionSelectorApplication(object): - def __init__(self): - pc = get_pecan_config() - self.v1 = setup_app(config=pc) - - def __call__(self, environ, start_response): - return self.v1(environ, start_response) diff --git a/watcher/api/app.wsgi b/watcher/api/app.wsgi deleted file mode 100644 index c2b0609..0000000 --- a/watcher/api/app.wsgi +++ /dev/null @@ -1,40 +0,0 @@ -# -*- mode: python -*- -# -*- encoding: utf-8 -*- -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Use this file for deploying the API service under Apache2 mod_wsgi. 
-""" - -import sys - -from oslo_config import cfg -import oslo_i18n as i18n -from oslo_log import log - -from watcher.api import app -from watcher.common import service - - -CONF = cfg.CONF - -i18n.install('watcher') - -service.prepare_service(sys.argv) - -LOG = log.getLogger(__name__) -LOG.debug("Configuration:") -CONF.log_opt_values(LOG, log.DEBUG) - -application = app.VersionSelectorApplication() - diff --git a/watcher/api/config.py b/watcher/api/config.py deleted file mode 100644 index 3952459..0000000 --- a/watcher/api/config.py +++ /dev/null @@ -1,54 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from __future__ import unicode_literals - -from oslo_config import cfg -from watcher.api import hooks - -# Server Specific Configurations -# See https://pecan.readthedocs.org/en/latest/configuration.html#server-configuration # noqa -server = { - 'port': '9322', - 'host': '127.0.0.1' -} - -# Pecan Application Configurations -# See https://pecan.readthedocs.org/en/latest/configuration.html#application-configuration # noqa -app = { - 'root': 'watcher.api.controllers.root.RootController', - 'modules': ['watcher.api'], - 'hooks': [ - hooks.ContextHook(), - hooks.NoExceptionTracebackHook(), - ], - 'static_root': '%(confdir)s/public', - 'enable_acl': True, - 'acl_public_routes': [ - '/', - ], -} - -# WSME Configurations -# See https://wsme.readthedocs.org/en/latest/integrate.html#configuration -wsme = { - 'debug': cfg.CONF.get("debug") if "debug" in cfg.CONF else False, -} - -PECAN_CONFIG = { - "server": server, - "app": app, - "wsme": wsme, -} diff --git a/watcher/api/controllers/__init__.py b/watcher/api/controllers/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/api/controllers/base.py b/watcher/api/controllers/base.py deleted file mode 100644 index 54b5c3f..0000000 --- a/watcher/api/controllers/base.py +++ /dev/null @@ -1,51 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import datetime - -import wsme -from wsme import types as wtypes - - -class APIBase(wtypes.Base): - - created_at = wsme.wsattr(datetime.datetime, readonly=True) - """The time in UTC at which the object is created""" - - updated_at = wsme.wsattr(datetime.datetime, readonly=True) - """The time in UTC at which the object is updated""" - - deleted_at = wsme.wsattr(datetime.datetime, readonly=True) - """The time in UTC at which the object is deleted""" - - def as_dict(self): - """Render this object as a dict of its fields.""" - return dict((k, getattr(self, k)) - for k in self.fields - if hasattr(self, k) and - getattr(self, k) != wsme.Unset) - - def unset_fields_except(self, except_list=None): - """Unset fields so they don't appear in the message body. - - :param except_list: A list of fields that won't be touched. - - """ - if except_list is None: - except_list = [] - - for k in self.as_dict(): - if k not in except_list: - setattr(self, k, wsme.Unset) diff --git a/watcher/api/controllers/link.py b/watcher/api/controllers/link.py deleted file mode 100644 index 6c89fe1..0000000 --- a/watcher/api/controllers/link.py +++ /dev/null @@ -1,60 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright 2013 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import pecan -from wsme import types as wtypes - -from watcher.api.controllers import base - - -def build_url(resource, resource_args, bookmark=False, base_url=None): - if base_url is None: - base_url = pecan.request.host_url - - template = '%(url)s/%(res)s' if bookmark else '%(url)s/v1/%(res)s' - # FIXME(lucasagomes): I'm getting a 404 when doing a GET on - # a nested resource that the URL ends with a '/'. - # https://groups.google.com/forum/#!topic/pecan-dev/QfSeviLg5qs - template += '%(args)s' if resource_args.startswith('?') else '/%(args)s' - return template % {'url': base_url, 'res': resource, 'args': resource_args} - - -class Link(base.APIBase): - """A link representation.""" - - href = wtypes.text - """The url of a link.""" - - rel = wtypes.text - """The name of a link.""" - - type = wtypes.text - """Indicates the type of document/link.""" - - @staticmethod - def make_link(rel_name, url, resource, resource_args, - bookmark=False, type=wtypes.Unset): - href = build_url(resource, resource_args, - bookmark=bookmark, base_url=url) - return Link(href=href, rel=rel_name, type=type) - - @classmethod - def sample(cls): - sample = cls(href="http://localhost:6385/chassis/" - "eaaca217-e7d8-47b4-bb41-3f99f20eed89", - rel="bookmark") - return sample diff --git a/watcher/api/controllers/root.py b/watcher/api/controllers/root.py deleted file mode 100644 index e42734c..0000000 --- a/watcher/api/controllers/root.py +++ /dev/null @@ -1,98 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Copyright © 2012 New Dream Network, LLC (DreamHost) -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -import pecan -from pecan import rest -from wsme import types as wtypes -import wsmeext.pecan as wsme_pecan - -from watcher.api.controllers import base -from watcher.api.controllers import link -from watcher.api.controllers import v1 - - -class Version(base.APIBase): - """An API version representation.""" - - id = wtypes.text - """The ID of the version, also acts as the release number""" - - links = [link.Link] - """A Link that point to a specific version of the API""" - - @staticmethod - def convert(id): - version = Version() - version.id = id - version.links = [link.Link.make_link('self', pecan.request.host_url, - id, '', bookmark=True)] - return version - - -class Root(base.APIBase): - - name = wtypes.text - """The name of the API""" - - description = wtypes.text - """Some information about this API""" - - versions = [Version] - """Links to all the versions available in this API""" - - default_version = Version - """A link to the default version of the API""" - - @staticmethod - def convert(): - root = Root() - root.name = "OpenStack Watcher API" - root.description = ("Watcher is an OpenStack project which aims to " - "improve physical resources usage through " - "better VM placement.") - root.versions = [Version.convert('v1')] - root.default_version = Version.convert('v1') - return root - - -class RootController(rest.RestController): - - _versions = ['v1'] - """All supported API versions""" - - _default_version = 'v1' - """The default API version""" - - v1 = v1.Controller() - - @wsme_pecan.wsexpose(Root) - def get(self): - # NOTE: The reason why convert() it's being called for every - # request is because we need to get the host url from - # the request object to make the links. - return Root.convert() - - @pecan.expose() - def _route(self, args): - """Overrides the default routing behavior. 
- - It redirects the request to the default version of the watcher API - if the version number is not specified in the url. - """ - - if args[0] and args[0] not in self._versions: - args = [self._default_version] + args - return super(RootController, self)._route(args) diff --git a/watcher/api/controllers/v1/__init__.py b/watcher/api/controllers/v1/__init__.py deleted file mode 100644 index 1627955..0000000 --- a/watcher/api/controllers/v1/__init__.py +++ /dev/null @@ -1,197 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -""" -Version 1 of the Watcher API - -NOTE: IN PROGRESS AND NOT FULLY IMPLEMENTED. 
-""" - -import datetime - -import pecan -from pecan import rest -import wsme -from wsme import types as wtypes -import wsmeext.pecan as wsme_pecan - -from watcher.api.controllers import link -from watcher.api.controllers.v1 import action -from watcher.api.controllers.v1 import action_plan -from watcher.api.controllers.v1 import audit -from watcher.api.controllers.v1 import audit_template -from watcher.api.controllers.v1 import goal -from watcher.api.controllers.v1 import scoring_engine -from watcher.api.controllers.v1 import service -from watcher.api.controllers.v1 import strategy - - -class APIBase(wtypes.Base): - - created_at = wsme.wsattr(datetime.datetime, readonly=True) - """The time in UTC at which the object is created""" - - updated_at = wsme.wsattr(datetime.datetime, readonly=True) - """The time in UTC at which the object is updated""" - - deleted_at = wsme.wsattr(datetime.datetime, readonly=True) - """The time in UTC at which the object is deleted""" - - def as_dict(self): - """Render this object as a dict of its fields.""" - return dict((k, getattr(self, k)) - for k in self.fields - if hasattr(self, k) and - getattr(self, k) != wsme.Unset) - - def unset_fields_except(self, except_list=None): - """Unset fields so they don't appear in the message body. - - :param except_list: A list of fields that won't be touched. 
- - """ - if except_list is None: - except_list = [] - - for k in self.as_dict(): - if k not in except_list: - setattr(self, k, wsme.Unset) - - -class MediaType(APIBase): - """A media type representation.""" - - base = wtypes.text - type = wtypes.text - - def __init__(self, base, type): - self.base = base - self.type = type - - -class V1(APIBase): - """The representation of the version 1 of the API.""" - - id = wtypes.text - """The ID of the version, also acts as the release number""" - - media_types = [MediaType] - """An array of supcontainersed media types for this version""" - - audit_templates = [link.Link] - """Links to the audit templates resource""" - - audits = [link.Link] - """Links to the audits resource""" - - actions = [link.Link] - """Links to the actions resource""" - - action_plans = [link.Link] - """Links to the action plans resource""" - - scoring_engines = [link.Link] - """Links to the Scoring Engines resource""" - - services = [link.Link] - """Links to the services resource""" - - links = [link.Link] - """Links that point to a specific URL for this version and documentation""" - - @staticmethod - def convert(): - v1 = V1() - v1.id = "v1" - v1.links = [link.Link.make_link('self', pecan.request.host_url, - 'v1', '', bookmark=True), - link.Link.make_link('describedby', - 'http://docs.openstack.org', - 'developer/watcher/dev', - 'api-spec-v1.html', - bookmark=True, type='text/html') - ] - v1.media_types = [MediaType('application/json', - 'application/vnd.openstack.watcher.v1+json')] - v1.audit_templates = [link.Link.make_link('self', - pecan.request.host_url, - 'audit_templates', ''), - link.Link.make_link('bookmark', - pecan.request.host_url, - 'audit_templates', '', - bookmark=True) - ] - v1.audits = [link.Link.make_link('self', pecan.request.host_url, - 'audits', ''), - link.Link.make_link('bookmark', - pecan.request.host_url, - 'audits', '', - bookmark=True) - ] - v1.actions = [link.Link.make_link('self', pecan.request.host_url, - 'actions', ''), 
- link.Link.make_link('bookmark', - pecan.request.host_url, - 'actions', '', - bookmark=True) - ] - v1.action_plans = [link.Link.make_link( - 'self', pecan.request.host_url, 'action_plans', ''), - link.Link.make_link('bookmark', - pecan.request.host_url, - 'action_plans', '', - bookmark=True) - ] - - v1.scoring_engines = [link.Link.make_link( - 'self', pecan.request.host_url, 'scoring_engines', ''), - link.Link.make_link('bookmark', - pecan.request.host_url, - 'scoring_engines', '', - bookmark=True) - ] - - v1.services = [link.Link.make_link( - 'self', pecan.request.host_url, 'services', ''), - link.Link.make_link('bookmark', - pecan.request.host_url, - 'services', '', - bookmark=True) - ] - return v1 - - -class Controller(rest.RestController): - """Version 1 API controller root.""" - - audits = audit.AuditsController() - audit_templates = audit_template.AuditTemplatesController() - actions = action.ActionsController() - action_plans = action_plan.ActionPlansController() - goals = goal.GoalsController() - scoring_engines = scoring_engine.ScoringEngineController() - services = service.ServicesController() - strategies = strategy.StrategiesController() - - @wsme_pecan.wsexpose(V1) - def get(self): - # NOTE: The reason why convert() it's being called for every - # request is because we need to get the host url from - # the request object to make the links. - return V1.convert() - - -__all__ = ("Controller", ) diff --git a/watcher/api/controllers/v1/action.py b/watcher/api/controllers/v1/action.py deleted file mode 100644 index 3e96fdb..0000000 --- a/watcher/api/controllers/v1/action.py +++ /dev/null @@ -1,403 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright 2013 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -An :ref:`Action ` is what enables Watcher to transform the -current state of a :ref:`Cluster ` after an -:ref:`Audit `. - -An :ref:`Action ` is an atomic task which changes the -current state of a target :ref:`Managed resource ` -of the OpenStack :ref:`Cluster ` such as: - -- Live migration of an instance from one compute node to another compute - node with Nova -- Changing the power level of a compute node (ACPI level, ...) -- Changing the current state of a compute node (enable or disable) with Nova - -In most cases, an :ref:`Action ` triggers some concrete -commands on an existing OpenStack module (Nova, Neutron, Cinder, Ironic, etc.). - -An :ref:`Action ` has a life-cycle and its current state may -be one of the following: - -- **PENDING** : the :ref:`Action ` has not been executed - yet by the :ref:`Watcher Applier ` -- **ONGOING** : the :ref:`Action ` is currently being - processed by the :ref:`Watcher Applier ` -- **SUCCEEDED** : the :ref:`Action ` has been executed - successfully -- **FAILED** : an error occurred while trying to execute the - :ref:`Action ` -- **DELETED** : the :ref:`Action ` is still stored in the - :ref:`Watcher database ` but is not returned - any more through the Watcher APIs. -- **CANCELLED** : the :ref:`Action ` was in **PENDING** or - **ONGOING** state and was cancelled by the - :ref:`Administrator ` - -:ref:`Some default implementations are provided `, but it is -possible to :ref:`develop new implementations ` which -are dynamically loaded by Watcher at launch time. 
-""" - -import datetime - -import pecan -from pecan import rest -import wsme -from wsme import types as wtypes -import wsmeext.pecan as wsme_pecan - -from watcher._i18n import _ -from watcher.api.controllers import base -from watcher.api.controllers import link -from watcher.api.controllers.v1 import collection -from watcher.api.controllers.v1 import types -from watcher.api.controllers.v1 import utils as api_utils -from watcher.common import exception -from watcher.common import policy -from watcher import objects - - -class ActionPatchType(types.JsonPatchType): - - @staticmethod - def mandatory_attrs(): - return [] - - -class Action(base.APIBase): - """API representation of a action. - - This class enforces type checking and value constraints, and converts - between the internal object model and the API representation of a action. - """ - _action_plan_uuid = None - - def _get_action_plan_uuid(self): - return self._action_plan_uuid - - def _set_action_plan_uuid(self, value): - if value == wtypes.Unset: - self._action_plan_uuid = wtypes.Unset - elif value and self._action_plan_uuid != value: - try: - action_plan = objects.ActionPlan.get( - pecan.request.context, value) - self._action_plan_uuid = action_plan.uuid - self.action_plan_id = action_plan.id - except exception.ActionPlanNotFound: - self._action_plan_uuid = None - - uuid = wtypes.wsattr(types.uuid, readonly=True) - """Unique UUID for this action""" - - action_plan_uuid = wsme.wsproperty(types.uuid, _get_action_plan_uuid, - _set_action_plan_uuid, - mandatory=True) - """The action plan this action belongs to """ - - state = wtypes.text - """This audit state""" - - action_type = wtypes.text - """Action type""" - - input_parameters = types.jsontype - """One or more key/value pairs """ - - parents = wtypes.wsattr(types.jsontype, readonly=True) - """UUIDs of parent actions""" - - links = wsme.wsattr([link.Link], readonly=True) - """A list containing a self link and associated action links""" - - def __init__(self, 
**kwargs): - super(Action, self).__init__() - - self.fields = [] - fields = list(objects.Action.fields) - fields.append('action_plan_uuid') - for field in fields: - # Skip fields we do not expose. - if not hasattr(self, field): - continue - self.fields.append(field) - setattr(self, field, kwargs.get(field, wtypes.Unset)) - - self.fields.append('action_plan_id') - setattr(self, 'action_plan_uuid', kwargs.get('action_plan_id', - wtypes.Unset)) - - @staticmethod - def _convert_with_links(action, url, expand=True): - if not expand: - action.unset_fields_except(['uuid', 'state', 'action_plan_uuid', - 'action_plan_id', 'action_type', - 'parents']) - - action.links = [link.Link.make_link('self', url, - 'actions', action.uuid), - link.Link.make_link('bookmark', url, - 'actions', action.uuid, - bookmark=True) - ] - return action - - @classmethod - def convert_with_links(cls, action, expand=True): - action = Action(**action.as_dict()) - return cls._convert_with_links(action, pecan.request.host_url, expand) - - @classmethod - def sample(cls, expand=True): - sample = cls(uuid='27e3153e-d5bf-4b7e-b517-fb518e17f34c', - description='action description', - state='PENDING', - created_at=datetime.datetime.utcnow(), - deleted_at=None, - updated_at=datetime.datetime.utcnow(), - parents=[]) - sample._action_plan_uuid = '7ae81bb3-dec3-4289-8d6c-da80bd8001ae' - return cls._convert_with_links(sample, 'http://localhost:9322', expand) - - -class ActionCollection(collection.Collection): - """API representation of a collection of actions.""" - - actions = [Action] - """A list containing actions objects""" - - def __init__(self, **kwargs): - self._type = 'actions' - - @staticmethod - def convert_with_links(actions, limit, url=None, expand=False, - **kwargs): - - collection = ActionCollection() - collection.actions = [Action.convert_with_links(p, expand) - for p in actions] - - return collection - - @classmethod - def sample(cls): - sample = cls() - sample.actions = 
[Action.sample(expand=False)] - return sample - - -class ActionsController(rest.RestController): - """REST controller for Actions.""" - def __init__(self): - super(ActionsController, self).__init__() - - from_actions = False - """A flag to indicate if the requests to this controller are coming - from the top-level resource Actions.""" - - _custom_actions = { - 'detail': ['GET'], - } - - def _get_actions_collection(self, marker, limit, - sort_key, sort_dir, expand=False, - resource_url=None, - action_plan_uuid=None, audit_uuid=None): - limit = api_utils.validate_limit(limit) - api_utils.validate_sort_dir(sort_dir) - - marker_obj = None - if marker: - marker_obj = objects.Action.get_by_uuid(pecan.request.context, - marker) - - filters = {} - if action_plan_uuid: - filters['action_plan_uuid'] = action_plan_uuid - - if audit_uuid: - filters['audit_uuid'] = audit_uuid - - sort_db_key = sort_key - - actions = objects.Action.list(pecan.request.context, - limit, - marker_obj, sort_key=sort_db_key, - sort_dir=sort_dir, - filters=filters) - - return ActionCollection.convert_with_links(actions, limit, - url=resource_url, - expand=expand, - sort_key=sort_key, - sort_dir=sort_dir) - - @wsme_pecan.wsexpose(ActionCollection, types.uuid, int, - wtypes.text, wtypes.text, types.uuid, - types.uuid) - def get_all(self, marker=None, limit=None, - sort_key='id', sort_dir='asc', action_plan_uuid=None, - audit_uuid=None): - """Retrieve a list of actions. - - :param marker: pagination marker for large data sets. - :param limit: maximum number of resources to return in a single result. - :param sort_key: column to sort results by. Default: id. - :param sort_dir: direction to sort. "asc" or "desc". Default: asc. - :param action_plan_uuid: Optional UUID of an action plan, - to get only actions for that action plan. - :param audit_uuid: Optional UUID of an audit, - to get only actions for that audit. 
- """ - context = pecan.request.context - policy.enforce(context, 'action:get_all', - action='action:get_all') - - if action_plan_uuid and audit_uuid: - raise exception.ActionFilterCombinationProhibited - - return self._get_actions_collection( - marker, limit, sort_key, sort_dir, - action_plan_uuid=action_plan_uuid, audit_uuid=audit_uuid) - - @wsme_pecan.wsexpose(ActionCollection, types.uuid, int, - wtypes.text, wtypes.text, types.uuid, - types.uuid) - def detail(self, marker=None, limit=None, - sort_key='id', sort_dir='asc', action_plan_uuid=None, - audit_uuid=None): - """Retrieve a list of actions with detail. - - :param marker: pagination marker for large data sets. - :param limit: maximum number of resources to return in a single result. - :param sort_key: column to sort results by. Default: id. - :param sort_dir: direction to sort. "asc" or "desc". Default: asc. - :param action_plan_uuid: Optional UUID of an action plan, - to get only actions for that action plan. - :param audit_uuid: Optional UUID of an audit, - to get only actions for that audit. - """ - context = pecan.request.context - policy.enforce(context, 'action:detail', - action='action:detail') - - # NOTE(lucasagomes): /detail should only work agaist collections - parent = pecan.request.path.split('/')[:-1][-1] - if parent != "actions": - raise exception.HTTPNotFound - - if action_plan_uuid and audit_uuid: - raise exception.ActionFilterCombinationProhibited - - expand = True - resource_url = '/'.join(['actions', 'detail']) - return self._get_actions_collection( - marker, limit, sort_key, sort_dir, expand, resource_url, - action_plan_uuid=action_plan_uuid, audit_uuid=audit_uuid) - - @wsme_pecan.wsexpose(Action, types.uuid) - def get_one(self, action_uuid): - """Retrieve information about the given action. - - :param action_uuid: UUID of a action. 
- """ - if self.from_actions: - raise exception.OperationNotPermitted - - context = pecan.request.context - action = api_utils.get_resource('Action', action_uuid) - policy.enforce(context, 'action:get', action, action='action:get') - - return Action.convert_with_links(action) - - @wsme_pecan.wsexpose(Action, body=Action, status_code=201) - def post(self, action): - """Create a new action. - - :param action: a action within the request body. - """ - # FIXME: blueprint edit-action-plan-flow - raise exception.OperationNotPermitted( - _("Cannot create an action directly")) - - if self.from_actions: - raise exception.OperationNotPermitted - - action_dict = action.as_dict() - context = pecan.request.context - new_action = objects.Action(context, **action_dict) - new_action.create() - - # Set the HTTP Location Header - pecan.response.location = link.build_url('actions', new_action.uuid) - return Action.convert_with_links(new_action) - - @wsme.validate(types.uuid, [ActionPatchType]) - @wsme_pecan.wsexpose(Action, types.uuid, body=[ActionPatchType]) - def patch(self, action_uuid, patch): - """Update an existing action. - - :param action_uuid: UUID of a action. - :param patch: a json PATCH document to apply to this action. 
- """ - # FIXME: blueprint edit-action-plan-flow - raise exception.OperationNotPermitted( - _("Cannot modify an action directly")) - - if self.from_actions: - raise exception.OperationNotPermitted - - action_to_update = objects.Action.get_by_uuid(pecan.request.context, - action_uuid) - try: - action_dict = action_to_update.as_dict() - action = Action(**api_utils.apply_jsonpatch(action_dict, patch)) - except api_utils.JSONPATCH_EXCEPTIONS as e: - raise exception.PatchError(patch=patch, reason=e) - - # Update only the fields that have changed - for field in objects.Action.fields: - try: - patch_val = getattr(action, field) - except AttributeError: - # Ignore fields that aren't exposed in the API - continue - if patch_val == wtypes.Unset: - patch_val = None - if action_to_update[field] != patch_val: - action_to_update[field] = patch_val - - action_to_update.save() - return Action.convert_with_links(action_to_update) - - @wsme_pecan.wsexpose(None, types.uuid, status_code=204) - def delete(self, action_uuid): - """Delete a action. - - :param action_uuid: UUID of a action. - """ - # FIXME: blueprint edit-action-plan-flow - raise exception.OperationNotPermitted( - _("Cannot delete an action directly")) - - action_to_delete = objects.Action.get_by_uuid( - pecan.request.context, - action_uuid) - action_to_delete.soft_delete() diff --git a/watcher/api/controllers/v1/action_plan.py b/watcher/api/controllers/v1/action_plan.py deleted file mode 100644 index 79dd5d8..0000000 --- a/watcher/api/controllers/v1/action_plan.py +++ /dev/null @@ -1,558 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright 2013 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -An :ref:`Action Plan ` specifies a flow of -:ref:`Actions ` that should be executed in order to satisfy -a given :ref:`Goal `. It also contains an estimated -:ref:`global efficacy ` alongside a set of -:ref:`efficacy indicators `. - -An :ref:`Action Plan ` is generated by Watcher when an -:ref:`Audit ` is successful which implies that the -:ref:`Strategy ` -which was used has found a :ref:`Solution ` to achieve the -:ref:`Goal ` of this :ref:`Audit `. - -In the default implementation of Watcher, an action plan is composed of -a list of successive :ref:`Actions ` (i.e., a Workflow of -:ref:`Actions ` belonging to a unique branch). - -However, Watcher provides abstract interfaces for many of its components, -allowing other implementations to generate and handle more complex :ref:`Action -Plan(s) ` composed of two types of Action Item(s): - -- simple :ref:`Actions `: atomic tasks, which means it - can not be split into smaller tasks or commands from an OpenStack point of - view. -- composite Actions: which are composed of several simple - :ref:`Actions ` - ordered in sequential and/or parallel flows. - -An :ref:`Action Plan ` may be described using -standard workflow model description formats such as -`Business Process Model and Notation 2.0 (BPMN 2.0) -`_ or `Unified Modeling Language (UML) -`_. - -To see the life-cycle and description of -:ref:`Action Plan ` states, visit :ref:`the Action Plan -state machine `. 
-""" - -import datetime - -from oslo_log import log -import pecan -from pecan import rest -import wsme -from wsme import types as wtypes -import wsmeext.pecan as wsme_pecan - -from watcher._i18n import _ -from watcher.api.controllers import base -from watcher.api.controllers import link -from watcher.api.controllers.v1 import collection -from watcher.api.controllers.v1 import efficacy_indicator as efficacyindicator -from watcher.api.controllers.v1 import types -from watcher.api.controllers.v1 import utils as api_utils -from watcher.applier import rpcapi -from watcher.common import exception -from watcher.common import policy -from watcher.common import utils -from watcher import objects -from watcher.objects import action_plan as ap_objects - -LOG = log.getLogger(__name__) - - -class ActionPlanPatchType(types.JsonPatchType): - - @staticmethod - def _validate_state(patch): - serialized_patch = {'path': patch.path, 'op': patch.op} - if patch.value is not wsme.Unset: - serialized_patch['value'] = patch.value - # todo: use state machines to handle state transitions - state_value = patch.value - if state_value and not hasattr(ap_objects.State, state_value): - msg = _("Invalid state: %(state)s") - raise exception.PatchError( - patch=serialized_patch, reason=msg % dict(state=state_value)) - - @staticmethod - def validate(patch): - if patch.path == "/state": - ActionPlanPatchType._validate_state(patch) - return types.JsonPatchType.validate(patch) - - @staticmethod - def internal_attrs(): - return types.JsonPatchType.internal_attrs() - - @staticmethod - def mandatory_attrs(): - return ["audit_id", "state"] - - -class ActionPlan(base.APIBase): - """API representation of a action plan. - - This class enforces type checking and value constraints, and converts - between the internal object model and the API representation of an - action plan. 
- """ - - _audit_uuid = None - _strategy_uuid = None - _strategy_name = None - _efficacy_indicators = None - - def _get_audit_uuid(self): - return self._audit_uuid - - def _set_audit_uuid(self, value): - if value == wtypes.Unset: - self._audit_uuid = wtypes.Unset - elif value and self._audit_uuid != value: - try: - audit = objects.Audit.get(pecan.request.context, value) - self._audit_uuid = audit.uuid - self.audit_id = audit.id - except exception.AuditNotFound: - self._audit_uuid = None - - def _get_efficacy_indicators(self): - if self._efficacy_indicators is None: - self._set_efficacy_indicators(wtypes.Unset) - return self._efficacy_indicators - - def _set_efficacy_indicators(self, value): - efficacy_indicators = [] - if value == wtypes.Unset and not self._efficacy_indicators: - try: - _efficacy_indicators = objects.EfficacyIndicator.list( - pecan.request.context, - filters={"action_plan_uuid": self.uuid}) - - for indicator in _efficacy_indicators: - efficacy_indicator = efficacyindicator.EfficacyIndicator( - context=pecan.request.context, - name=indicator.name, - description=indicator.description, - unit=indicator.unit, - value=indicator.value, - ) - efficacy_indicators.append(efficacy_indicator.as_dict()) - self._efficacy_indicators = efficacy_indicators - except exception.EfficacyIndicatorNotFound as exc: - LOG.exception(exc) - elif value and self._efficacy_indicators != value: - self._efficacy_indicators = value - - def _get_strategy(self, value): - if value == wtypes.Unset: - return None - strategy = None - try: - if utils.is_uuid_like(value) or utils.is_int_like(value): - strategy = objects.Strategy.get( - pecan.request.context, value) - else: - strategy = objects.Strategy.get_by_name( - pecan.request.context, value) - except exception.StrategyNotFound: - pass - if strategy: - self.strategy_id = strategy.id - return strategy - - def _get_strategy_uuid(self): - return self._strategy_uuid - - def _set_strategy_uuid(self, value): - if value and 
self._strategy_uuid != value: - self._strategy_uuid = None - strategy = self._get_strategy(value) - if strategy: - self._strategy_uuid = strategy.uuid - - def _get_strategy_name(self): - return self._strategy_name - - def _set_strategy_name(self, value): - if value and self._strategy_name != value: - self._strategy_name = None - strategy = self._get_strategy(value) - if strategy: - self._strategy_name = strategy.name - - uuid = wtypes.wsattr(types.uuid, readonly=True) - """Unique UUID for this action plan""" - - audit_uuid = wsme.wsproperty(types.uuid, _get_audit_uuid, _set_audit_uuid, - mandatory=True) - """The UUID of the audit this port belongs to""" - - strategy_uuid = wsme.wsproperty( - wtypes.text, _get_strategy_uuid, _set_strategy_uuid, mandatory=False) - """Strategy UUID the action plan refers to""" - - strategy_name = wsme.wsproperty( - wtypes.text, _get_strategy_name, _set_strategy_name, mandatory=False) - """The name of the strategy this action plan refers to""" - - efficacy_indicators = wsme.wsproperty( - types.jsontype, _get_efficacy_indicators, _set_efficacy_indicators, - mandatory=True) - """The list of efficacy indicators associated to this action plan""" - - global_efficacy = wtypes.wsattr(types.jsontype, readonly=True) - """The global efficacy of this action plan""" - - state = wtypes.text - """This action plan state""" - - links = wsme.wsattr([link.Link], readonly=True) - """A list containing a self link and associated action links""" - - def __init__(self, **kwargs): - super(ActionPlan, self).__init__() - self.fields = [] - fields = list(objects.ActionPlan.fields) - for field in fields: - # Skip fields we do not expose. 
- if not hasattr(self, field): - continue - self.fields.append(field) - setattr(self, field, kwargs.get(field, wtypes.Unset)) - - self.fields.append('audit_uuid') - self.fields.append('efficacy_indicators') - - setattr(self, 'audit_uuid', kwargs.get('audit_id', wtypes.Unset)) - fields.append('strategy_uuid') - setattr(self, 'strategy_uuid', kwargs.get('strategy_id', wtypes.Unset)) - fields.append('strategy_name') - setattr(self, 'strategy_name', kwargs.get('strategy_id', wtypes.Unset)) - - @staticmethod - def _convert_with_links(action_plan, url, expand=True): - if not expand: - action_plan.unset_fields_except( - ['uuid', 'state', 'efficacy_indicators', 'global_efficacy', - 'updated_at', 'audit_uuid', 'strategy_uuid', 'strategy_name']) - - action_plan.links = [ - link.Link.make_link( - 'self', url, - 'action_plans', action_plan.uuid), - link.Link.make_link( - 'bookmark', url, - 'action_plans', action_plan.uuid, - bookmark=True)] - return action_plan - - @classmethod - def convert_with_links(cls, rpc_action_plan, expand=True): - action_plan = ActionPlan(**rpc_action_plan.as_dict()) - return cls._convert_with_links(action_plan, pecan.request.host_url, - expand) - - @classmethod - def sample(cls, expand=True): - sample = cls(uuid='9ef4d84c-41e8-4418-9220-ce55be0436af', - state='ONGOING', - created_at=datetime.datetime.utcnow(), - deleted_at=None, - updated_at=datetime.datetime.utcnow()) - sample._audit_uuid = 'abcee106-14d3-4515-b744-5a26885cf6f6' - sample._efficacy_indicators = [{'description': 'Test indicator', - 'name': 'test_indicator', - 'unit': '%'}] - sample._global_efficacy = {'description': 'Global efficacy', - 'name': 'test_global_efficacy', - 'unit': '%'} - return cls._convert_with_links(sample, 'http://localhost:9322', expand) - - -class ActionPlanCollection(collection.Collection): - """API representation of a collection of action_plans.""" - - action_plans = [ActionPlan] - """A list containing action_plans objects""" - - def __init__(self, **kwargs): - 
self._type = 'action_plans' - - @staticmethod - def convert_with_links(rpc_action_plans, limit, url=None, expand=False, - **kwargs): - ap_collection = ActionPlanCollection() - ap_collection.action_plans = [ActionPlan.convert_with_links( - p, expand) for p in rpc_action_plans] - - if 'sort_key' in kwargs: - reverse = False - if kwargs['sort_key'] == 'audit_uuid': - if 'sort_dir' in kwargs: - reverse = True if kwargs['sort_dir'] == 'desc' else False - ap_collection.action_plans = sorted( - ap_collection.action_plans, - key=lambda action_plan: action_plan.audit_uuid, - reverse=reverse) - - ap_collection.next = ap_collection.get_next(limit, url=url, **kwargs) - return ap_collection - - @classmethod - def sample(cls): - sample = cls() - sample.action_plans = [ActionPlan.sample(expand=False)] - return sample - - -class ActionPlansController(rest.RestController): - """REST controller for Actions.""" - - def __init__(self): - super(ActionPlansController, self).__init__() - - from_actionsPlans = False - """A flag to indicate if the requests to this controller are coming - from the top-level resource ActionPlan.""" - - _custom_actions = { - 'detail': ['GET'], - } - - def _get_action_plans_collection(self, marker, limit, - sort_key, sort_dir, expand=False, - resource_url=None, audit_uuid=None, - strategy=None): - - limit = api_utils.validate_limit(limit) - api_utils.validate_sort_dir(sort_dir) - - marker_obj = None - if marker: - marker_obj = objects.ActionPlan.get_by_uuid( - pecan.request.context, marker) - - filters = {} - if audit_uuid: - filters['audit_uuid'] = audit_uuid - - if strategy: - if utils.is_uuid_like(strategy): - filters['strategy_uuid'] = strategy - else: - filters['strategy_name'] = strategy - - if sort_key == 'audit_uuid': - sort_db_key = None - else: - sort_db_key = sort_key - - action_plans = objects.ActionPlan.list( - pecan.request.context, - limit, - marker_obj, sort_key=sort_db_key, - sort_dir=sort_dir, filters=filters) - - return 
ActionPlanCollection.convert_with_links( - action_plans, limit, - url=resource_url, - expand=expand, - sort_key=sort_key, - sort_dir=sort_dir) - - @wsme_pecan.wsexpose(ActionPlanCollection, types.uuid, int, wtypes.text, - wtypes.text, types.uuid, wtypes.text) - def get_all(self, marker=None, limit=None, - sort_key='id', sort_dir='asc', audit_uuid=None, strategy=None): - """Retrieve a list of action plans. - - :param marker: pagination marker for large data sets. - :param limit: maximum number of resources to return in a single result. - :param sort_key: column to sort results by. Default: id. - :param sort_dir: direction to sort. "asc" or "desc". Default: asc. - :param audit_uuid: Optional UUID of an audit, to get only actions - for that audit. - :param strategy: strategy UUID or name to filter by - """ - context = pecan.request.context - policy.enforce(context, 'action_plan:get_all', - action='action_plan:get_all') - - return self._get_action_plans_collection( - marker, limit, sort_key, sort_dir, - audit_uuid=audit_uuid, strategy=strategy) - - @wsme_pecan.wsexpose(ActionPlanCollection, types.uuid, int, wtypes.text, - wtypes.text, types.uuid, wtypes.text) - def detail(self, marker=None, limit=None, - sort_key='id', sort_dir='asc', audit_uuid=None, strategy=None): - """Retrieve a list of action_plans with detail. - - :param marker: pagination marker for large data sets. - :param limit: maximum number of resources to return in a single result. - :param sort_key: column to sort results by. Default: id. - :param sort_dir: direction to sort. "asc" or "desc". Default: asc. - :param audit_uuid: Optional UUID of an audit, to get only actions - for that audit. 
- :param strategy: strategy UUID or name to filter by - """ - context = pecan.request.context - policy.enforce(context, 'action_plan:detail', - action='action_plan:detail') - - # NOTE(lucasagomes): /detail should only work agaist collections - parent = pecan.request.path.split('/')[:-1][-1] - if parent != "action_plans": - raise exception.HTTPNotFound - - expand = True - resource_url = '/'.join(['action_plans', 'detail']) - return self._get_action_plans_collection( - marker, limit, sort_key, sort_dir, expand, - resource_url, audit_uuid=audit_uuid, strategy=strategy) - - @wsme_pecan.wsexpose(ActionPlan, types.uuid) - def get_one(self, action_plan_uuid): - """Retrieve information about the given action plan. - - :param action_plan_uuid: UUID of a action plan. - """ - if self.from_actionsPlans: - raise exception.OperationNotPermitted - - context = pecan.request.context - action_plan = api_utils.get_resource('ActionPlan', action_plan_uuid) - policy.enforce( - context, 'action_plan:get', action_plan, action='action_plan:get') - - return ActionPlan.convert_with_links(action_plan) - - @wsme_pecan.wsexpose(None, types.uuid, status_code=204) - def delete(self, action_plan_uuid): - """Delete an action plan. - - :param action_plan_uuid: UUID of a action. - """ - context = pecan.request.context - action_plan = api_utils.get_resource( - 'ActionPlan', action_plan_uuid, eager=True) - policy.enforce(context, 'action_plan:delete', action_plan, - action='action_plan:delete') - - action_plan.soft_delete() - - @wsme.validate(types.uuid, [ActionPlanPatchType]) - @wsme_pecan.wsexpose(ActionPlan, types.uuid, - body=[ActionPlanPatchType]) - def patch(self, action_plan_uuid, patch): - """Update an existing action plan. - - :param action_plan_uuid: UUID of a action plan. - :param patch: a json PATCH document to apply to this action plan. 
- """ - if self.from_actionsPlans: - raise exception.OperationNotPermitted - - context = pecan.request.context - action_plan_to_update = api_utils.get_resource( - 'ActionPlan', action_plan_uuid, eager=True) - policy.enforce(context, 'action_plan:update', action_plan_to_update, - action='action_plan:update') - - try: - action_plan_dict = action_plan_to_update.as_dict() - action_plan = ActionPlan(**api_utils.apply_jsonpatch( - action_plan_dict, patch)) - except api_utils.JSONPATCH_EXCEPTIONS as e: - raise exception.PatchError(patch=patch, reason=e) - - launch_action_plan = False - cancel_action_plan = False - - # transitions that are allowed via PATCH - allowed_patch_transitions = [ - (ap_objects.State.RECOMMENDED, - ap_objects.State.PENDING), - (ap_objects.State.RECOMMENDED, - ap_objects.State.CANCELLED), - (ap_objects.State.ONGOING, - ap_objects.State.CANCELLING), - (ap_objects.State.PENDING, - ap_objects.State.CANCELLED), - ] - - # todo: improve this in blueprint watcher-api-validation - if hasattr(action_plan, 'state'): - transition = (action_plan_to_update.state, action_plan.state) - if transition not in allowed_patch_transitions: - error_message = _("State transition not allowed: " - "(%(initial_state)s -> %(new_state)s)") - raise exception.PatchError( - patch=patch, - reason=error_message % dict( - initial_state=action_plan_to_update.state, - new_state=action_plan.state)) - - if action_plan.state == ap_objects.State.PENDING: - launch_action_plan = True - if action_plan.state == ap_objects.State.CANCELLED: - cancel_action_plan = True - - # Update only the fields that have changed - for field in objects.ActionPlan.fields: - try: - patch_val = getattr(action_plan, field) - except AttributeError: - # Ignore fields that aren't exposed in the API - continue - if patch_val == wtypes.Unset: - patch_val = None - if action_plan_to_update[field] != patch_val: - action_plan_to_update[field] = patch_val - - if (field == 'state'and - patch_val == 
objects.action_plan.State.PENDING): - launch_action_plan = True - - action_plan_to_update.save() - - # NOTE: if action plan is cancelled from pending or recommended - # state update action state here only - if cancel_action_plan: - filters = {'action_plan_uuid': action_plan.uuid} - actions = objects.Action.list(pecan.request.context, - filters=filters, eager=True) - for a in actions: - a.state = objects.action.State.CANCELLED - a.save() - - if launch_action_plan: - applier_client = rpcapi.ApplierAPI() - applier_client.launch_action_plan(pecan.request.context, - action_plan.uuid) - - action_plan_to_update = objects.ActionPlan.get_by_uuid( - pecan.request.context, - action_plan_uuid) - return ActionPlan.convert_with_links(action_plan_to_update) diff --git a/watcher/api/controllers/v1/audit.py b/watcher/api/controllers/v1/audit.py deleted file mode 100644 index f654c63..0000000 --- a/watcher/api/controllers/v1/audit.py +++ /dev/null @@ -1,615 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright 2013 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -In the Watcher system, an :ref:`Audit ` is a request for -optimizing a :ref:`Cluster `. - -The optimization is done in order to satisfy one :ref:`Goal ` -on a given :ref:`Cluster `. - -For each :ref:`Audit `, the Watcher system generates an -:ref:`Action Plan `. - -To see the life-cycle and description of an :ref:`Audit ` -states, visit :ref:`the Audit State machine `. 
-""" - -import datetime - -import pecan -from pecan import rest -import wsme -from wsme import types as wtypes -import wsmeext.pecan as wsme_pecan - -from watcher._i18n import _ -from watcher.api.controllers import base -from watcher.api.controllers import link -from watcher.api.controllers.v1 import collection -from watcher.api.controllers.v1 import types -from watcher.api.controllers.v1 import utils as api_utils -from watcher.common import exception -from watcher.common import policy -from watcher.common import utils -from watcher.decision_engine import rpcapi -from watcher import objects - - -class AuditPostType(wtypes.Base): - - audit_template_uuid = wtypes.wsattr(types.uuid, mandatory=False) - - goal = wtypes.wsattr(wtypes.text, mandatory=False) - - strategy = wtypes.wsattr(wtypes.text, mandatory=False) - - audit_type = wtypes.wsattr(wtypes.text, mandatory=True) - - state = wsme.wsattr(wtypes.text, readonly=True, - default=objects.audit.State.PENDING) - - parameters = wtypes.wsattr({wtypes.text: types.jsontype}, mandatory=False, - default={}) - interval = wsme.wsattr(types.interval_or_cron, mandatory=False) - - scope = wtypes.wsattr(types.jsontype, readonly=True) - - auto_trigger = wtypes.wsattr(bool, mandatory=False) - - def as_audit(self, context): - audit_type_values = [val.value for val in objects.audit.AuditType] - if self.audit_type not in audit_type_values: - raise exception.AuditTypeNotFound(audit_type=self.audit_type) - - if (self.audit_type == objects.audit.AuditType.ONESHOT.value and - self.interval not in (wtypes.Unset, None)): - raise exception.AuditIntervalNotAllowed(audit_type=self.audit_type) - - if (self.audit_type == objects.audit.AuditType.CONTINUOUS.value and - self.interval in (wtypes.Unset, None)): - raise exception.AuditIntervalNotSpecified( - audit_type=self.audit_type) - - # If audit_template_uuid was provided, we will provide any - # variables not included in the request, but not override - # those variables that were included. 
- if self.audit_template_uuid: - try: - audit_template = objects.AuditTemplate.get( - context, self.audit_template_uuid) - except exception.AuditTemplateNotFound: - raise exception.Invalid( - message=_('The audit template UUID or name specified is ' - 'invalid')) - at2a = { - 'goal': 'goal_id', - 'strategy': 'strategy_id', - 'scope': 'scope', - } - to_string_fields = set(['goal', 'strategy']) - for k in at2a: - if not getattr(self, k): - try: - at_attr = getattr(audit_template, at2a[k]) - if at_attr and (k in to_string_fields): - at_attr = str(at_attr) - setattr(self, k, at_attr) - except AttributeError: - pass - return Audit( - audit_type=self.audit_type, - parameters=self.parameters, - goal_id=self.goal, - strategy_id=self.strategy, - interval=self.interval, - scope=self.scope, - auto_trigger=self.auto_trigger) - - -class AuditPatchType(types.JsonPatchType): - - @staticmethod - def mandatory_attrs(): - return ['/audit_template_uuid', '/type'] - - @staticmethod - def validate(patch): - - def is_new_state_none(p): - return p.path == '/state' and p.op == 'replace' and p.value is None - - serialized_patch = {'path': patch.path, - 'op': patch.op, - 'value': patch.value} - if (patch.path in AuditPatchType.mandatory_attrs() or - is_new_state_none(patch)): - msg = _("%(field)s can't be updated.") - raise exception.PatchError( - patch=serialized_patch, - reason=msg % dict(field=patch.path)) - return types.JsonPatchType.validate(patch) - - -class Audit(base.APIBase): - """API representation of a audit. - - This class enforces type checking and value constraints, and converts - between the internal object model and the API representation of a audit. 
- """ - _goal_uuid = None - _goal_name = None - _strategy_uuid = None - _strategy_name = None - - def _get_goal(self, value): - if value == wtypes.Unset: - return None - goal = None - try: - if utils.is_uuid_like(value) or utils.is_int_like(value): - goal = objects.Goal.get( - pecan.request.context, value) - else: - goal = objects.Goal.get_by_name( - pecan.request.context, value) - except exception.GoalNotFound: - pass - if goal: - self.goal_id = goal.id - return goal - - def _get_goal_uuid(self): - return self._goal_uuid - - def _set_goal_uuid(self, value): - if value and self._goal_uuid != value: - self._goal_uuid = None - goal = self._get_goal(value) - if goal: - self._goal_uuid = goal.uuid - - def _get_goal_name(self): - return self._goal_name - - def _set_goal_name(self, value): - if value and self._goal_name != value: - self._goal_name = None - goal = self._get_goal(value) - if goal: - self._goal_name = goal.name - - def _get_strategy(self, value): - if value == wtypes.Unset: - return None - strategy = None - try: - if utils.is_uuid_like(value) or utils.is_int_like(value): - strategy = objects.Strategy.get( - pecan.request.context, value) - else: - strategy = objects.Strategy.get_by_name( - pecan.request.context, value) - except exception.StrategyNotFound: - pass - if strategy: - self.strategy_id = strategy.id - return strategy - - def _get_strategy_uuid(self): - return self._strategy_uuid - - def _set_strategy_uuid(self, value): - if value and self._strategy_uuid != value: - self._strategy_uuid = None - strategy = self._get_strategy(value) - if strategy: - self._strategy_uuid = strategy.uuid - - def _get_strategy_name(self): - return self._strategy_name - - def _set_strategy_name(self, value): - if value and self._strategy_name != value: - self._strategy_name = None - strategy = self._get_strategy(value) - if strategy: - self._strategy_name = strategy.name - - uuid = types.uuid - """Unique UUID for this audit""" - - audit_type = wtypes.text - """Type of this 
audit""" - - state = wtypes.text - """This audit state""" - - goal_uuid = wsme.wsproperty( - wtypes.text, _get_goal_uuid, _set_goal_uuid, mandatory=True) - """Goal UUID the audit template refers to""" - - goal_name = wsme.wsproperty( - wtypes.text, _get_goal_name, _set_goal_name, mandatory=False) - """The name of the goal this audit template refers to""" - - strategy_uuid = wsme.wsproperty( - wtypes.text, _get_strategy_uuid, _set_strategy_uuid, mandatory=False) - """Strategy UUID the audit template refers to""" - - strategy_name = wsme.wsproperty( - wtypes.text, _get_strategy_name, _set_strategy_name, mandatory=False) - """The name of the strategy this audit template refers to""" - - parameters = {wtypes.text: types.jsontype} - """The strategy parameters for this audit""" - - links = wsme.wsattr([link.Link], readonly=True) - """A list containing a self link and associated audit links""" - - interval = wsme.wsattr(wtypes.text, mandatory=False) - """Launch audit periodically (in seconds)""" - - scope = wsme.wsattr(types.jsontype, mandatory=False) - """Audit Scope""" - - auto_trigger = wsme.wsattr(bool, mandatory=False, default=False) - """Autoexecute action plan once audit is succeeded""" - - next_run_time = wsme.wsattr(datetime.datetime, mandatory=False) - """The next time audit launch""" - - def __init__(self, **kwargs): - self.fields = [] - fields = list(objects.Audit.fields) - for k in fields: - # Skip fields we do not expose. 
- if not hasattr(self, k): - continue - self.fields.append(k) - setattr(self, k, kwargs.get(k, wtypes.Unset)) - - self.fields.append('goal_id') - self.fields.append('strategy_id') - fields.append('goal_uuid') - setattr(self, 'goal_uuid', kwargs.get('goal_id', - wtypes.Unset)) - fields.append('goal_name') - setattr(self, 'goal_name', kwargs.get('goal_id', - wtypes.Unset)) - fields.append('strategy_uuid') - setattr(self, 'strategy_uuid', kwargs.get('strategy_id', - wtypes.Unset)) - fields.append('strategy_name') - setattr(self, 'strategy_name', kwargs.get('strategy_id', - wtypes.Unset)) - - @staticmethod - def _convert_with_links(audit, url, expand=True): - if not expand: - audit.unset_fields_except(['uuid', 'audit_type', 'state', - 'goal_uuid', 'interval', 'scope', - 'strategy_uuid', 'goal_name', - 'strategy_name', 'auto_trigger', - 'next_run_time']) - - audit.links = [link.Link.make_link('self', url, - 'audits', audit.uuid), - link.Link.make_link('bookmark', url, - 'audits', audit.uuid, - bookmark=True) - ] - - return audit - - @classmethod - def convert_with_links(cls, rpc_audit, expand=True): - audit = Audit(**rpc_audit.as_dict()) - return cls._convert_with_links(audit, pecan.request.host_url, expand) - - @classmethod - def sample(cls, expand=True): - sample = cls(uuid='27e3153e-d5bf-4b7e-b517-fb518e17f34c', - audit_type='ONESHOT', - state='PENDING', - created_at=datetime.datetime.utcnow(), - deleted_at=None, - updated_at=datetime.datetime.utcnow(), - interval='7200', - scope=[], - auto_trigger=False, - next_run_time=datetime.datetime.utcnow()) - - sample.goal_id = '7ae81bb3-dec3-4289-8d6c-da80bd8001ae' - sample.strategy_id = '7ae81bb3-dec3-4289-8d6c-da80bd8001ff' - - return cls._convert_with_links(sample, 'http://localhost:9322', expand) - - -class AuditCollection(collection.Collection): - """API representation of a collection of audits.""" - - audits = [Audit] - """A list containing audits objects""" - - def __init__(self, **kwargs): - super(AuditCollection, 
self).__init__() - self._type = 'audits' - - @staticmethod - def convert_with_links(rpc_audits, limit, url=None, expand=False, - **kwargs): - collection = AuditCollection() - collection.audits = [Audit.convert_with_links(p, expand) - for p in rpc_audits] - - if 'sort_key' in kwargs: - reverse = False - if kwargs['sort_key'] == 'goal_uuid': - if 'sort_dir' in kwargs: - reverse = True if kwargs['sort_dir'] == 'desc' else False - collection.audits = sorted( - collection.audits, - key=lambda audit: audit.goal_uuid, - reverse=reverse) - - collection.next = collection.get_next(limit, url=url, **kwargs) - return collection - - @classmethod - def sample(cls): - sample = cls() - sample.audits = [Audit.sample(expand=False)] - return sample - - -class AuditsController(rest.RestController): - """REST controller for Audits.""" - def __init__(self): - super(AuditsController, self).__init__() - - from_audits = False - """A flag to indicate if the requests to this controller are coming - from the top-level resource Audits.""" - - _custom_actions = { - 'detail': ['GET'], - } - - def _get_audits_collection(self, marker, limit, - sort_key, sort_dir, expand=False, - resource_url=None, goal=None, - strategy=None): - limit = api_utils.validate_limit(limit) - api_utils.validate_sort_dir(sort_dir) - marker_obj = None - if marker: - marker_obj = objects.Audit.get_by_uuid(pecan.request.context, - marker) - - filters = {} - if goal: - if utils.is_uuid_like(goal): - filters['goal_uuid'] = goal - else: - # TODO(michaelgugino): add method to get goal by name. - filters['goal_name'] = goal - - if strategy: - if utils.is_uuid_like(strategy): - filters['strategy_uuid'] = strategy - else: - # TODO(michaelgugino): add method to get goal by name. 
- filters['strategy_name'] = strategy - - if sort_key == 'goal_uuid': - sort_db_key = 'goal_id' - elif sort_key == 'strategy_uuid': - sort_db_key = 'strategy_id' - else: - sort_db_key = sort_key - - audits = objects.Audit.list(pecan.request.context, - limit, - marker_obj, sort_key=sort_db_key, - sort_dir=sort_dir, filters=filters) - - return AuditCollection.convert_with_links(audits, limit, - url=resource_url, - expand=expand, - sort_key=sort_key, - sort_dir=sort_dir) - - @wsme_pecan.wsexpose(AuditCollection, types.uuid, int, wtypes.text, - wtypes.text, wtypes.text, wtypes.text, int) - def get_all(self, marker=None, limit=None, sort_key='id', sort_dir='asc', - goal=None, strategy=None): - """Retrieve a list of audits. - - :param marker: pagination marker for large data sets. - :param limit: maximum number of resources to return in a single result. - :param sort_key: column to sort results by. Default: id. - :param sort_dir: direction to sort. "asc" or "desc". Default: asc. - :param goal: goal UUID or name to filter by - :param strategy: strategy UUID or name to filter by - """ - - context = pecan.request.context - policy.enforce(context, 'audit:get_all', - action='audit:get_all') - - return self._get_audits_collection(marker, limit, sort_key, - sort_dir, goal=goal, - strategy=strategy) - - @wsme_pecan.wsexpose(AuditCollection, wtypes.text, types.uuid, int, - wtypes.text, wtypes.text) - def detail(self, goal=None, marker=None, limit=None, - sort_key='id', sort_dir='asc'): - """Retrieve a list of audits with detail. - - :param goal: goal UUID or name to filter by - :param marker: pagination marker for large data sets. - :param limit: maximum number of resources to return in a single result. - :param sort_key: column to sort results by. Default: id. - :param sort_dir: direction to sort. "asc" or "desc". Default: asc. 
- """ - context = pecan.request.context - policy.enforce(context, 'audit:detail', - action='audit:detail') - # NOTE(lucasagomes): /detail should only work agaist collections - parent = pecan.request.path.split('/')[:-1][-1] - if parent != "audits": - raise exception.HTTPNotFound - - expand = True - resource_url = '/'.join(['audits', 'detail']) - return self._get_audits_collection(marker, limit, - sort_key, sort_dir, expand, - resource_url, - goal=goal) - - @wsme_pecan.wsexpose(Audit, types.uuid) - def get_one(self, audit_uuid): - """Retrieve information about the given audit. - - :param audit_uuid: UUID of a audit. - """ - if self.from_audits: - raise exception.OperationNotPermitted - - context = pecan.request.context - rpc_audit = api_utils.get_resource('Audit', audit_uuid) - policy.enforce(context, 'audit:get', rpc_audit, action='audit:get') - - return Audit.convert_with_links(rpc_audit) - - @wsme_pecan.wsexpose(Audit, body=AuditPostType, status_code=201) - def post(self, audit_p): - """Create a new audit. - - :param audit_p: a audit within the request body. 
- """ - context = pecan.request.context - policy.enforce(context, 'audit:create', - action='audit:create') - audit = audit_p.as_audit(context) - - if self.from_audits: - raise exception.OperationNotPermitted - - if not audit._goal_uuid: - raise exception.Invalid( - message=_('A valid goal_id or audit_template_id ' - 'must be provided')) - - strategy_uuid = audit.strategy_uuid - no_schema = True - if strategy_uuid is not None: - # validate parameter when predefined strategy in audit template - strategy = objects.Strategy.get(pecan.request.context, - strategy_uuid) - schema = strategy.parameters_spec - if schema: - # validate input parameter with default value feedback - no_schema = False - utils.StrictDefaultValidatingDraft4Validator(schema).validate( - audit.parameters) - - if no_schema and audit.parameters: - raise exception.Invalid(_('Specify parameters but no predefined ' - 'strategy for audit template, or no ' - 'parameter spec in predefined strategy')) - - audit_dict = audit.as_dict() - - new_audit = objects.Audit(context, **audit_dict) - new_audit.create() - - # Set the HTTP Location Header - pecan.response.location = link.build_url('audits', new_audit.uuid) - - # trigger decision-engine to run the audit - if new_audit.audit_type == objects.audit.AuditType.ONESHOT.value: - dc_client = rpcapi.DecisionEngineAPI() - dc_client.trigger_audit(context, new_audit.uuid) - - return Audit.convert_with_links(new_audit) - - @wsme.validate(types.uuid, [AuditPatchType]) - @wsme_pecan.wsexpose(Audit, types.uuid, body=[AuditPatchType]) - def patch(self, audit_uuid, patch): - """Update an existing audit. - - :param audit_uuid: UUID of a audit. - :param patch: a json PATCH document to apply to this audit. 
- """ - if self.from_audits: - raise exception.OperationNotPermitted - - context = pecan.request.context - audit_to_update = api_utils.get_resource( - 'Audit', audit_uuid, eager=True) - policy.enforce(context, 'audit:update', audit_to_update, - action='audit:update') - - try: - audit_dict = audit_to_update.as_dict() - - initial_state = audit_dict['state'] - new_state = api_utils.get_patch_value(patch, 'state') - if not api_utils.check_audit_state_transition( - patch, initial_state): - error_message = _("State transition not allowed: " - "(%(initial_state)s -> %(new_state)s)") - raise exception.PatchError( - patch=patch, - reason=error_message % dict( - initial_state=initial_state, new_state=new_state)) - - audit = Audit(**api_utils.apply_jsonpatch(audit_dict, patch)) - except api_utils.JSONPATCH_EXCEPTIONS as e: - raise exception.PatchError(patch=patch, reason=e) - - # Update only the fields that have changed - for field in objects.Audit.fields: - try: - patch_val = getattr(audit, field) - except AttributeError: - # Ignore fields that aren't exposed in the API - continue - if patch_val == wtypes.Unset: - patch_val = None - if audit_to_update[field] != patch_val: - audit_to_update[field] = patch_val - - audit_to_update.save() - return Audit.convert_with_links(audit_to_update) - - @wsme_pecan.wsexpose(None, types.uuid, status_code=204) - def delete(self, audit_uuid): - """Delete a audit. - - :param audit_uuid: UUID of a audit. - """ - context = pecan.request.context - audit_to_delete = api_utils.get_resource( - 'Audit', audit_uuid, eager=True) - policy.enforce(context, 'audit:update', audit_to_delete, - action='audit:update') - - audit_to_delete.soft_delete() diff --git a/watcher/api/controllers/v1/audit_template.py b/watcher/api/controllers/v1/audit_template.py deleted file mode 100644 index b85e2f2..0000000 --- a/watcher/api/controllers/v1/audit_template.py +++ /dev/null @@ -1,657 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright 2013 Red Hat, Inc. 
-# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -An :ref:`Audit ` may be launched several times with the same -settings (:ref:`Goal `, thresholds, ...). Therefore it makes -sense to save those settings in some sort of Audit preset object, which is -known as an :ref:`Audit Template `. - -An :ref:`Audit Template ` contains at least the -:ref:`Goal ` of the :ref:`Audit `. - -It may also contain some error handling settings indicating whether: - -- :ref:`Watcher Applier ` stops the - entire operation -- :ref:`Watcher Applier ` performs a rollback - -and how many retries should be attempted before failure occurs (also the latter -can be complex: for example the scenario in which there are many first-time -failures on ultimately successful :ref:`Actions `). - -Moreover, an :ref:`Audit Template ` may contain some -settings related to the level of automation for the -:ref:`Action Plan ` that will be generated by the -:ref:`Audit `. -A flag will indicate whether the :ref:`Action Plan ` -will be launched automatically or will need a manual confirmation from the -:ref:`Administrator `. 
-""" - -import datetime - -import pecan -from pecan import rest -import wsme -from wsme import types as wtypes -import wsmeext.pecan as wsme_pecan - -from watcher._i18n import _ -from watcher.api.controllers import base -from watcher.api.controllers import link -from watcher.api.controllers.v1 import collection -from watcher.api.controllers.v1 import types -from watcher.api.controllers.v1 import utils as api_utils -from watcher.common import context as context_utils -from watcher.common import exception -from watcher.common import policy -from watcher.common import utils as common_utils -from watcher.decision_engine.scope import default -from watcher import objects - - -class AuditTemplatePostType(wtypes.Base): - _ctx = context_utils.make_context() - - name = wtypes.wsattr(wtypes.text, mandatory=True) - """Name of this audit template""" - - description = wtypes.wsattr(wtypes.text, mandatory=False) - """Short description of this audit template""" - - goal = wtypes.wsattr(wtypes.text, mandatory=True) - """Goal UUID or name of the audit template""" - - strategy = wtypes.wsattr(wtypes.text, mandatory=False) - """Strategy UUID or name of the audit template""" - - scope = wtypes.wsattr(types.jsontype, mandatory=False, default=[]) - """Audit Scope""" - - def as_audit_template(self): - return AuditTemplate( - name=self.name, - description=self.description, - goal_id=self.goal, # Dirty trick ... - goal=self.goal, - strategy_id=self.strategy, # Dirty trick ... 
- strategy_uuid=self.strategy, - scope=self.scope, - ) - - @staticmethod - def validate(audit_template): - available_goals = objects.Goal.list(AuditTemplatePostType._ctx) - available_goal_uuids_map = {g.uuid: g for g in available_goals} - available_goal_names_map = {g.name: g for g in available_goals} - if audit_template.goal in available_goal_uuids_map: - goal = available_goal_uuids_map[audit_template.goal] - elif audit_template.goal in available_goal_names_map: - goal = available_goal_names_map[audit_template.goal] - else: - raise exception.InvalidGoal(goal=audit_template.goal) - - common_utils.Draft4Validator( - default.DefaultScope.DEFAULT_SCHEMA).validate(audit_template.scope) - - include_host_aggregates = False - exclude_host_aggregates = False - for rule in audit_template.scope: - if 'host_aggregates' in rule: - include_host_aggregates = True - elif 'exclude' in rule: - for resource in rule['exclude']: - if 'host_aggregates' in resource: - exclude_host_aggregates = True - if include_host_aggregates and exclude_host_aggregates: - raise exception.Invalid( - message=_( - "host_aggregates can't be " - "included and excluded together")) - - if audit_template.strategy: - available_strategies = objects.Strategy.list( - AuditTemplatePostType._ctx) - available_strategies_map = { - s.uuid: s for s in available_strategies} - if audit_template.strategy not in available_strategies_map: - raise exception.InvalidStrategy( - strategy=audit_template.strategy) - - strategy = available_strategies_map[audit_template.strategy] - # Check that the strategy we indicate is actually related to the - # specified goal - if strategy.goal_id != goal.id: - choices = ["'%s' (%s)" % (s.uuid, s.name) - for s in available_strategies] - raise exception.InvalidStrategy( - message=_( - "'%(strategy)s' strategy does relate to the " - "'%(goal)s' goal. 
Possible choices: %(choices)s") - % dict(strategy=strategy.name, goal=goal.name, - choices=", ".join(choices))) - audit_template.strategy = strategy.uuid - - # We force the UUID so that we do not need to query the DB with the - # name afterwards - audit_template.goal = goal.uuid - - return audit_template - - -class AuditTemplatePatchType(types.JsonPatchType): - - _ctx = context_utils.make_context() - - @staticmethod - def mandatory_attrs(): - return [] - - @staticmethod - def validate(patch): - if patch.path == "/goal" and patch.op != "remove": - AuditTemplatePatchType._validate_goal(patch) - elif patch.path == "/goal" and patch.op == "remove": - raise exception.OperationNotPermitted( - _("Cannot remove 'goal' attribute " - "from an audit template")) - if patch.path == "/strategy": - AuditTemplatePatchType._validate_strategy(patch) - return types.JsonPatchType.validate(patch) - - @staticmethod - def _validate_goal(patch): - patch.path = "/goal_id" - goal = patch.value - - if goal: - available_goals = objects.Goal.list( - AuditTemplatePatchType._ctx) - available_goal_uuids_map = {g.uuid: g for g in available_goals} - available_goal_names_map = {g.name: g for g in available_goals} - if goal in available_goal_uuids_map: - patch.value = available_goal_uuids_map[goal].id - elif goal in available_goal_names_map: - patch.value = available_goal_names_map[goal].id - else: - raise exception.InvalidGoal(goal=goal) - - @staticmethod - def _validate_strategy(patch): - patch.path = "/strategy_id" - strategy = patch.value - if strategy: - available_strategies = objects.Strategy.list( - AuditTemplatePatchType._ctx) - available_strategy_uuids_map = { - s.uuid: s for s in available_strategies} - available_strategy_names_map = { - s.name: s for s in available_strategies} - if strategy in available_strategy_uuids_map: - patch.value = available_strategy_uuids_map[strategy].id - elif strategy in available_strategy_names_map: - patch.value = available_strategy_names_map[strategy].id - 
else: - raise exception.InvalidStrategy(strategy=strategy) - - -class AuditTemplate(base.APIBase): - """API representation of a audit template. - - This class enforces type checking and value constraints, and converts - between the internal object model and the API representation of an - audit template. - """ - - _goal_uuid = None - _goal_name = None - - _strategy_uuid = None - _strategy_name = None - - def _get_goal(self, value): - if value == wtypes.Unset: - return None - goal = None - try: - if (common_utils.is_uuid_like(value) or - common_utils.is_int_like(value)): - goal = objects.Goal.get( - pecan.request.context, value) - else: - goal = objects.Goal.get_by_name( - pecan.request.context, value) - except exception.GoalNotFound: - pass - if goal: - self.goal_id = goal.id - return goal - - def _get_strategy(self, value): - if value == wtypes.Unset: - return None - strategy = None - try: - if (common_utils.is_uuid_like(value) or - common_utils.is_int_like(value)): - strategy = objects.Strategy.get( - pecan.request.context, value) - else: - strategy = objects.Strategy.get_by_name( - pecan.request.context, value) - except exception.StrategyNotFound: - pass - if strategy: - self.strategy_id = strategy.id - return strategy - - def _get_goal_uuid(self): - return self._goal_uuid - - def _set_goal_uuid(self, value): - if value and self._goal_uuid != value: - self._goal_uuid = None - goal = self._get_goal(value) - if goal: - self._goal_uuid = goal.uuid - - def _get_strategy_uuid(self): - return self._strategy_uuid - - def _set_strategy_uuid(self, value): - if value and self._strategy_uuid != value: - self._strategy_uuid = None - strategy = self._get_strategy(value) - if strategy: - self._strategy_uuid = strategy.uuid - - def _get_goal_name(self): - return self._goal_name - - def _set_goal_name(self, value): - if value and self._goal_name != value: - self._goal_name = None - goal = self._get_goal(value) - if goal: - self._goal_name = goal.name - - def 
_get_strategy_name(self): - return self._strategy_name - - def _set_strategy_name(self, value): - if value and self._strategy_name != value: - self._strategy_name = None - strategy = self._get_strategy(value) - if strategy: - self._strategy_name = strategy.name - - uuid = wtypes.wsattr(types.uuid, readonly=True) - """Unique UUID for this audit template""" - - name = wtypes.text - """Name of this audit template""" - - description = wtypes.wsattr(wtypes.text, mandatory=False) - """Short description of this audit template""" - - goal_uuid = wsme.wsproperty( - wtypes.text, _get_goal_uuid, _set_goal_uuid, mandatory=True) - """Goal UUID the audit template refers to""" - - goal_name = wsme.wsproperty( - wtypes.text, _get_goal_name, _set_goal_name, mandatory=False) - """The name of the goal this audit template refers to""" - - strategy_uuid = wsme.wsproperty( - wtypes.text, _get_strategy_uuid, _set_strategy_uuid, mandatory=False) - """Strategy UUID the audit template refers to""" - - strategy_name = wsme.wsproperty( - wtypes.text, _get_strategy_name, _set_strategy_name, mandatory=False) - """The name of the strategy this audit template refers to""" - - audits = wsme.wsattr([link.Link], readonly=True) - """Links to the collection of audits contained in this audit template""" - - links = wsme.wsattr([link.Link], readonly=True) - """A list containing a self link and associated audit template links""" - - scope = wsme.wsattr(types.jsontype, mandatory=False) - """Audit Scope""" - - def __init__(self, **kwargs): - super(AuditTemplate, self).__init__() - self.fields = [] - fields = list(objects.AuditTemplate.fields) - - for k in fields: - # Skip fields we do not expose. 
- if not hasattr(self, k): - continue - self.fields.append(k) - setattr(self, k, kwargs.get(k, wtypes.Unset)) - - self.fields.append('goal_id') - self.fields.append('strategy_id') - setattr(self, 'strategy_id', kwargs.get('strategy_id', wtypes.Unset)) - - # goal_uuid & strategy_uuid are not part of - # objects.AuditTemplate.fields because they're API-only attributes. - self.fields.append('goal_uuid') - self.fields.append('goal_name') - self.fields.append('strategy_uuid') - self.fields.append('strategy_name') - setattr(self, 'goal_uuid', kwargs.get('goal_id', wtypes.Unset)) - setattr(self, 'goal_name', kwargs.get('goal_id', wtypes.Unset)) - setattr(self, 'strategy_uuid', - kwargs.get('strategy_id', wtypes.Unset)) - setattr(self, 'strategy_name', - kwargs.get('strategy_id', wtypes.Unset)) - - @staticmethod - def _convert_with_links(audit_template, url, expand=True): - if not expand: - audit_template.unset_fields_except( - ['uuid', 'name', 'goal_uuid', 'goal_name', - 'scope', 'strategy_uuid', 'strategy_name']) - - # The numeric ID should not be exposed to - # the user, it's internal only. 
- audit_template.goal_id = wtypes.Unset - audit_template.strategy_id = wtypes.Unset - - audit_template.links = [link.Link.make_link('self', url, - 'audit_templates', - audit_template.uuid), - link.Link.make_link('bookmark', url, - 'audit_templates', - audit_template.uuid, - bookmark=True)] - return audit_template - - @classmethod - def convert_with_links(cls, rpc_audit_template, expand=True): - audit_template = AuditTemplate(**rpc_audit_template.as_dict()) - return cls._convert_with_links(audit_template, pecan.request.host_url, - expand) - - @classmethod - def sample(cls, expand=True): - sample = cls(uuid='27e3153e-d5bf-4b7e-b517-fb518e17f34c', - name='My Audit Template', - description='Description of my audit template', - goal_uuid='83e44733-b640-40e2-8d8a-7dd3be7134e6', - strategy_uuid='367d826e-b6a4-4b70-bc44-c3f6fe1c9986', - created_at=datetime.datetime.utcnow(), - deleted_at=None, - updated_at=datetime.datetime.utcnow(), - scope=[],) - return cls._convert_with_links(sample, 'http://localhost:9322', expand) - - -class AuditTemplateCollection(collection.Collection): - """API representation of a collection of audit templates.""" - - audit_templates = [AuditTemplate] - """A list containing audit templates objects""" - - def __init__(self, **kwargs): - super(AuditTemplateCollection, self).__init__() - self._type = 'audit_templates' - - @staticmethod - def convert_with_links(rpc_audit_templates, limit, url=None, expand=False, - **kwargs): - at_collection = AuditTemplateCollection() - at_collection.audit_templates = [ - AuditTemplate.convert_with_links(p, expand) - for p in rpc_audit_templates] - at_collection.next = at_collection.get_next(limit, url=url, **kwargs) - return at_collection - - @classmethod - def sample(cls): - sample = cls() - sample.audit_templates = [AuditTemplate.sample(expand=False)] - return sample - - -class AuditTemplatesController(rest.RestController): - """REST controller for AuditTemplates.""" - def __init__(self): - 
super(AuditTemplatesController, self).__init__() - - from_audit_templates = False - """A flag to indicate if the requests to this controller are coming - from the top-level resource AuditTemplates.""" - - _custom_actions = { - 'detail': ['GET'], - } - - def _get_audit_templates_collection(self, filters, marker, limit, - sort_key, sort_dir, expand=False, - resource_url=None): - api_utils.validate_search_filters( - filters, list(objects.audit_template.AuditTemplate.fields.keys()) + - ["goal_uuid", "goal_name", "strategy_uuid", "strategy_name"]) - limit = api_utils.validate_limit(limit) - api_utils.validate_sort_dir(sort_dir) - - marker_obj = None - if marker: - marker_obj = objects.AuditTemplate.get_by_uuid( - pecan.request.context, - marker) - - audit_templates = objects.AuditTemplate.list( - pecan.request.context, - filters, - limit, - marker_obj, sort_key=sort_key, - sort_dir=sort_dir) - - return AuditTemplateCollection.convert_with_links(audit_templates, - limit, - url=resource_url, - expand=expand, - sort_key=sort_key, - sort_dir=sort_dir) - - @wsme_pecan.wsexpose(AuditTemplateCollection, wtypes.text, wtypes.text, - types.uuid, int, wtypes.text, wtypes.text) - def get_all(self, goal=None, strategy=None, marker=None, - limit=None, sort_key='id', sort_dir='asc'): - """Retrieve a list of audit templates. - - :param goal: goal UUID or name to filter by - :param strategy: strategy UUID or name to filter by - :param marker: pagination marker for large data sets. - :param limit: maximum number of resources to return in a single result. - :param sort_key: column to sort results by. Default: id. - :param sort_dir: direction to sort. "asc" or "desc". Default: asc. 
- """ - context = pecan.request.context - policy.enforce(context, 'audit_template:get_all', - action='audit_template:get_all') - filters = {} - if goal: - if common_utils.is_uuid_like(goal): - filters['goal_uuid'] = goal - else: - filters['goal_name'] = goal - - if strategy: - if common_utils.is_uuid_like(strategy): - filters['strategy_uuid'] = strategy - else: - filters['strategy_name'] = strategy - - return self._get_audit_templates_collection( - filters, marker, limit, sort_key, sort_dir) - - @wsme_pecan.wsexpose(AuditTemplateCollection, wtypes.text, wtypes.text, - types.uuid, int, wtypes.text, wtypes.text) - def detail(self, goal=None, strategy=None, marker=None, - limit=None, sort_key='id', sort_dir='asc'): - """Retrieve a list of audit templates with detail. - - :param goal: goal UUID or name to filter by - :param strategy: strategy UUID or name to filter by - :param marker: pagination marker for large data sets. - :param limit: maximum number of resources to return in a single result. - :param sort_key: column to sort results by. Default: id. - :param sort_dir: direction to sort. "asc" or "desc". Default: asc. 
- """ - context = pecan.request.context - policy.enforce(context, 'audit_template:detail', - action='audit_template:detail') - - # NOTE(lucasagomes): /detail should only work agaist collections - parent = pecan.request.path.split('/')[:-1][-1] - if parent != "audit_templates": - raise exception.HTTPNotFound - - filters = {} - if goal: - if common_utils.is_uuid_like(goal): - filters['goal_uuid'] = goal - else: - filters['goal_name'] = goal - - if strategy: - if common_utils.is_uuid_like(strategy): - filters['strategy_uuid'] = strategy - else: - filters['strategy_name'] = strategy - - expand = True - resource_url = '/'.join(['audit_templates', 'detail']) - return self._get_audit_templates_collection(filters, marker, limit, - sort_key, sort_dir, expand, - resource_url) - - @wsme_pecan.wsexpose(AuditTemplate, wtypes.text) - def get_one(self, audit_template): - """Retrieve information about the given audit template. - - :param audit audit_template: UUID or name of an audit template. - """ - if self.from_audit_templates: - raise exception.OperationNotPermitted - - context = pecan.request.context - rpc_audit_template = api_utils.get_resource('AuditTemplate', - audit_template) - policy.enforce(context, 'audit_template:get', rpc_audit_template, - action='audit_template:get') - - return AuditTemplate.convert_with_links(rpc_audit_template) - - @wsme.validate(types.uuid, AuditTemplatePostType) - @wsme_pecan.wsexpose(AuditTemplate, body=AuditTemplatePostType, - status_code=201) - def post(self, audit_template_postdata): - """Create a new audit template. - - :param audit_template_postdata: the audit template POST data - from the request body. 
- """ - if self.from_audit_templates: - raise exception.OperationNotPermitted - - context = pecan.request.context - policy.enforce(context, 'audit_template:create', - action='audit_template:create') - - context = pecan.request.context - audit_template = audit_template_postdata.as_audit_template() - audit_template_dict = audit_template.as_dict() - new_audit_template = objects.AuditTemplate(context, - **audit_template_dict) - new_audit_template.create() - - # Set the HTTP Location Header - pecan.response.location = link.build_url( - 'audit_templates', new_audit_template.uuid) - return AuditTemplate.convert_with_links(new_audit_template) - - @wsme.validate(types.uuid, [AuditTemplatePatchType]) - @wsme_pecan.wsexpose(AuditTemplate, wtypes.text, - body=[AuditTemplatePatchType]) - def patch(self, audit_template, patch): - """Update an existing audit template. - - :param audit template_uuid: UUID of a audit template. - :param patch: a json PATCH document to apply to this audit template. - """ - if self.from_audit_templates: - raise exception.OperationNotPermitted - - context = pecan.request.context - audit_template_to_update = api_utils.get_resource('AuditTemplate', - audit_template) - policy.enforce(context, 'audit_template:update', - audit_template_to_update, - action='audit_template:update') - - if common_utils.is_uuid_like(audit_template): - audit_template_to_update = objects.AuditTemplate.get_by_uuid( - pecan.request.context, - audit_template) - else: - audit_template_to_update = objects.AuditTemplate.get_by_name( - pecan.request.context, - audit_template) - - try: - audit_template_dict = audit_template_to_update.as_dict() - audit_template = AuditTemplate(**api_utils.apply_jsonpatch( - audit_template_dict, patch)) - except api_utils.JSONPATCH_EXCEPTIONS as e: - raise exception.PatchError(patch=patch, reason=e) - - # Update only the fields that have changed - for field in objects.AuditTemplate.fields: - try: - patch_val = getattr(audit_template, field) - except 
AttributeError: - # Ignore fields that aren't exposed in the API - continue - if patch_val == wtypes.Unset: - patch_val = None - if audit_template_to_update[field] != patch_val: - audit_template_to_update[field] = patch_val - - audit_template_to_update.save() - return AuditTemplate.convert_with_links(audit_template_to_update) - - @wsme_pecan.wsexpose(None, wtypes.text, status_code=204) - def delete(self, audit_template): - """Delete a audit template. - - :param audit template_uuid: UUID or name of an audit template. - """ - context = pecan.request.context - audit_template_to_delete = api_utils.get_resource('AuditTemplate', - audit_template) - policy.enforce(context, 'audit_template:update', - audit_template_to_delete, - action='audit_template:update') - - audit_template_to_delete.soft_delete() diff --git a/watcher/api/controllers/v1/collection.py b/watcher/api/controllers/v1/collection.py deleted file mode 100644 index 05e05df..0000000 --- a/watcher/api/controllers/v1/collection.py +++ /dev/null @@ -1,50 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright 2013 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import pecan -from wsme import types as wtypes - -from watcher.api.controllers import base -from watcher.api.controllers import link - - -class Collection(base.APIBase): - - next = wtypes.text - """A link to retrieve the next subset of the collection""" - - @property - def collection(self): - return getattr(self, self._type) - - def has_next(self, limit): - """Return whether collection has more items.""" - return len(self.collection) and len(self.collection) == limit - - def get_next(self, limit, url=None, marker_field="uuid", **kwargs): - """Return a link to the next subset of the collection.""" - if not self.has_next(limit): - return wtypes.Unset - - resource_url = url or self._type - q_args = ''.join(['%s=%s&' % (key, kwargs[key]) for key in kwargs]) - next_args = '?%(args)slimit=%(limit)d&marker=%(marker)s' % { - 'args': q_args, 'limit': limit, - 'marker': getattr(self.collection[-1], marker_field)} - - return link.Link.make_link('next', pecan.request.host_url, - resource_url, next_args).href diff --git a/watcher/api/controllers/v1/efficacy_indicator.py b/watcher/api/controllers/v1/efficacy_indicator.py deleted file mode 100644 index b17ccf2..0000000 --- a/watcher/api/controllers/v1/efficacy_indicator.py +++ /dev/null @@ -1,72 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -""" -An efficacy indicator is a single value that gives an indication on how the -:ref:`solution ` produced by a given :ref:`strategy -` performed. These efficacy indicators are specific to a -given :ref:`goal ` and are usually used to compute the -:ref:`global efficacy ` of the resulting :ref:`action plan -`. - -In Watcher, these efficacy indicators are specified alongside the goal they -relate to. When a strategy (which always relates to a goal) is executed, it -produces a solution containing the efficacy indicators specified by the goal. -This solution, which has been translated by the :ref:`Watcher Planner -` into an action plan, will see its indicators and -global efficacy stored and would now be accessible through the :ref:`Watcher -API `. -""" - -import numbers - -from wsme import types as wtypes - -from watcher.api.controllers import base -from watcher import objects - - -class EfficacyIndicator(base.APIBase): - """API representation of a efficacy indicator. - - This class enforces type checking and value constraints, and converts - between the internal object model and the API representation of an - efficacy indicator. - """ - - name = wtypes.wsattr(wtypes.text, mandatory=True) - """Name of this efficacy indicator""" - - description = wtypes.wsattr(wtypes.text, mandatory=False) - """Description of this efficacy indicator""" - - unit = wtypes.wsattr(wtypes.text, mandatory=False) - """Unit of this efficacy indicator""" - - value = wtypes.wsattr(numbers.Number, mandatory=True) - """Value of this efficacy indicator""" - - def __init__(self, **kwargs): - super(EfficacyIndicator, self).__init__() - - self.fields = [] - fields = list(objects.EfficacyIndicator.fields) - for field in fields: - # Skip fields we do not expose. 
- if not hasattr(self, field): - continue - self.fields.append(field) - setattr(self, field, kwargs.get(field, wtypes.Unset)) diff --git a/watcher/api/controllers/v1/goal.py b/watcher/api/controllers/v1/goal.py deleted file mode 100644 index a7dd28b..0000000 --- a/watcher/api/controllers/v1/goal.py +++ /dev/null @@ -1,240 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright 2013 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -A :ref:`Goal ` is a human readable, observable and measurable -end result having one objective to be achieved. - -Here are some examples of :ref:`Goals `: - -- minimize the energy consumption -- minimize the number of compute nodes (consolidation) -- balance the workload among compute nodes -- minimize the license cost (some softwares have a licensing model which is - based on the number of sockets or cores where the software is deployed) -- find the most appropriate moment for a planned maintenance on a - given group of host (which may be an entire availability zone): - power supply replacement, cooling system replacement, hardware - modification, ... 
-""" - -import pecan -from pecan import rest -import wsme -from wsme import types as wtypes -import wsmeext.pecan as wsme_pecan - -from watcher.api.controllers import base -from watcher.api.controllers import link -from watcher.api.controllers.v1 import collection -from watcher.api.controllers.v1 import types -from watcher.api.controllers.v1 import utils as api_utils -from watcher.common import exception -from watcher.common import policy -from watcher import objects - - -class Goal(base.APIBase): - """API representation of a goal. - - This class enforces type checking and value constraints, and converts - between the internal object model and the API representation of a goal. - """ - - uuid = types.uuid - """Unique UUID for this goal""" - - name = wtypes.text - """Name of the goal""" - - display_name = wtypes.text - """Localized name of the goal""" - - efficacy_specification = wtypes.wsattr(types.jsontype, readonly=True) - """Efficacy specification for this goal""" - - links = wsme.wsattr([link.Link], readonly=True) - """A list containing a self link and associated audit template links""" - - def __init__(self, **kwargs): - self.fields = [] - fields = list(objects.Goal.fields) - - for k in fields: - # Skip fields we do not expose. 
- if not hasattr(self, k): - continue - self.fields.append(k) - setattr(self, k, kwargs.get(k, wtypes.Unset)) - - @staticmethod - def _convert_with_links(goal, url, expand=True): - if not expand: - goal.unset_fields_except(['uuid', 'name', 'display_name', - 'efficacy_specification']) - - goal.links = [link.Link.make_link('self', url, - 'goals', goal.uuid), - link.Link.make_link('bookmark', url, - 'goals', goal.uuid, - bookmark=True)] - return goal - - @classmethod - def convert_with_links(cls, goal, expand=True): - goal = Goal(**goal.as_dict()) - return cls._convert_with_links(goal, pecan.request.host_url, expand) - - @classmethod - def sample(cls, expand=True): - sample = cls( - uuid='27e3153e-d5bf-4b7e-b517-fb518e17f34c', - name='DUMMY', - display_name='Dummy strategy', - efficacy_specification=[ - {'description': 'Dummy indicator', 'name': 'dummy', - 'schema': 'Range(min=0, max=100, min_included=True, ' - 'max_included=True, msg=None)', - 'unit': '%'} - ]) - return cls._convert_with_links(sample, 'http://localhost:9322', expand) - - -class GoalCollection(collection.Collection): - """API representation of a collection of goals.""" - - goals = [Goal] - """A list containing goals objects""" - - def __init__(self, **kwargs): - super(GoalCollection, self).__init__() - self._type = 'goals' - - @staticmethod - def convert_with_links(goals, limit, url=None, expand=False, - **kwargs): - goal_collection = GoalCollection() - goal_collection.goals = [ - Goal.convert_with_links(g, expand) for g in goals] - - if 'sort_key' in kwargs: - reverse = False - if kwargs['sort_key'] == 'strategy': - if 'sort_dir' in kwargs: - reverse = True if kwargs['sort_dir'] == 'desc' else False - goal_collection.goals = sorted( - goal_collection.goals, - key=lambda goal: goal.uuid, - reverse=reverse) - - goal_collection.next = goal_collection.get_next( - limit, url=url, **kwargs) - return goal_collection - - @classmethod - def sample(cls): - sample = cls() - sample.goals = 
[Goal.sample(expand=False)] - return sample - - -class GoalsController(rest.RestController): - """REST controller for Goals.""" - def __init__(self): - super(GoalsController, self).__init__() - - from_goals = False - """A flag to indicate if the requests to this controller are coming - from the top-level resource Goals.""" - - _custom_actions = { - 'detail': ['GET'], - } - - def _get_goals_collection(self, marker, limit, sort_key, sort_dir, - expand=False, resource_url=None): - limit = api_utils.validate_limit(limit) - api_utils.validate_sort_dir(sort_dir) - - sort_db_key = (sort_key if sort_key in objects.Goal.fields.keys() - else None) - - marker_obj = None - if marker: - marker_obj = objects.Goal.get_by_uuid( - pecan.request.context, marker) - - goals = objects.Goal.list(pecan.request.context, limit, marker_obj, - sort_key=sort_db_key, sort_dir=sort_dir) - - return GoalCollection.convert_with_links(goals, limit, - url=resource_url, - expand=expand, - sort_key=sort_key, - sort_dir=sort_dir) - - @wsme_pecan.wsexpose(GoalCollection, wtypes.text, - int, wtypes.text, wtypes.text) - def get_all(self, marker=None, limit=None, sort_key='id', sort_dir='asc'): - """Retrieve a list of goals. - - :param marker: pagination marker for large data sets. - :param limit: maximum number of resources to return in a single result. - :param sort_key: column to sort results by. Default: id. - :param sort_dir: direction to sort. "asc" or "desc". Default: asc. - """ - context = pecan.request.context - policy.enforce(context, 'goal:get_all', - action='goal:get_all') - return self._get_goals_collection(marker, limit, sort_key, sort_dir) - - @wsme_pecan.wsexpose(GoalCollection, wtypes.text, int, - wtypes.text, wtypes.text) - def detail(self, marker=None, limit=None, sort_key='id', sort_dir='asc'): - """Retrieve a list of goals with detail. - - :param marker: pagination marker for large data sets. - :param limit: maximum number of resources to return in a single result. 
- :param sort_key: column to sort results by. Default: id. - :param sort_dir: direction to sort. "asc" or "desc". Default: asc. - """ - context = pecan.request.context - policy.enforce(context, 'goal:detail', - action='goal:detail') - # NOTE(lucasagomes): /detail should only work agaist collections - parent = pecan.request.path.split('/')[:-1][-1] - if parent != "goals": - raise exception.HTTPNotFound - expand = True - resource_url = '/'.join(['goals', 'detail']) - return self._get_goals_collection(marker, limit, sort_key, sort_dir, - expand, resource_url) - - @wsme_pecan.wsexpose(Goal, wtypes.text) - def get_one(self, goal): - """Retrieve information about the given goal. - - :param goal: UUID or name of the goal. - """ - if self.from_goals: - raise exception.OperationNotPermitted - - context = pecan.request.context - rpc_goal = api_utils.get_resource('Goal', goal) - policy.enforce(context, 'goal:get', rpc_goal, action='goal:get') - - return Goal.convert_with_links(rpc_goal) diff --git a/watcher/api/controllers/v1/scoring_engine.py b/watcher/api/controllers/v1/scoring_engine.py deleted file mode 100644 index 0e13d38..0000000 --- a/watcher/api/controllers/v1/scoring_engine.py +++ /dev/null @@ -1,248 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright 2016 Intel -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -""" -A :ref:`Scoring Engine ` is an executable that has -a well-defined input, a well-defined output, and performs a purely mathematical -task. That is, the calculation does not depend on the environment in which it -is running - it would produce the same result anywhere. - -Because there might be multiple algorithms used to build a particular data -model (and therefore a scoring engine), the usage of scoring engine might -vary. A metainfo field is supposed to contain any information which might -be needed by the user of a given scoring engine. -""" - -import pecan -from pecan import rest -import wsme -from wsme import types as wtypes -import wsmeext.pecan as wsme_pecan - -from watcher.api.controllers import base -from watcher.api.controllers import link -from watcher.api.controllers.v1 import collection -from watcher.api.controllers.v1 import types -from watcher.api.controllers.v1 import utils as api_utils -from watcher.common import exception -from watcher.common import policy -from watcher import objects - - -class ScoringEngine(base.APIBase): - """API representation of a scoring engine. - - This class enforces type checking and value constraints, and converts - between the internal object model and the API representation of a scoring - engine. 
- """ - - uuid = types.uuid - """Unique UUID of the scoring engine""" - - name = wtypes.text - """The name of the scoring engine""" - - description = wtypes.text - """A human readable description of the Scoring Engine""" - - metainfo = wtypes.text - """A metadata associated with the scoring engine""" - - links = wsme.wsattr([link.Link], readonly=True) - """A list containing a self link and associated action links""" - - def __init__(self, **kwargs): - super(ScoringEngine, self).__init__() - - self.fields = [] - self.fields.append('uuid') - self.fields.append('name') - self.fields.append('description') - self.fields.append('metainfo') - setattr(self, 'uuid', kwargs.get('uuid', wtypes.Unset)) - setattr(self, 'name', kwargs.get('name', wtypes.Unset)) - setattr(self, 'description', kwargs.get('description', wtypes.Unset)) - setattr(self, 'metainfo', kwargs.get('metainfo', wtypes.Unset)) - - @staticmethod - def _convert_with_links(se, url, expand=True): - if not expand: - se.unset_fields_except( - ['uuid', 'name', 'description', 'metainfo']) - - se.links = [link.Link.make_link('self', url, - 'scoring_engines', se.uuid), - link.Link.make_link('bookmark', url, - 'scoring_engines', se.uuid, - bookmark=True)] - return se - - @classmethod - def convert_with_links(cls, scoring_engine, expand=True): - scoring_engine = ScoringEngine(**scoring_engine.as_dict()) - return cls._convert_with_links( - scoring_engine, pecan.request.host_url, expand) - - @classmethod - def sample(cls, expand=True): - sample = cls(uuid='81bbd3c7-3b08-4d12-a268-99354dbf7b71', - name='sample-se-123', - description='Sample Scoring Engine 123 just for testing') - return cls._convert_with_links(sample, 'http://localhost:9322', expand) - - -class ScoringEngineCollection(collection.Collection): - """API representation of a collection of scoring engines.""" - - scoring_engines = [ScoringEngine] - """A list containing scoring engine objects""" - - def __init__(self, **kwargs): - super(ScoringEngineCollection, 
self).__init__() - self._type = 'scoring_engines' - - @staticmethod - def convert_with_links(scoring_engines, limit, url=None, expand=False, - **kwargs): - - collection = ScoringEngineCollection() - collection.scoring_engines = [ScoringEngine.convert_with_links( - se, expand) for se in scoring_engines] - - if 'sort_key' in kwargs: - reverse = False - if kwargs['sort_key'] == 'name': - if 'sort_dir' in kwargs: - reverse = True if kwargs['sort_dir'] == 'desc' else False - collection.goals = sorted( - collection.scoring_engines, - key=lambda se: se.name, - reverse=reverse) - - collection.next = collection.get_next(limit, url=url, **kwargs) - return collection - - @classmethod - def sample(cls): - sample = cls() - sample.scoring_engines = [ScoringEngine.sample(expand=False)] - return sample - - -class ScoringEngineController(rest.RestController): - """REST controller for Scoring Engines.""" - def __init__(self): - super(ScoringEngineController, self).__init__() - - from_scoring_engines = False - """A flag to indicate if the requests to this controller are coming - from the top-level resource Scoring Engines.""" - - _custom_actions = { - 'detail': ['GET'], - } - - def _get_scoring_engines_collection(self, marker, limit, - sort_key, sort_dir, expand=False, - resource_url=None): - - limit = api_utils.validate_limit(limit) - api_utils.validate_sort_dir(sort_dir) - - marker_obj = None - if marker: - marker_obj = objects.ScoringEngine.get_by_uuid( - pecan.request.context, marker) - - filters = {} - - sort_db_key = sort_key - - scoring_engines = objects.ScoringEngine.list( - context=pecan.request.context, - limit=limit, - marker=marker_obj, - sort_key=sort_db_key, - sort_dir=sort_dir, - filters=filters) - - return ScoringEngineCollection.convert_with_links( - scoring_engines, - limit, - url=resource_url, - expand=expand, - sort_key=sort_key, - sort_dir=sort_dir) - - @wsme_pecan.wsexpose(ScoringEngineCollection, wtypes.text, - int, wtypes.text, wtypes.text) - def get_all(self, 
marker=None, limit=None, sort_key='id', - sort_dir='asc'): - """Retrieve a list of Scoring Engines. - - :param marker: pagination marker for large data sets. - :param limit: maximum number of resources to return in a single result. - :param sort_key: column to sort results by. Default: name. - :param sort_dir: direction to sort. "asc" or "desc". Default: asc. - """ - context = pecan.request.context - policy.enforce(context, 'scoring_engine:get_all', - action='scoring_engine:get_all') - - return self._get_scoring_engines_collection( - marker, limit, sort_key, sort_dir) - - @wsme_pecan.wsexpose(ScoringEngineCollection, wtypes.text, - int, wtypes.text, wtypes.text) - def detail(self, marker=None, limit=None, sort_key='id', sort_dir='asc'): - """Retrieve a list of Scoring Engines with detail. - - :param marker: pagination marker for large data sets. - :param limit: maximum number of resources to return in a single result. - :param sort_key: column to sort results by. Default: name. - :param sort_dir: direction to sort. "asc" or "desc". Default: asc. - """ - context = pecan.request.context - policy.enforce(context, 'scoring_engine:detail', - action='scoring_engine:detail') - - parent = pecan.request.path.split('/')[:-1][-1] - if parent != "scoring_engines": - raise exception.HTTPNotFound - expand = True - resource_url = '/'.join(['scoring_engines', 'detail']) - return self._get_scoring_engines_collection( - marker, limit, sort_key, sort_dir, expand, resource_url) - - @wsme_pecan.wsexpose(ScoringEngine, wtypes.text) - def get_one(self, scoring_engine): - """Retrieve information about the given Scoring Engine. - - :param scoring_engine_name: The name of the Scoring Engine. 
- """ - context = pecan.request.context - policy.enforce(context, 'scoring_engine:get', - action='scoring_engine:get') - - if self.from_scoring_engines: - raise exception.OperationNotPermitted - - rpc_scoring_engine = api_utils.get_resource( - 'ScoringEngine', scoring_engine) - - return ScoringEngine.convert_with_links(rpc_scoring_engine) diff --git a/watcher/api/controllers/v1/service.py b/watcher/api/controllers/v1/service.py deleted file mode 100644 index 63ea179..0000000 --- a/watcher/api/controllers/v1/service.py +++ /dev/null @@ -1,264 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 Servionica -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -Service mechanism provides ability to monitor Watcher services state. -""" - -import datetime -import six - -from oslo_config import cfg -from oslo_log import log -from oslo_utils import timeutils -import pecan -from pecan import rest -import wsme -from wsme import types as wtypes -import wsmeext.pecan as wsme_pecan - -from watcher.api.controllers import base -from watcher.api.controllers import link -from watcher.api.controllers.v1 import collection -from watcher.api.controllers.v1 import utils as api_utils -from watcher.common import context -from watcher.common import exception -from watcher.common import policy -from watcher import objects - - -CONF = cfg.CONF -LOG = log.getLogger(__name__) - - -class Service(base.APIBase): - """API representation of a service. 
- - This class enforces type checking and value constraints, and converts - between the internal object model and the API representation of a service. - """ - - _status = None - _context = context.RequestContext(is_admin=True) - - def _get_status(self): - return self._status - - def _set_status(self, id): - service = objects.Service.get(pecan.request.context, id) - last_heartbeat = (service.last_seen_up or service.updated_at - or service.created_at) - if isinstance(last_heartbeat, six.string_types): - # NOTE(russellb) If this service came in over rpc via - # conductor, then the timestamp will be a string and needs to be - # converted back to a datetime. - last_heartbeat = timeutils.parse_strtime(last_heartbeat) - else: - # Objects have proper UTC timezones, but the timeutils comparison - # below does not (and will fail) - last_heartbeat = last_heartbeat.replace(tzinfo=None) - elapsed = timeutils.delta_seconds(last_heartbeat, timeutils.utcnow()) - is_up = abs(elapsed) <= CONF.service_down_time - if not is_up: - LOG.warning('Seems service %(name)s on host %(host)s is down. ' - 'Last heartbeat was %(lhb)s.' 
- 'Elapsed time is %(el)s', - {'name': service.name, - 'host': service.host, - 'lhb': str(last_heartbeat), 'el': str(elapsed)}) - self._status = objects.service.ServiceStatus.FAILED - else: - self._status = objects.service.ServiceStatus.ACTIVE - - id = wsme.wsattr(int, readonly=True) - """ID for this service.""" - - name = wtypes.text - """Name of the service.""" - - host = wtypes.text - """Host where service is placed on.""" - - last_seen_up = wsme.wsattr(datetime.datetime, readonly=True) - """Time when Watcher service sent latest heartbeat.""" - - status = wsme.wsproperty(wtypes.text, _get_status, _set_status, - mandatory=True) - - links = wsme.wsattr([link.Link], readonly=True) - """A list containing a self link.""" - - def __init__(self, **kwargs): - super(Service, self).__init__() - - fields = list(objects.Service.fields.keys()) + ['status'] - self.fields = [] - for field in fields: - self.fields.append(field) - setattr(self, field, kwargs.get( - field if field != 'status' else 'id', wtypes.Unset)) - - @staticmethod - def _convert_with_links(service, url, expand=True): - if not expand: - service.unset_fields_except( - ['id', 'name', 'host', 'status']) - - service.links = [ - link.Link.make_link('self', url, 'services', str(service.id)), - link.Link.make_link('bookmark', url, 'services', str(service.id), - bookmark=True)] - return service - - @classmethod - def convert_with_links(cls, service, expand=True): - service = Service(**service.as_dict()) - return cls._convert_with_links( - service, pecan.request.host_url, expand) - - @classmethod - def sample(cls, expand=True): - sample = cls(id=1, - name='watcher-applier', - host='Controller', - last_seen_up=datetime.datetime(2016, 1, 1)) - return cls._convert_with_links(sample, 'http://localhost:9322', expand) - - -class ServiceCollection(collection.Collection): - """API representation of a collection of services.""" - - services = [Service] - """A list containing services objects""" - - def __init__(self, 
**kwargs): - super(ServiceCollection, self).__init__() - self._type = 'services' - - @staticmethod - def convert_with_links(services, limit, url=None, expand=False, - **kwargs): - service_collection = ServiceCollection() - service_collection.services = [ - Service.convert_with_links(g, expand) for g in services] - - if 'sort_key' in kwargs: - reverse = False - if kwargs['sort_key'] == 'service': - if 'sort_dir' in kwargs: - reverse = True if kwargs['sort_dir'] == 'desc' else False - service_collection.services = sorted( - service_collection.services, - key=lambda service: service.id, - reverse=reverse) - - service_collection.next = service_collection.get_next( - limit, url=url, marker_field='id', **kwargs) - return service_collection - - @classmethod - def sample(cls): - sample = cls() - sample.services = [Service.sample(expand=False)] - return sample - - -class ServicesController(rest.RestController): - """REST controller for Services.""" - def __init__(self): - super(ServicesController, self).__init__() - - from_services = False - """A flag to indicate if the requests to this controller are coming - from the top-level resource Services.""" - - _custom_actions = { - 'detail': ['GET'], - } - - def _get_services_collection(self, marker, limit, sort_key, sort_dir, - expand=False, resource_url=None): - limit = api_utils.validate_limit(limit) - api_utils.validate_sort_dir(sort_dir) - - sort_db_key = (sort_key if sort_key in objects.Service.fields.keys() - else None) - - marker_obj = None - if marker: - marker_obj = objects.Service.get( - pecan.request.context, marker) - - services = objects.Service.list( - pecan.request.context, limit, marker_obj, - sort_key=sort_db_key, sort_dir=sort_dir) - - return ServiceCollection.convert_with_links( - services, limit, url=resource_url, expand=expand, - sort_key=sort_key, sort_dir=sort_dir) - - @wsme_pecan.wsexpose(ServiceCollection, int, int, wtypes.text, wtypes.text) - def get_all(self, marker=None, limit=None, sort_key='id', 
sort_dir='asc'): - """Retrieve a list of services. - - :param marker: pagination marker for large data sets. - :param limit: maximum number of resources to return in a single result. - :param sort_key: column to sort results by. Default: id. - :param sort_dir: direction to sort. "asc" or "desc". Default: asc. - """ - context = pecan.request.context - policy.enforce(context, 'service:get_all', - action='service:get_all') - - return self._get_services_collection(marker, limit, sort_key, sort_dir) - - @wsme_pecan.wsexpose(ServiceCollection, int, int, wtypes.text, wtypes.text) - def detail(self, marker=None, limit=None, sort_key='id', sort_dir='asc'): - """Retrieve a list of services with detail. - - :param marker: pagination marker for large data sets. - :param limit: maximum number of resources to return in a single result. - :param sort_key: column to sort results by. Default: id. - :param sort_dir: direction to sort. "asc" or "desc". Default: asc. - """ - context = pecan.request.context - policy.enforce(context, 'service:detail', - action='service:detail') - # NOTE(lucasagomes): /detail should only work agaist collections - parent = pecan.request.path.split('/')[:-1][-1] - if parent != "services": - raise exception.HTTPNotFound - expand = True - resource_url = '/'.join(['services', 'detail']) - - return self._get_services_collection( - marker, limit, sort_key, sort_dir, expand, resource_url) - - @wsme_pecan.wsexpose(Service, wtypes.text) - def get_one(self, service): - """Retrieve information about the given service. - - :param service: ID or name of the service. 
- """ - if self.from_services: - raise exception.OperationNotPermitted - - context = pecan.request.context - rpc_service = api_utils.get_resource('Service', service) - policy.enforce(context, 'service:get', rpc_service, - action='service:get') - - return Service.convert_with_links(rpc_service) diff --git a/watcher/api/controllers/v1/strategy.py b/watcher/api/controllers/v1/strategy.py deleted file mode 100644 index 2c74da1..0000000 --- a/watcher/api/controllers/v1/strategy.py +++ /dev/null @@ -1,305 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -A :ref:`Strategy ` is an algorithm implementation which is -able to find a :ref:`Solution ` for a given -:ref:`Goal `. - -There may be several potential strategies which are able to achieve the same -:ref:`Goal `. This is why it is possible to configure which -specific :ref:`Strategy ` should be used for each goal. - -Some strategies may provide better optimization results but may take more time -to find an optimal :ref:`Solution `. 
-""" - -import pecan -from pecan import rest -import wsme -from wsme import types as wtypes -import wsmeext.pecan as wsme_pecan - -from watcher.api.controllers import base -from watcher.api.controllers import link -from watcher.api.controllers.v1 import collection -from watcher.api.controllers.v1 import types -from watcher.api.controllers.v1 import utils as api_utils -from watcher.common import exception -from watcher.common import policy -from watcher.common import utils as common_utils -from watcher import objects - - -class Strategy(base.APIBase): - """API representation of a strategy. - - This class enforces type checking and value constraints, and converts - between the internal object model and the API representation of a strategy. - """ - _goal_uuid = None - _goal_name = None - - def _get_goal(self, value): - if value == wtypes.Unset: - return None - goal = None - try: - if (common_utils.is_uuid_like(value) or - common_utils.is_int_like(value)): - goal = objects.Goal.get(pecan.request.context, value) - else: - goal = objects.Goal.get_by_name(pecan.request.context, value) - except exception.GoalNotFound: - pass - if goal: - self.goal_id = goal.id - return goal - - def _get_goal_uuid(self): - return self._goal_uuid - - def _set_goal_uuid(self, value): - if value and self._goal_uuid != value: - self._goal_uuid = None - goal = self._get_goal(value) - if goal: - self._goal_uuid = goal.uuid - - def _get_goal_name(self): - return self._goal_name - - def _set_goal_name(self, value): - if value and self._goal_name != value: - self._goal_name = None - goal = self._get_goal(value) - if goal: - self._goal_name = goal.name - - uuid = types.uuid - """Unique UUID for this strategy""" - - name = wtypes.text - """Name of the strategy""" - - display_name = wtypes.text - """Localized name of the strategy""" - - links = wsme.wsattr([link.Link], readonly=True) - """A list containing a self link and associated goal links""" - - goal_uuid = wsme.wsproperty(wtypes.text, 
_get_goal_uuid, _set_goal_uuid, - mandatory=True) - """The UUID of the goal this audit refers to""" - - goal_name = wsme.wsproperty(wtypes.text, _get_goal_name, _set_goal_name, - mandatory=False) - """The name of the goal this audit refers to""" - - parameters_spec = {wtypes.text: types.jsontype} - """Parameters spec dict""" - - def __init__(self, **kwargs): - super(Strategy, self).__init__() - - self.fields = [] - self.fields.append('uuid') - self.fields.append('name') - self.fields.append('display_name') - self.fields.append('goal_uuid') - self.fields.append('goal_name') - self.fields.append('parameters_spec') - setattr(self, 'uuid', kwargs.get('uuid', wtypes.Unset)) - setattr(self, 'name', kwargs.get('name', wtypes.Unset)) - setattr(self, 'display_name', kwargs.get('display_name', wtypes.Unset)) - setattr(self, 'goal_uuid', kwargs.get('goal_id', wtypes.Unset)) - setattr(self, 'goal_name', kwargs.get('goal_id', wtypes.Unset)) - setattr(self, 'parameters_spec', kwargs.get('parameters_spec', - wtypes.Unset)) - - @staticmethod - def _convert_with_links(strategy, url, expand=True): - if not expand: - strategy.unset_fields_except( - ['uuid', 'name', 'display_name', 'goal_uuid', 'goal_name']) - - strategy.links = [ - link.Link.make_link('self', url, 'strategies', strategy.uuid), - link.Link.make_link('bookmark', url, 'strategies', strategy.uuid, - bookmark=True)] - return strategy - - @classmethod - def convert_with_links(cls, strategy, expand=True): - strategy = Strategy(**strategy.as_dict()) - return cls._convert_with_links( - strategy, pecan.request.host_url, expand) - - @classmethod - def sample(cls, expand=True): - sample = cls(uuid='27e3153e-d5bf-4b7e-b517-fb518e17f34c', - name='DUMMY', - display_name='Dummy strategy') - return cls._convert_with_links(sample, 'http://localhost:9322', expand) - - -class StrategyCollection(collection.Collection): - """API representation of a collection of strategies.""" - - strategies = [Strategy] - """A list containing strategies 
objects""" - - def __init__(self, **kwargs): - super(StrategyCollection, self).__init__() - self._type = 'strategies' - - @staticmethod - def convert_with_links(strategies, limit, url=None, expand=False, - **kwargs): - strategy_collection = StrategyCollection() - strategy_collection.strategies = [ - Strategy.convert_with_links(g, expand) for g in strategies] - - if 'sort_key' in kwargs: - reverse = False - if kwargs['sort_key'] == 'strategy': - if 'sort_dir' in kwargs: - reverse = True if kwargs['sort_dir'] == 'desc' else False - strategy_collection.strategies = sorted( - strategy_collection.strategies, - key=lambda strategy: strategy.uuid, - reverse=reverse) - - strategy_collection.next = strategy_collection.get_next( - limit, url=url, **kwargs) - return strategy_collection - - @classmethod - def sample(cls): - sample = cls() - sample.strategies = [Strategy.sample(expand=False)] - return sample - - -class StrategiesController(rest.RestController): - """REST controller for Strategies.""" - def __init__(self): - super(StrategiesController, self).__init__() - - from_strategies = False - """A flag to indicate if the requests to this controller are coming - from the top-level resource Strategies.""" - - _custom_actions = { - 'detail': ['GET'], - } - - def _get_strategies_collection(self, filters, marker, limit, sort_key, - sort_dir, expand=False, resource_url=None): - api_utils.validate_search_filters( - filters, list(objects.strategy.Strategy.fields.keys()) + - ["goal_uuid", "goal_name"]) - limit = api_utils.validate_limit(limit) - api_utils.validate_sort_dir(sort_dir) - - sort_db_key = (sort_key if sort_key in objects.Strategy.fields.keys() - else None) - - marker_obj = None - if marker: - marker_obj = objects.Strategy.get_by_uuid( - pecan.request.context, marker) - - strategies = objects.Strategy.list( - pecan.request.context, limit, marker_obj, filters=filters, - sort_key=sort_db_key, sort_dir=sort_dir) - - return StrategyCollection.convert_with_links( - 
strategies, limit, url=resource_url, expand=expand, - sort_key=sort_key, sort_dir=sort_dir) - - @wsme_pecan.wsexpose(StrategyCollection, wtypes.text, wtypes.text, - int, wtypes.text, wtypes.text) - def get_all(self, goal=None, marker=None, limit=None, - sort_key='id', sort_dir='asc'): - """Retrieve a list of strategies. - - :param goal: goal UUID or name to filter by. - :param marker: pagination marker for large data sets. - :param limit: maximum number of resources to return in a single result. - :param sort_key: column to sort results by. Default: id. - :param sort_dir: direction to sort. "asc" or "desc". Default: asc. - """ - context = pecan.request.context - policy.enforce(context, 'strategy:get_all', - action='strategy:get_all') - filters = {} - if goal: - if common_utils.is_uuid_like(goal): - filters['goal_uuid'] = goal - else: - filters['goal_name'] = goal - - return self._get_strategies_collection( - filters, marker, limit, sort_key, sort_dir) - - @wsme_pecan.wsexpose(StrategyCollection, wtypes.text, wtypes.text, int, - wtypes.text, wtypes.text) - def detail(self, goal=None, marker=None, limit=None, - sort_key='id', sort_dir='asc'): - """Retrieve a list of strategies with detail. - - :param goal: goal UUID or name to filter by. - :param marker: pagination marker for large data sets. - :param limit: maximum number of resources to return in a single result. - :param sort_key: column to sort results by. Default: id. - :param sort_dir: direction to sort. "asc" or "desc". Default: asc. 
- """ - context = pecan.request.context - policy.enforce(context, 'strategy:detail', - action='strategy:detail') - # NOTE(lucasagomes): /detail should only work agaist collections - parent = pecan.request.path.split('/')[:-1][-1] - if parent != "strategies": - raise exception.HTTPNotFound - expand = True - resource_url = '/'.join(['strategies', 'detail']) - - filters = {} - if goal: - if common_utils.is_uuid_like(goal): - filters['goal_uuid'] = goal - else: - filters['goal_name'] = goal - - return self._get_strategies_collection( - filters, marker, limit, sort_key, sort_dir, expand, resource_url) - - @wsme_pecan.wsexpose(Strategy, wtypes.text) - def get_one(self, strategy): - """Retrieve information about the given strategy. - - :param strategy: UUID or name of the strategy. - """ - if self.from_strategies: - raise exception.OperationNotPermitted - - context = pecan.request.context - rpc_strategy = api_utils.get_resource('Strategy', strategy) - policy.enforce(context, 'strategy:get', rpc_strategy, - action='strategy:get') - - return Strategy.convert_with_links(rpc_strategy) diff --git a/watcher/api/controllers/v1/types.py b/watcher/api/controllers/v1/types.py deleted file mode 100644 index 77d41b6..0000000 --- a/watcher/api/controllers/v1/types.py +++ /dev/null @@ -1,233 +0,0 @@ -# Copyright 2013 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from oslo_serialization import jsonutils -from oslo_utils import strutils -import six -import wsme -from wsme import types as wtypes - -from watcher._i18n import _ -from watcher.common import exception -from watcher.common import utils - - -class UuidOrNameType(wtypes.UserType): - """A simple UUID or logical name type.""" - - basetype = wtypes.text - name = 'uuid_or_name' - - @staticmethod - def validate(value): - if not (utils.is_uuid_like(value) or utils.is_hostname_safe(value)): - raise exception.InvalidUuidOrName(name=value) - return value - - @staticmethod - def frombasetype(value): - if value is None: - return None - return UuidOrNameType.validate(value) - - -class IntervalOrCron(wtypes.UserType): - """A simple int value or cron syntax type""" - - basetype = wtypes.text - name = 'interval_or_cron' - - @staticmethod - def validate(value): - if not (utils.is_int_like(value) or utils.is_cron_like(value)): - raise exception.InvalidIntervalOrCron(name=value) - return value - - @staticmethod - def frombasetype(value): - if value is None: - return None - return IntervalOrCron.validate(value) - - -interval_or_cron = IntervalOrCron() - - -class NameType(wtypes.UserType): - """A simple logical name type.""" - - basetype = wtypes.text - name = 'name' - - @staticmethod - def validate(value): - if not utils.is_hostname_safe(value): - raise exception.InvalidName(name=value) - return value - - @staticmethod - def frombasetype(value): - if value is None: - return None - return NameType.validate(value) - - -class UuidType(wtypes.UserType): - """A simple UUID type.""" - - basetype = wtypes.text - name = 'uuid' - - @staticmethod - def validate(value): - if not utils.is_uuid_like(value): - raise exception.InvalidUUID(uuid=value) - return value - - @staticmethod - def frombasetype(value): - if value is None: - return None - return UuidType.validate(value) - - -class BooleanType(wtypes.UserType): - """A simple boolean type.""" - - basetype = wtypes.text - name = 'boolean' - - 
@staticmethod - def validate(value): - try: - return strutils.bool_from_string(value, strict=True) - except ValueError as e: - # raise Invalid to return 400 (BadRequest) in the API - raise exception.Invalid(e) - - @staticmethod - def frombasetype(value): - if value is None: - return None - return BooleanType.validate(value) - - -class JsonType(wtypes.UserType): - """A simple JSON type.""" - - basetype = wtypes.text - name = 'json' - - def __str__(self): - # These are the json serializable native types - return ' | '.join(map(str, (wtypes.text, six.integer_types, float, - BooleanType, list, dict, None))) - - @staticmethod - def validate(value): - try: - jsonutils.dumps(value, default=None) - except TypeError: - raise exception.Invalid(_('%s is not JSON serializable') % value) - else: - return value - - @staticmethod - def frombasetype(value): - return JsonType.validate(value) - - -uuid = UuidType() -boolean = BooleanType() -jsontype = JsonType() - - -class MultiType(wtypes.UserType): - """A complex type that represents one or more types. - - Used for validating that a value is an instance of one of the types. - - :param types: Variable-length list of types. - - """ - def __init__(self, *types): - self.types = types - - def __str__(self): - return ' | '.join(map(str, self.types)) - - def validate(self, value): - for t in self.types: - if t is wsme.types.text and isinstance(value, wsme.types.bytes): - value = value.decode() - if isinstance(value, t): - return value - else: - raise ValueError( - _("Wrong type. 
Expected '%(type)s', got '%(value)s'"), - type=self.types, value=type(value) - ) - - -class JsonPatchType(wtypes.Base): - """A complex type that represents a single json-patch operation.""" - - path = wtypes.wsattr(wtypes.StringType(pattern='^(/[\w-]+)+$'), - mandatory=True) - op = wtypes.wsattr(wtypes.Enum(str, 'add', 'replace', 'remove'), - mandatory=True) - value = wsme.wsattr(jsontype, default=wtypes.Unset) - - @staticmethod - def internal_attrs(): - """Returns a list of internal attributes. - - Internal attributes can't be added, replaced or removed. This - method may be overwritten by derived class. - - """ - return ['/created_at', '/id', '/links', '/updated_at', - '/deleted_at', '/uuid'] - - @staticmethod - def mandatory_attrs(): - """Returns a list of mandatory attributes. - - Mandatory attributes can't be removed from the document. This - method should be overwritten by derived class. - - """ - return [] - - @staticmethod - def validate(patch): - _path = '/{0}'.format(patch.path.split('/')[1]) - if _path in patch.internal_attrs(): - msg = _("'%s' is an internal attribute and can not be updated") - raise wsme.exc.ClientSideError(msg % patch.path) - - if patch.path in patch.mandatory_attrs() and patch.op == 'remove': - msg = _("'%s' is a mandatory attribute and can not be removed") - raise wsme.exc.ClientSideError(msg % patch.path) - - if patch.op != 'remove': - if patch.value is wsme.Unset: - msg = _("'add' and 'replace' operations needs value") - raise wsme.exc.ClientSideError(msg) - - ret = {'path': patch.path, 'op': patch.op} - if patch.value is not wsme.Unset: - ret['value'] = patch.value - return ret diff --git a/watcher/api/controllers/v1/utils.py b/watcher/api/controllers/v1/utils.py deleted file mode 100644 index 2ad5b49..0000000 --- a/watcher/api/controllers/v1/utils.py +++ /dev/null @@ -1,123 +0,0 @@ -# Copyright 2013 Red Hat, Inc. -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import jsonpatch -from oslo_config import cfg -from oslo_utils import reflection -from oslo_utils import uuidutils -import pecan -import wsme - -from watcher._i18n import _ -from watcher.common import utils -from watcher import objects - -CONF = cfg.CONF - - -JSONPATCH_EXCEPTIONS = (jsonpatch.JsonPatchException, - jsonpatch.JsonPointerException, - KeyError) - - -def validate_limit(limit): - if limit is None: - return CONF.api.max_limit - - if limit <= 0: - # Case where we don't a valid limit value - raise wsme.exc.ClientSideError(_("Limit must be positive")) - - if limit and not CONF.api.max_limit: - # Case where we don't have an upper limit - return limit - - return min(CONF.api.max_limit, limit) - - -def validate_sort_dir(sort_dir): - if sort_dir not in ['asc', 'desc']: - raise wsme.exc.ClientSideError(_("Invalid sort direction: %s. " - "Acceptable values are " - "'asc' or 'desc'") % sort_dir) - - -def validate_search_filters(filters, allowed_fields): - # Very lightweight validation for now - # todo: improve this (e.g. 
https://www.parse.com/docs/rest/guide/#queries) - for filter_name in filters.keys(): - if filter_name not in allowed_fields: - raise wsme.exc.ClientSideError( - _("Invalid filter: %s") % filter_name) - - -def apply_jsonpatch(doc, patch): - for p in patch: - if p['op'] == 'add' and p['path'].count('/') == 1: - if p['path'].lstrip('/') not in doc: - msg = _('Adding a new attribute (%s) to the root of ' - ' the resource is not allowed') - raise wsme.exc.ClientSideError(msg % p['path']) - return jsonpatch.apply_patch(doc, jsonpatch.JsonPatch(patch)) - - -def get_patch_value(patch, key): - for p in patch: - if p['op'] == 'replace' and p['path'] == '/%s' % key: - return p['value'] - - -def check_audit_state_transition(patch, initial): - is_transition_valid = True - state_value = get_patch_value(patch, "state") - if state_value is not None: - is_transition_valid = objects.audit.AuditStateTransitionManager( - ).check_transition(initial, state_value) - return is_transition_valid - - -def as_filters_dict(**filters): - filters_dict = {} - for filter_name, filter_value in filters.items(): - if filter_value: - filters_dict[filter_name] = filter_value - - return filters_dict - - -def get_resource(resource, resource_id, eager=False): - """Get the resource from the uuid, id or logical name. - - :param resource: the resource type. - :param resource_id: the UUID, ID or logical name of the resource. - - :returns: The resource. 
- """ - resource = getattr(objects, resource) - - _get = None - if utils.is_int_like(resource_id): - resource_id = int(resource_id) - _get = resource.get - elif uuidutils.is_uuid_like(resource_id): - _get = resource.get_by_uuid - else: - _get = resource.get_by_name - - method_signature = reflection.get_signature(_get) - if 'eager' in method_signature.parameters: - return _get(pecan.request.context, resource_id, eager=eager) - - return _get(pecan.request.context, resource_id) diff --git a/watcher/api/hooks.py b/watcher/api/hooks.py deleted file mode 100644 index 8147e39..0000000 --- a/watcher/api/hooks.py +++ /dev/null @@ -1,119 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Copyright © 2012 New Dream Network, LLC (DreamHost) -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -from oslo_config import cfg -from oslo_utils import importutils -from pecan import hooks -from six.moves import http_client - -from watcher.common import context - - -class ContextHook(hooks.PecanHook): - """Configures a request context and attaches it to the request. - - The following HTTP request headers are used: - - X-User: - Used for context.user. - - X-User-Id: - Used for context.user_id. - - X-Project-Name: - Used for context.project. - - X-Project-Id: - Used for context.project_id. - - X-Auth-Token: - Used for context.auth_token. 
- - """ - - def before(self, state): - headers = state.request.headers - user = headers.get('X-User') - user_id = headers.get('X-User-Id') - project = headers.get('X-Project-Name') - project_id = headers.get('X-Project-Id') - domain_id = headers.get('X-User-Domain-Id') - domain_name = headers.get('X-User-Domain-Name') - auth_token = headers.get('X-Storage-Token') - auth_token = headers.get('X-Auth-Token', auth_token) - show_deleted = headers.get('X-Show-Deleted') - auth_token_info = state.request.environ.get('keystone.token_info') - roles = (headers.get('X-Roles', None) and - headers.get('X-Roles').split(',')) - - auth_url = headers.get('X-Auth-Url') - if auth_url is None: - importutils.import_module('keystonemiddleware.auth_token') - auth_url = cfg.CONF.keystone_authtoken.auth_uri - - state.request.context = context.make_context( - auth_token=auth_token, - auth_url=auth_url, - auth_token_info=auth_token_info, - user=user, - user_id=user_id, - project=project, - project_id=project_id, - domain_id=domain_id, - domain_name=domain_name, - show_deleted=show_deleted, - roles=roles) - - -class NoExceptionTracebackHook(hooks.PecanHook): - """Workaround rpc.common: deserialize_remote_exception. - - deserialize_remote_exception builds rpc exception traceback into error - message which is then sent to the client. Such behavior is a security - concern so this hook is aimed to cut-off traceback from the error message. - """ - # NOTE(max_lobur): 'after' hook used instead of 'on_error' because - # 'on_error' never fired for wsme+pecan pair. wsme @wsexpose decorator - # catches and handles all the errors, so 'on_error' dedicated for unhandled - # exceptions never fired. - def after(self, state): - # Omit empty body. Some errors may not have body at this level yet. - if not state.response.body: - return - - # Do nothing if there is no error. - # Status codes in the range 200 (OK) to 399 (400 = BAD_REQUEST) are not - # an error. 
- if (http_client.OK <= state.response.status_int < - http_client.BAD_REQUEST): - return - - json_body = state.response.json - # Do not remove traceback when traceback config is set - if cfg.CONF.debug: - return - - faultstring = json_body.get('faultstring') - traceback_marker = 'Traceback (most recent call last):' - if faultstring and traceback_marker in faultstring: - # Cut-off traceback. - faultstring = faultstring.split(traceback_marker, 1)[0] - # Remove trailing newlines and spaces if any. - json_body['faultstring'] = faultstring.rstrip() - # Replace the whole json. Cannot change original one because it's - # generated on the fly. - state.response.json = json_body diff --git a/watcher/api/middleware/__init__.py b/watcher/api/middleware/__init__.py deleted file mode 100644 index 6141cb9..0000000 --- a/watcher/api/middleware/__init__.py +++ /dev/null @@ -1,25 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- - -from watcher.api.middleware import auth_token -from watcher.api.middleware import parsable_error - - -ParsableErrorMiddleware = parsable_error.ParsableErrorMiddleware -AuthTokenMiddleware = auth_token.AuthTokenMiddleware - -__all__ = (ParsableErrorMiddleware, - AuthTokenMiddleware) diff --git a/watcher/api/middleware/auth_token.py b/watcher/api/middleware/auth_token.py deleted file mode 100644 index 585d495..0000000 --- a/watcher/api/middleware/auth_token.py +++ /dev/null @@ -1,61 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import re - -from oslo_log import log - -from keystonemiddleware import auth_token - -from watcher._i18n import _ -from watcher.common import exception -from watcher.common import utils - -LOG = log.getLogger(__name__) - - -class AuthTokenMiddleware(auth_token.AuthProtocol): - """A wrapper on Keystone auth_token middleware. - - Does not perform verification of authentication tokens - for public routes in the API. 
- - """ - def __init__(self, app, conf, public_api_routes=()): - route_pattern_tpl = '%s(\.json|\.xml)?$' - - try: - self.public_api_routes = [re.compile(route_pattern_tpl % route_tpl) - for route_tpl in public_api_routes] - except re.error as e: - LOG.exception(e) - raise exception.ConfigInvalid( - error_msg=_('Cannot compile public API routes')) - - super(AuthTokenMiddleware, self).__init__(app, conf) - - def __call__(self, env, start_response): - path = utils.safe_rstrip(env.get('PATH_INFO'), '/') - - # The information whether the API call is being performed against the - # public API is required for some other components. Saving it to the - # WSGI environment is reasonable thereby. - env['is_public_api'] = any(map(lambda pattern: re.match(pattern, path), - self.public_api_routes)) - - if env['is_public_api']: - return self._app(env, start_response) - - return super(AuthTokenMiddleware, self).__call__(env, start_response) diff --git a/watcher/api/middleware/parsable_error.py b/watcher/api/middleware/parsable_error.py deleted file mode 100644 index 9d905ab..0000000 --- a/watcher/api/middleware/parsable_error.py +++ /dev/null @@ -1,97 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Copyright © 2012 New Dream Network, LLC (DreamHost) -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Middleware to replace the plain text message body of an error -response with one formatted so the client can parse it. 
- -Based on pecan.middleware.errordocument -""" - -from xml import etree as et - -from oslo_log import log -from oslo_serialization import jsonutils -import six -import webob - -from watcher._i18n import _ - -LOG = log.getLogger(__name__) - - -class ParsableErrorMiddleware(object): - """Replace error body with something the client can parse.""" - - def __init__(self, app): - self.app = app - - def __call__(self, environ, start_response): - # Request for this state, modified by replace_start_response() - # and used when an error is being reported. - state = {} - - def replacement_start_response(status, headers, exc_info=None): - """Overrides the default response to make errors parsable.""" - try: - status_code = int(status.split(' ')[0]) - state['status_code'] = status_code - except (ValueError, TypeError): # pragma: nocover - raise Exception(_( - 'ErrorDocumentMiddleware received an invalid ' - 'status %s') % status) - else: - if (state['status_code'] // 100) not in (2, 3): - # Remove some headers so we can replace them later - # when we have the full error message and can - # compute the length. - headers = [(h, v) - for (h, v) in headers - if h not in ('Content-Length', 'Content-Type')] - # Save the headers in case we need to modify them. 
- state['headers'] = headers - return start_response(status, headers, exc_info) - - app_iter = self.app(environ, replacement_start_response) - if (state['status_code'] // 100) not in (2, 3): - req = webob.Request(environ) - if ( - req.accept.best_match( - ['application/json', - 'application/xml']) == 'application/xml' - ): - try: - # simple check xml is valid - body = [ - et.ElementTree.tostring( - et.ElementTree.Element( - 'error_message', text='\n'.join(app_iter)))] - except et.ElementTree.ParseError as err: - LOG.error('Error parsing HTTP response: %s', err) - body = ['%s' - '' % state['status_code']] - state['headers'].append(('Content-Type', 'application/xml')) - else: - if six.PY3: - app_iter = [i.decode('utf-8') for i in app_iter] - body = [jsonutils.dumps( - {'error_message': '\n'.join(app_iter)})] - if six.PY3: - body = [item.encode('utf-8') for item in body] - state['headers'].append(('Content-Type', 'application/json')) - state['headers'].append(('Content-Length', str(len(body[0])))) - else: - body = app_iter - return body diff --git a/watcher/api/scheduling.py b/watcher/api/scheduling.py deleted file mode 100644 index 4a2b053..0000000 --- a/watcher/api/scheduling.py +++ /dev/null @@ -1,97 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2017 Servionica -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- - -import datetime -from oslo_config import cfg -from oslo_log import log -from oslo_utils import timeutils -import six - -from watcher.common import context as watcher_context -from watcher.common import scheduling -from watcher import notifications - -from watcher import objects - -CONF = cfg.CONF -LOG = log.getLogger(__name__) - - -class APISchedulingService(scheduling.BackgroundSchedulerService): - - def __init__(self, gconfig=None, **options): - self.services_status = {} - gconfig = None or {} - super(APISchedulingService, self).__init__(gconfig, **options) - - def get_services_status(self, context): - services = objects.service.Service.list(context) - for service in services: - result = self.get_service_status(context, service.id) - if service.id not in self.services_status.keys(): - self.services_status[service.id] = result - continue - if self.services_status[service.id] != result: - self.services_status[service.id] = result - notifications.service.send_service_update(context, service, - state=result) - - def get_service_status(self, context, service_id): - service = objects.Service.get(context, service_id) - last_heartbeat = (service.last_seen_up or service.updated_at - or service.created_at) - if isinstance(last_heartbeat, six.string_types): - # NOTE(russellb) If this service came in over rpc via - # conductor, then the timestamp will be a string and needs to be - # converted back to a datetime. - last_heartbeat = timeutils.parse_strtime(last_heartbeat) - else: - # Objects have proper UTC timezones, but the timeutils comparison - # below does not (and will fail) - last_heartbeat = last_heartbeat.replace(tzinfo=None) - elapsed = timeutils.delta_seconds(last_heartbeat, timeutils.utcnow()) - is_up = abs(elapsed) <= CONF.service_down_time - if not is_up: - LOG.warning('Seems service %(name)s on host %(host)s is down. ' - 'Last heartbeat was %(lhb)s. 
Elapsed time is %(el)s', - {'name': service.name, - 'host': service.host, - 'lhb': str(last_heartbeat), 'el': str(elapsed)}) - return objects.service.ServiceStatus.FAILED - - return objects.service.ServiceStatus.ACTIVE - - def start(self): - """Start service.""" - context = watcher_context.make_context(is_admin=True) - self.add_job(self.get_services_status, name='service_status', - trigger='interval', jobstore='default', args=[context], - next_run_time=datetime.datetime.now(), seconds=60) - super(APISchedulingService, self).start() - - def stop(self): - """Stop service.""" - self.shutdown() - - def wait(self): - """Wait for service to complete.""" - - def reset(self): - """Reset service. - - Called in case service running in daemon mode receives SIGHUP. - """ diff --git a/watcher/applier/__init__.py b/watcher/applier/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/applier/action_plan/__init__.py b/watcher/applier/action_plan/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/applier/action_plan/base.py b/watcher/applier/action_plan/base.py deleted file mode 100644 index dbd40a6..0000000 --- a/watcher/applier/action_plan/base.py +++ /dev/null @@ -1,28 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Authors: Jean-Emile DARTOIS -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - -import abc -import six - - -@six.add_metaclass(abc.ABCMeta) -class BaseActionPlanHandler(object): - @abc.abstractmethod - def execute(self): - raise NotImplementedError() diff --git a/watcher/applier/action_plan/default.py b/watcher/applier/action_plan/default.py deleted file mode 100644 index a63221e..0000000 --- a/watcher/applier/action_plan/default.py +++ /dev/null @@ -1,85 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Authors: Jean-Emile DARTOIS -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from oslo_log import log - -from watcher.applier.action_plan import base -from watcher.applier import default -from watcher.common import exception -from watcher import notifications -from watcher import objects -from watcher.objects import fields - -LOG = log.getLogger(__name__) - - -class DefaultActionPlanHandler(base.BaseActionPlanHandler): - - def __init__(self, context, service, action_plan_uuid): - super(DefaultActionPlanHandler, self).__init__() - self.ctx = context - self.service = service - self.action_plan_uuid = action_plan_uuid - - def execute(self): - try: - action_plan = objects.ActionPlan.get_by_uuid( - self.ctx, self.action_plan_uuid, eager=True) - if action_plan.state == objects.action_plan.State.CANCELLED: - self._update_action_from_pending_to_cancelled() - return - action_plan.state = objects.action_plan.State.ONGOING - action_plan.save() - notifications.action_plan.send_action_notification( - self.ctx, action_plan, - action=fields.NotificationAction.EXECUTION, - phase=fields.NotificationPhase.START) - - applier = default.DefaultApplier(self.ctx, self.service) - applier.execute(self.action_plan_uuid) - - action_plan.state = objects.action_plan.State.SUCCEEDED - notifications.action_plan.send_action_notification( - self.ctx, action_plan, - action=fields.NotificationAction.EXECUTION, - phase=fields.NotificationPhase.END) - - except exception.ActionPlanCancelled as e: - LOG.exception(e) - action_plan.state = objects.action_plan.State.CANCELLED - self._update_action_from_pending_to_cancelled() - - except Exception as e: - LOG.exception(e) - action_plan.state = objects.action_plan.State.FAILED - notifications.action_plan.send_action_notification( - self.ctx, action_plan, - action=fields.NotificationAction.EXECUTION, - priority=fields.NotificationPriority.ERROR, - phase=fields.NotificationPhase.ERROR) - finally: - action_plan.save() - - def _update_action_from_pending_to_cancelled(self): - filters = {'action_plan_uuid': self.action_plan_uuid, - 
'state': objects.action.State.PENDING} - actions = objects.Action.list(self.ctx, filters=filters, eager=True) - if actions: - for a in actions: - a.state = objects.action.State.CANCELLED - a.save() diff --git a/watcher/applier/actions/__init__.py b/watcher/applier/actions/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/applier/actions/base.py b/watcher/applier/actions/base.py deleted file mode 100644 index ec1cb5d..0000000 --- a/watcher/applier/actions/base.py +++ /dev/null @@ -1,152 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Authors: Jean-Emile DARTOIS -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import abc - -import jsonschema -import six - -from watcher.common import clients -from watcher.common.loader import loadable - - -@six.add_metaclass(abc.ABCMeta) -class BaseAction(loadable.Loadable): - # NOTE(jed): by convention we decided - # that the attribute "resource_id" is the unique id of - # the resource to which the Action applies to allow us to use it in the - # watcher dashboard and will be nested in input_parameters - RESOURCE_ID = 'resource_id' - - # Add action class name to the list, if implementing abort. 
- ABORT_TRUE = ['Sleep', 'Nop'] - - def __init__(self, config, osc=None): - """Constructor - - :param config: A mapping containing the configuration of this action - :type config: dict - :param osc: an OpenStackClients instance, defaults to None - :type osc: :py:class:`~.OpenStackClients` instance, optional - """ - super(BaseAction, self).__init__(config) - self._input_parameters = {} - self._osc = osc - - @property - def osc(self): - if not self._osc: - self._osc = clients.OpenStackClients() - return self._osc - - @property - def input_parameters(self): - return self._input_parameters - - @input_parameters.setter - def input_parameters(self, p): - self._input_parameters = p - - @property - def resource_id(self): - return self.input_parameters[self.RESOURCE_ID] - - @classmethod - def get_config_opts(cls): - """Defines the configuration options to be associated to this loadable - - :return: A list of configuration options relative to this Loadable - :rtype: list of :class:`oslo_config.cfg.Opt` instances - """ - return [] - - @abc.abstractmethod - def execute(self): - """Executes the main logic of the action - - This method can be used to perform an action on a given set of input - parameters to accomplish some type of operation. This operation may - return a boolean value as a result of its execution. If False, this - will be considered as an error and will then trigger the reverting of - the actions. - - :returns: A flag indicating whether or not the action succeeded - :rtype: bool - """ - raise NotImplementedError() - - @abc.abstractmethod - def revert(self): - """Revert this action - - This method should rollback the resource to its initial state in the - event of a faulty execution. This happens when the action raised an - exception during its :py:meth:`~.BaseAction.execute`. 
- """ - raise NotImplementedError() - - @abc.abstractmethod - def pre_condition(self): - """Hook: called before the execution of an action - - This method can be used to perform some initializations or to make - some more advanced validation on its input parameters. So if you wish - to block its execution based on this factor, `raise` the related - exception. - """ - raise NotImplementedError() - - @abc.abstractmethod - def post_condition(self): - """Hook: called after the execution of an action - - This function is called regardless of whether an action succeeded or - not. So you can use it to perform cleanup operations. - """ - raise NotImplementedError() - - @abc.abstractproperty - def schema(self): - """Defines a Schema that the input parameters shall comply to - - :returns: A schema declaring the input parameters this action should be - provided along with their respective constraints - :rtype: :py:class:`voluptuous.Schema` instance - """ - raise NotImplementedError() - - def validate_parameters(self): - try: - jsonschema.validate(self.input_parameters, self.schema) - return True - except jsonschema.ValidationError as e: - raise e - - @abc.abstractmethod - def get_description(self): - """Description of the action""" - raise NotImplementedError() - - def check_abort(self): - if self.__class__.__name__ is 'Migrate': - if self.migration_type == self.LIVE_MIGRATION: - return True - else: - return False - else: - return bool(self.__class__.__name__ in self.ABORT_TRUE) diff --git a/watcher/applier/actions/change_node_power_state.py b/watcher/applier/actions/change_node_power_state.py deleted file mode 100644 index 1a085e9..0000000 --- a/watcher/applier/actions/change_node_power_state.py +++ /dev/null @@ -1,118 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2017 ZTE -# -# Authors: Li Canwei -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -import enum - -from watcher._i18n import _ -from watcher.applier.actions import base -from watcher.common import exception - - -class NodeState(enum.Enum): - POWERON = 'on' - POWEROFF = 'off' - - -class ChangeNodePowerState(base.BaseAction): - """Compute node power on/off - - By using this action, you will be able to on/off the power of a - compute node. - - The action schema is:: - - schema = Schema({ - 'resource_id': str, - 'state': str, - }) - - The `resource_id` references a ironic node id (list of available - ironic node is returned by this command: ``ironic node-list``). - The `state` value should either be `on` or `off`. 
- """ - - STATE = 'state' - - @property - def schema(self): - return { - 'type': 'object', - 'properties': { - 'resource_id': { - 'type': 'string', - "minlength": 1 - }, - 'state': { - 'type': 'string', - 'enum': [NodeState.POWERON.value, - NodeState.POWEROFF.value] - } - }, - 'required': ['resource_id', 'state'], - 'additionalProperties': False, - } - - @property - def node_uuid(self): - return self.resource_id - - @property - def state(self): - return self.input_parameters.get(self.STATE) - - def execute(self): - target_state = self.state - return self._node_manage_power(target_state) - - def revert(self): - if self.state == NodeState.POWERON.value: - target_state = NodeState.POWEROFF.value - elif self.state == NodeState.POWEROFF.value: - target_state = NodeState.POWERON.value - return self._node_manage_power(target_state) - - def _node_manage_power(self, state): - if state is None: - raise exception.IllegalArgumentException( - message=_("The target state is not defined")) - - result = False - ironic_client = self.osc.ironic() - nova_client = self.osc.nova() - if state == NodeState.POWEROFF.value: - node_info = ironic_client.node.get(self.node_uuid).to_dict() - compute_node_id = node_info['extra']['compute_node_id'] - compute_node = nova_client.hypervisors.get(compute_node_id) - compute_node = compute_node.to_dict() - if (compute_node['running_vms'] == 0): - result = ironic_client.node.set_power_state( - self.node_uuid, state) - else: - result = ironic_client.node.set_power_state(self.node_uuid, state) - return result - - def pre_condition(self): - pass - - def post_condition(self): - pass - - def get_description(self): - """Description of the action""" - return ("Compute node power on/off through ironic.") diff --git a/watcher/applier/actions/change_nova_service_state.py b/watcher/applier/actions/change_nova_service_state.py deleted file mode 100644 index a2d9792..0000000 --- a/watcher/applier/actions/change_nova_service_state.py +++ /dev/null @@ -1,115 +0,0 @@ 
-# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Authors: Jean-Emile DARTOIS -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -from watcher._i18n import _ -from watcher.applier.actions import base -from watcher.common import exception -from watcher.common import nova_helper -from watcher.decision_engine.model import element - - -class ChangeNovaServiceState(base.BaseAction): - """Disables or enables the nova-compute service, deployed on a host - - By using this action, you will be able to update the state of a - nova-compute service. A disabled nova-compute service can not be selected - by the nova scheduler for future deployment of server. - - The action schema is:: - - schema = Schema({ - 'resource_id': str, - 'state': str, - }) - - The `resource_id` references a nova-compute service name (list of available - nova-compute services is returned by this command: ``nova service-list - --binary nova-compute``). - The `state` value should either be `ONLINE` or `OFFLINE`. 
- """ - - STATE = 'state' - - @property - def schema(self): - return { - 'type': 'object', - 'properties': { - 'resource_id': { - 'type': 'string', - "minlength": 1 - }, - 'state': { - 'type': 'string', - 'enum': [element.ServiceState.ONLINE.value, - element.ServiceState.OFFLINE.value, - element.ServiceState.ENABLED.value, - element.ServiceState.DISABLED.value] - } - }, - 'required': ['resource_id', 'state'], - 'additionalProperties': False, - } - - @property - def host(self): - return self.resource_id - - @property - def state(self): - return self.input_parameters.get(self.STATE) - - def execute(self): - target_state = None - if self.state == element.ServiceState.DISABLED.value: - target_state = False - elif self.state == element.ServiceState.ENABLED.value: - target_state = True - return self._nova_manage_service(target_state) - - def revert(self): - target_state = None - if self.state == element.ServiceState.DISABLED.value: - target_state = True - elif self.state == element.ServiceState.ENABLED.value: - target_state = False - return self._nova_manage_service(target_state) - - def _nova_manage_service(self, state): - if state is None: - raise exception.IllegalArgumentException( - message=_("The target state is not defined")) - - nova = nova_helper.NovaHelper(osc=self.osc) - if state is True: - return nova.enable_service_nova_compute(self.host) - else: - return nova.disable_service_nova_compute(self.host) - - def pre_condition(self): - pass - - def post_condition(self): - pass - - def get_description(self): - """Description of the action""" - return ("Disables or enables the nova-compute service." 
- "A disabled nova-compute service can not be selected " - "by the nova for future deployment of new server.") diff --git a/watcher/applier/actions/factory.py b/watcher/applier/actions/factory.py deleted file mode 100644 index 037e0db..0000000 --- a/watcher/applier/actions/factory.py +++ /dev/null @@ -1,42 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -from __future__ import unicode_literals - -from oslo_log import log - -from watcher.applier.loading import default - -LOG = log.getLogger(__name__) - - -class ActionFactory(object): - def __init__(self): - self.action_loader = default.DefaultActionLoader() - - def make_action(self, object_action, osc=None): - LOG.debug("Creating instance of %s", object_action.action_type) - loaded_action = self.action_loader.load(name=object_action.action_type, - osc=osc) - loaded_action.input_parameters = object_action.input_parameters - LOG.debug("Checking the input parameters") - # NOTE(jed) if we change the schema of an action and we try to reload - # an older version of the Action, the validation can fail. - # We need to add the versioning of an Action or a migration tool. - # We can also create an new Action which extends the previous one. 
- loaded_action.validate_parameters() - return loaded_action diff --git a/watcher/applier/actions/migration.py b/watcher/applier/actions/migration.py deleted file mode 100644 index 9763c3e..0000000 --- a/watcher/applier/actions/migration.py +++ /dev/null @@ -1,211 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Authors: Jean-Emile DARTOIS -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - - -from oslo_log import log -from watcher._i18n import _ -from watcher.applier.actions import base -from watcher.common import exception -from watcher.common import nova_helper - -LOG = log.getLogger(__name__) - - -class Migrate(base.BaseAction): - """Migrates a server to a destination nova-compute host - - This action will allow you to migrate a server to another compute - destination host. - Migration type 'live' can only be used for migrating active VMs. - Migration type 'cold' can be used for migrating non-active VMs - as well active VMs, which will be shut down while migrating. - - The action schema is:: - - schema = Schema({ - 'resource_id': str, # should be a UUID - 'migration_type': str, # choices -> "live", "cold" - 'destination_node': str, - 'source_node': str, - }) - - The `resource_id` is the UUID of the server to migrate. 
- The `source_node` and `destination_node` parameters are respectively the - source and the destination compute hostname (list of available compute - hosts is returned by this command: ``nova service-list --binary - nova-compute``). - """ - - # input parameters constants - MIGRATION_TYPE = 'migration_type' - LIVE_MIGRATION = 'live' - COLD_MIGRATION = 'cold' - DESTINATION_NODE = 'destination_node' - SOURCE_NODE = 'source_node' - - @property - def schema(self): - return { - 'type': 'object', - 'properties': { - 'destination_node': { - "anyof": [ - {'type': 'string', "minLength": 1}, - {'type': 'None'} - ] - }, - 'migration_type': { - 'type': 'string', - "enum": ["live", "cold"] - }, - 'resource_id': { - 'type': 'string', - "minlength": 1, - "pattern": ("^([a-fA-F0-9]){8}-([a-fA-F0-9]){4}-" - "([a-fA-F0-9]){4}-([a-fA-F0-9]){4}-" - "([a-fA-F0-9]){12}$") - }, - 'source_node': { - 'type': 'string', - "minLength": 1 - } - }, - 'required': ['migration_type', 'resource_id', 'source_node'], - 'additionalProperties': False, - } - - @property - def instance_uuid(self): - return self.resource_id - - @property - def migration_type(self): - return self.input_parameters.get(self.MIGRATION_TYPE) - - @property - def destination_node(self): - return self.input_parameters.get(self.DESTINATION_NODE) - - @property - def source_node(self): - return self.input_parameters.get(self.SOURCE_NODE) - - def _live_migrate_instance(self, nova, destination): - result = None - try: - result = nova.live_migrate_instance(instance_id=self.instance_uuid, - dest_hostname=destination) - except nova_helper.nvexceptions.ClientException as e: - if e.code == 400: - LOG.debug("Live migration of instance %s failed. " - "Trying to live migrate using block migration." 
- % self.instance_uuid) - result = nova.live_migrate_instance( - instance_id=self.instance_uuid, - dest_hostname=destination, - block_migration=True) - else: - LOG.debug("Nova client exception occurred while live " - "migrating instance %s.Exception: %s" % - (self.instance_uuid, e)) - except Exception: - LOG.critical("Unexpected error occurred. Migration failed for " - "instance %s. Leaving instance on previous " - "host.", self.instance_uuid) - - return result - - def _cold_migrate_instance(self, nova, destination): - result = None - try: - result = nova.watcher_non_live_migrate_instance( - instance_id=self.instance_uuid, - dest_hostname=destination) - except Exception as exc: - LOG.exception(exc) - LOG.critical("Unexpected error occurred. Migration failed for " - "instance %s. Leaving instance on previous " - "host.", self.instance_uuid) - return result - - def _abort_cold_migrate(self, nova): - # TODO(adisky): currently watcher uses its own version of cold migrate - # implement cold migrate using nova dependent on the blueprint - # https://blueprints.launchpad.net/nova/+spec/cold-migration-with-target - # Abort operation for cold migrate is dependent on blueprint - # https://blueprints.launchpad.net/nova/+spec/abort-cold-migration - LOG.warning("Abort operation for cold migration is not implemented") - - def _abort_live_migrate(self, nova, source, destination): - return nova.abort_live_migrate(instance_id=self.instance_uuid, - source=source, destination=destination) - - def migrate(self, destination=None): - nova = nova_helper.NovaHelper(osc=self.osc) - if destination is None: - LOG.debug("Migrating instance %s, destination node will be " - "determined by nova-scheduler", self.instance_uuid) - else: - LOG.debug("Migrate instance %s to %s", self.instance_uuid, - destination) - instance = nova.find_instance(self.instance_uuid) - if instance: - if self.migration_type == self.LIVE_MIGRATION: - return self._live_migrate_instance(nova, destination) - elif 
self.migration_type == self.COLD_MIGRATION: - return self._cold_migrate_instance(nova, destination) - else: - raise exception.Invalid( - message=(_("Migration of type '%(migration_type)s' is not " - "supported.") % - {'migration_type': self.migration_type})) - else: - raise exception.InstanceNotFound(name=self.instance_uuid) - - def execute(self): - return self.migrate(destination=self.destination_node) - - def revert(self): - return self.migrate(destination=self.source_node) - - def abort(self): - nova = nova_helper.NovaHelper(osc=self.osc) - instance = nova.find_instance(self.instance_uuid) - if instance: - if self.migration_type == self.COLD_MIGRATION: - return self._abort_cold_migrate(nova) - elif self.migration_type == self.LIVE_MIGRATION: - return self._abort_live_migrate( - nova, source=self.source_node, - destination=self.destination_node) - else: - raise exception.InstanceNotFound(name=self.instance_uuid) - - def pre_condition(self): - # TODO(jed): check if the instance exists / check if the instance is on - # the source_node - pass - - def post_condition(self): - # TODO(jed): check extra parameters (network response, etc.) - pass - - def get_description(self): - """Description of the action""" - return "Moving a VM instance from source_node to destination_node" diff --git a/watcher/applier/actions/nop.py b/watcher/applier/actions/nop.py deleted file mode 100644 index 3bd9220..0000000 --- a/watcher/applier/actions/nop.py +++ /dev/null @@ -1,78 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Authors: Jean-Emile DARTOIS -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -from oslo_log import log - -from watcher.applier.actions import base - -LOG = log.getLogger(__name__) - - -class Nop(base.BaseAction): - """logs a message - - The action schema is:: - - schema = Schema({ - 'message': str, - }) - - The `message` is the actual message that will be logged. - """ - - MESSAGE = 'message' - - @property - def schema(self): - return { - 'type': 'object', - 'properties': { - 'message': { - 'type': ['string', 'null'] - } - }, - 'required': ['message'], - 'additionalProperties': False, - } - - @property - def message(self): - return self.input_parameters.get(self.MESSAGE) - - def execute(self): - LOG.debug("Executing action NOP message: %s ", self.message) - return True - - def revert(self): - LOG.debug("Revert action NOP") - return True - - def pre_condition(self): - pass - - def post_condition(self): - pass - - def get_description(self): - """Description of the action""" - return "Logging a NOP message" - - def abort(self): - LOG.debug("Abort action NOP") - return True diff --git a/watcher/applier/actions/resize.py b/watcher/applier/actions/resize.py deleted file mode 100644 index 561e545..0000000 --- a/watcher/applier/actions/resize.py +++ /dev/null @@ -1,111 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2017 Servionica -# -# Authors: Alexander Chadin -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -from oslo_log import log - -from watcher.applier.actions import base -from watcher.common import nova_helper - -LOG = log.getLogger(__name__) - - -class Resize(base.BaseAction): - """Resizes a server with specified flavor. - - This action will allow you to resize a server to another flavor. - - The action schema is:: - - schema = Schema({ - 'resource_id': str, # should be a UUID - 'flavor': str, # should be either ID or Name of Flavor - }) - - The `resource_id` is the UUID of the server to resize. - The `flavor` is the ID or Name of Flavor (Nova accepts either ID or Name - of Flavor to resize() function). 
- """ - - # input parameters constants - FLAVOR = 'flavor' - - @property - def schema(self): - return { - 'type': 'object', - 'properties': { - 'resource_id': { - 'type': 'string', - 'minlength': 1, - 'pattern': ('^([a-fA-F0-9]){8}-([a-fA-F0-9]){4}-' - '([a-fA-F0-9]){4}-([a-fA-F0-9]){4}-' - '([a-fA-F0-9]){12}$') - }, - 'flavor': { - 'type': 'string', - 'minlength': 1, - }, - }, - 'required': ['resource_id', 'flavor'], - 'additionalProperties': False, - } - - @property - def instance_uuid(self): - return self.resource_id - - @property - def flavor(self): - return self.input_parameters.get(self.FLAVOR) - - def resize(self): - nova = nova_helper.NovaHelper(osc=self.osc) - LOG.debug("Resize instance %s to %s flavor", self.instance_uuid, - self.flavor) - instance = nova.find_instance(self.instance_uuid) - result = None - if instance: - try: - result = nova.resize_instance( - instance_id=self.instance_uuid, flavor=self.flavor) - except Exception as exc: - LOG.exception(exc) - LOG.critical( - "Unexpected error occurred. Resizing failed for " - "instance %s.", self.instance_uuid) - return result - - def execute(self): - return self.resize() - - def revert(self): - return self.migrate(destination=self.source_node) - - def pre_condition(self): - # TODO(jed): check if the instance exists / check if the instance is on - # the source_node - pass - - def post_condition(self): - # TODO(jed): check extra parameters (network response, etc.) - pass - - def get_description(self): - """Description of the action""" - return "Resize a server with specified flavor." diff --git a/watcher/applier/actions/sleep.py b/watcher/applier/actions/sleep.py deleted file mode 100644 index 5865c22..0000000 --- a/watcher/applier/actions/sleep.py +++ /dev/null @@ -1,81 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Authors: Jean-Emile DARTOIS -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -import time - -from oslo_log import log -from watcher.applier.actions import base - -LOG = log.getLogger(__name__) - - -class Sleep(base.BaseAction): - """Makes the executor of the action plan wait for a given duration - - The action schema is:: - - schema = Schema({ - 'duration': float, - }) - - The `duration` is expressed in seconds. - """ - - DURATION = 'duration' - - @property - def schema(self): - return { - 'type': 'object', - 'properties': { - 'duration': { - 'type': 'number', - 'minimum': 0 - }, - }, - 'required': ['duration'], - 'additionalProperties': False, - } - - @property - def duration(self): - return int(self.input_parameters.get(self.DURATION)) - - def execute(self): - LOG.debug("Starting action sleep with duration: %s ", self.duration) - time.sleep(self.duration) - return True - - def revert(self): - LOG.debug("Revert action sleep") - return True - - def pre_condition(self): - pass - - def post_condition(self): - pass - - def get_description(self): - """Description of the action""" - return "Wait for a given interval in seconds." - - def abort(self): - LOG.debug("Abort action sleep") - return True diff --git a/watcher/applier/base.py b/watcher/applier/base.py deleted file mode 100644 index daa4097..0000000 --- a/watcher/applier/base.py +++ /dev/null @@ -1,36 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Authors: Jean-Emile DARTOIS -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -""" -This component is in charge of executing the -:ref:`Action Plan ` built by the -:ref:`Watcher Decision Engine `. - -See: :doc:`../architecture` for more details on this component. -""" - -import abc -import six - - -@six.add_metaclass(abc.ABCMeta) -class BaseApplier(object): - @abc.abstractmethod - def execute(self, action_plan_uuid): - raise NotImplementedError() diff --git a/watcher/applier/default.py b/watcher/applier/default.py deleted file mode 100755 index aac85d6..0000000 --- a/watcher/applier/default.py +++ /dev/null @@ -1,63 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Authors: Jean-Emile DARTOIS -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from oslo_config import cfg -from oslo_log import log - -from watcher.applier import base -from watcher.applier.loading import default -from watcher import objects - -LOG = log.getLogger(__name__) -CONF = cfg.CONF - - -class DefaultApplier(base.BaseApplier): - def __init__(self, context, applier_manager): - super(DefaultApplier, self).__init__() - self._applier_manager = applier_manager - self._loader = default.DefaultWorkFlowEngineLoader() - self._engine = None - self._context = context - - @property - def context(self): - return self._context - - @property - def applier_manager(self): - return self._applier_manager - - @property - def engine(self): - if self._engine is None: - selected_workflow_engine = CONF.watcher_applier.workflow_engine - LOG.debug("Loading workflow engine %s ", selected_workflow_engine) - self._engine = self._loader.load( - name=selected_workflow_engine, - context=self.context, - applier_manager=self.applier_manager) - return self._engine - - def execute(self, action_plan_uuid): - LOG.debug("Executing action plan %s ", action_plan_uuid) - - filters = {'action_plan_uuid': action_plan_uuid} - actions = objects.Action.list(self.context, filters=filters, - eager=True) - return self.engine.execute(actions) diff --git a/watcher/applier/loading/__init__.py b/watcher/applier/loading/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/applier/loading/default.py b/watcher/applier/loading/default.py deleted file mode 100644 index c4d58d9..0000000 --- a/watcher/applier/loading/default.py +++ /dev/null @@ -1,29 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import unicode_literals - - -from watcher.common.loader import default - - -class DefaultWorkFlowEngineLoader(default.DefaultLoader): - def __init__(self): - super(DefaultWorkFlowEngineLoader, self).__init__( - namespace='watcher_workflow_engines') - - -class DefaultActionLoader(default.DefaultLoader): - def __init__(self): - super(DefaultActionLoader, self).__init__( - namespace='watcher_actions') diff --git a/watcher/applier/manager.py b/watcher/applier/manager.py deleted file mode 100644 index 03a2356..0000000 --- a/watcher/applier/manager.py +++ /dev/null @@ -1,57 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# Copyright (c) 2016 Intel Corp -# -# Authors: Jean-Emile DARTOIS -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - -from watcher.applier.messaging import trigger -from watcher.common import service_manager - -from watcher import conf - -CONF = conf.CONF - - -class ApplierManager(service_manager.ServiceManager): - - @property - def service_name(self): - return 'watcher-applier' - - @property - def api_version(self): - return '1.0' - - @property - def publisher_id(self): - return CONF.watcher_applier.publisher_id - - @property - def conductor_topic(self): - return CONF.watcher_applier.conductor_topic - - @property - def notification_topics(self): - return [] - - @property - def conductor_endpoints(self): - return [trigger.TriggerActionPlan] - - @property - def notification_endpoints(self): - return [] diff --git a/watcher/applier/messaging/__init__.py b/watcher/applier/messaging/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/applier/messaging/trigger.py b/watcher/applier/messaging/trigger.py deleted file mode 100644 index 1c4b3a7..0000000 --- a/watcher/applier/messaging/trigger.py +++ /dev/null @@ -1,50 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Authors: Jean-Emile DARTOIS -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from concurrent import futures - -from oslo_config import cfg -from oslo_log import log - -from watcher.applier.action_plan import default - -LOG = log.getLogger(__name__) -CONF = cfg.CONF - - -class TriggerActionPlan(object): - def __init__(self, applier_manager): - self.applier_manager = applier_manager - workers = CONF.watcher_applier.workers - self.executor = futures.ThreadPoolExecutor(max_workers=workers) - - def do_launch_action_plan(self, context, action_plan_uuid): - try: - cmd = default.DefaultActionPlanHandler(context, - self.applier_manager, - action_plan_uuid) - cmd.execute() - except Exception as e: - LOG.exception(e) - - def launch_action_plan(self, context, action_plan_uuid): - LOG.debug("Trigger ActionPlan %s", action_plan_uuid) - # submit - self.executor.submit(self.do_launch_action_plan, context, - action_plan_uuid) - return action_plan_uuid diff --git a/watcher/applier/rpcapi.py b/watcher/applier/rpcapi.py deleted file mode 100644 index 6788dc6..0000000 --- a/watcher/applier/rpcapi.py +++ /dev/null @@ -1,71 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# Copyright (c) 2016 Intel Corp -# -# Authors: Jean-Emile DARTOIS -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from watcher.common import exception -from watcher.common import service -from watcher.common import service_manager -from watcher.common import utils - -from watcher import conf - -CONF = conf.CONF - - -class ApplierAPI(service.Service): - - def __init__(self): - super(ApplierAPI, self).__init__(ApplierAPIManager) - - def launch_action_plan(self, context, action_plan_uuid=None): - if not utils.is_uuid_like(action_plan_uuid): - raise exception.InvalidUuidOrName(name=action_plan_uuid) - - self.conductor_client.cast( - context, 'launch_action_plan', action_plan_uuid=action_plan_uuid) - - -class ApplierAPIManager(service_manager.ServiceManager): - - @property - def service_name(self): - return None - - @property - def api_version(self): - return '1.0' - - @property - def publisher_id(self): - return CONF.watcher_applier.publisher_id - - @property - def conductor_topic(self): - return CONF.watcher_applier.conductor_topic - - @property - def notification_topics(self): - return [] - - @property - def conductor_endpoints(self): - return [] - - @property - def notification_endpoints(self): - return [] diff --git a/watcher/applier/workflow_engine/__init__.py b/watcher/applier/workflow_engine/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/applier/workflow_engine/base.py b/watcher/applier/workflow_engine/base.py deleted file mode 100644 index 3e0c60f..0000000 --- a/watcher/applier/workflow_engine/base.py +++ /dev/null @@ -1,260 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# - -import abc -import six -import time - -import eventlet - -from oslo_log import log -from taskflow import task as flow_task - -from watcher.applier.actions import factory -from watcher.common import clients -from watcher.common import exception -from watcher.common.loader import loadable -from watcher import notifications -from watcher import objects -from watcher.objects import fields - - -LOG = log.getLogger(__name__) - -CANCEL_STATE = [objects.action_plan.State.CANCELLING, - objects.action_plan.State.CANCELLED] - - -@six.add_metaclass(abc.ABCMeta) -class BaseWorkFlowEngine(loadable.Loadable): - - def __init__(self, config, context=None, applier_manager=None): - """Constructor - - :param config: A mapping containing the configuration of this - workflow engine - :type config: dict - :param osc: an OpenStackClients object, defaults to None - :type osc: :py:class:`~.OpenStackClients` instance, optional - """ - super(BaseWorkFlowEngine, self).__init__(config) - self._context = context - self._applier_manager = applier_manager - self._action_factory = factory.ActionFactory() - self._osc = None - - @classmethod - def get_config_opts(cls): - """Defines the configuration options to be associated to this loadable - - :return: A list of configuration options relative to this Loadable - :rtype: list of :class:`oslo_config.cfg.Opt` instances - """ - return [] - - @property - def context(self): - return self._context - - @property - def osc(self): - if not self._osc: - self._osc = clients.OpenStackClients() - return self._osc - - @property - def applier_manager(self): - return self._applier_manager - - @property - def action_factory(self): - return self._action_factory - - def notify(self, action, state): - db_action = objects.Action.get_by_uuid(self.context, action.uuid, - eager=True) - db_action.state = state - db_action.save() - - @abc.abstractmethod - def 
execute(self, actions): - raise NotImplementedError() - - -class BaseTaskFlowActionContainer(flow_task.Task): - - def __init__(self, name, db_action, engine, **kwargs): - super(BaseTaskFlowActionContainer, self).__init__(name=name) - self._db_action = db_action - self._engine = engine - self.loaded_action = None - - @property - def engine(self): - return self._engine - - @property - def action(self): - if self.loaded_action is None: - action = self.engine.action_factory.make_action( - self._db_action, - osc=self._engine.osc) - self.loaded_action = action - return self.loaded_action - - @abc.abstractmethod - def do_pre_execute(self): - raise NotImplementedError() - - @abc.abstractmethod - def do_execute(self, *args, **kwargs): - raise NotImplementedError() - - @abc.abstractmethod - def do_post_execute(self): - raise NotImplementedError() - - @abc.abstractmethod - def do_revert(self): - raise NotImplementedError() - - @abc.abstractmethod - def do_abort(self, *args, **kwargs): - raise NotImplementedError() - - # NOTE(alexchadin): taskflow does 3 method calls (pre_execute, execute, - # post_execute) independently. We want to support notifications in base - # class, so child's methods should be named with `do_` prefix and wrapped. - def pre_execute(self): - try: - # NOTE(adisky): check the state of action plan before starting - # next action, if action plan is cancelled raise the exceptions - # so that taskflow does not schedule further actions. 
- action_plan = objects.ActionPlan.get_by_id( - self.engine.context, self._db_action.action_plan_id) - if action_plan.state in CANCEL_STATE: - raise exception.ActionPlanCancelled(uuid=action_plan.uuid) - self.do_pre_execute() - notifications.action.send_execution_notification( - self.engine.context, self._db_action, - fields.NotificationAction.EXECUTION, - fields.NotificationPhase.START) - except exception.ActionPlanCancelled as e: - LOG.exception(e) - raise - except Exception as e: - LOG.exception(e) - self.engine.notify(self._db_action, objects.action.State.FAILED) - notifications.action.send_execution_notification( - self.engine.context, self._db_action, - fields.NotificationAction.EXECUTION, - fields.NotificationPhase.ERROR, - priority=fields.NotificationPriority.ERROR) - - def execute(self, *args, **kwargs): - def _do_execute_action(*args, **kwargs): - try: - self.do_execute(*args, **kwargs) - notifications.action.send_execution_notification( - self.engine.context, self._db_action, - fields.NotificationAction.EXECUTION, - fields.NotificationPhase.END) - except Exception as e: - LOG.exception(e) - LOG.error('The workflow engine has failed' - 'to execute the action: %s', self.name) - self.engine.notify(self._db_action, - objects.action.State.FAILED) - notifications.action.send_execution_notification( - self.engine.context, self._db_action, - fields.NotificationAction.EXECUTION, - fields.NotificationPhase.ERROR, - priority=fields.NotificationPriority.ERROR) - raise - # NOTE: spawn a new thread for action execution, so that if action plan - # is cancelled workflow engine will not wait to finish action execution - et = eventlet.spawn(_do_execute_action, *args, **kwargs) - # NOTE: check for the state of action plan periodically,so that if - # action is finished or action plan is cancelled we can exit from here. 
- while True: - action_object = objects.Action.get_by_uuid( - self.engine.context, self._db_action.uuid, eager=True) - action_plan_object = objects.ActionPlan.get_by_id( - self.engine.context, action_object.action_plan_id) - if (action_object.state in [objects.action.State.SUCCEEDED, - objects.action.State.FAILED] or - action_plan_object.state in CANCEL_STATE): - break - time.sleep(1) - try: - # NOTE: kill the action execution thread, if action plan is - # cancelled for all other cases wait for the result from action - # execution thread. - # Not all actions support abort operations, kill only those action - # which support abort operations - abort = self.action.check_abort() - if (action_plan_object.state in CANCEL_STATE and abort): - et.kill() - et.wait() - - # NOTE: catch the greenlet exit exception due to thread kill, - # taskflow will call revert for the action, - # we will redirect it to abort. - except eventlet.greenlet.GreenletExit: - raise exception.ActionPlanCancelled(uuid=action_plan_object.uuid) - - except Exception as e: - LOG.exception(e) - raise - - def post_execute(self): - try: - self.do_post_execute() - except Exception as e: - LOG.exception(e) - self.engine.notify(self._db_action, objects.action.State.FAILED) - notifications.action.send_execution_notification( - self.engine.context, self._db_action, - fields.NotificationAction.EXECUTION, - fields.NotificationPhase.ERROR, - priority=fields.NotificationPriority.ERROR) - - def revert(self, *args, **kwargs): - action_plan = objects.ActionPlan.get_by_id( - self.engine.context, self._db_action.action_plan_id, eager=True) - # NOTE: check if revert cause by cancel action plan or - # some other exception occured during action plan execution - # if due to some other exception keep the flow intact. 
- if action_plan.state not in CANCEL_STATE: - self.do_revert() - return - - action_object = objects.Action.get_by_uuid( - self.engine.context, self._db_action.uuid, eager=True) - if action_object.state == objects.action.State.ONGOING: - action_object.state = objects.action.State.CANCELLING - action_object.save() - self.abort() - elif action_object.state == objects.action.State.PENDING: - action_object.state = objects.action.State.CANCELLED - action_object.save() - else: - pass - - def abort(self, *args, **kwargs): - self.do_abort(*args, **kwargs) diff --git a/watcher/applier/workflow_engine/default.py b/watcher/applier/workflow_engine/default.py deleted file mode 100644 index 4080de7..0000000 --- a/watcher/applier/workflow_engine/default.py +++ /dev/null @@ -1,165 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - -from oslo_concurrency import processutils -from oslo_config import cfg -from oslo_log import log -from taskflow import engines -from taskflow import exceptions as tf_exception -from taskflow.patterns import graph_flow as gf -from taskflow import task as flow_task - -from watcher.applier.workflow_engine import base -from watcher.common import exception -from watcher import objects - -LOG = log.getLogger(__name__) - - -class DefaultWorkFlowEngine(base.BaseWorkFlowEngine): - """Taskflow as a workflow engine for Watcher - - Full documentation on taskflow at - http://docs.openstack.org/developer/taskflow/ - """ - - def decider(self, history): - # FIXME(jed) not possible with the current Watcher Planner - # - # decider – A callback function that will be expected to - # decide at runtime whether v should be allowed to execute - # (or whether the execution of v should be ignored, - # and therefore not executed). It is expected to take as single - # keyword argument history which will be the execution results of - # all u decideable links that have v as a target. It is expected - # to return a single boolean - # (True to allow v execution or False to not). - return True - - @classmethod - def get_config_opts(cls): - return [ - cfg.IntOpt( - 'max_workers', - default=processutils.get_worker_count(), - min=1, - required=True, - help='Number of workers for taskflow engine ' - 'to execute actions.') - ] - - def execute(self, actions): - try: - # NOTE(jed) We want to have a strong separation of concern - # between the Watcher planner and the Watcher Applier in order - # to us the possibility to support several workflow engine. - # We want to provide the 'taskflow' engine by - # default although we still want to leave the possibility for - # the users to change it. - # The current implementation uses graph with linked actions. 
- # todo(jed) add olso conf for retry and name - flow = gf.Flow("watcher_flow") - actions_uuid = {} - for a in actions: - task = TaskFlowActionContainer(a, self) - flow.add(task) - actions_uuid[a.uuid] = task - - for a in actions: - for parent_id in a.parents: - flow.link(actions_uuid[parent_id], actions_uuid[a.uuid], - decider=self.decider) - - e = engines.load( - flow, engine='parallel', - max_workers=self.config.max_workers) - e.run() - - return flow - - except exception.ActionPlanCancelled as e: - raise - - except tf_exception.WrappedFailure as e: - if e.check("watcher.common.exception.ActionPlanCancelled"): - raise exception.ActionPlanCancelled - else: - raise exception.WorkflowExecutionException(error=e) - - except Exception as e: - raise exception.WorkflowExecutionException(error=e) - - -class TaskFlowActionContainer(base.BaseTaskFlowActionContainer): - def __init__(self, db_action, engine): - name = "action_type:{0} uuid:{1}".format(db_action.action_type, - db_action.uuid) - super(TaskFlowActionContainer, self).__init__(name, db_action, engine) - - def do_pre_execute(self): - self.engine.notify(self._db_action, objects.action.State.ONGOING) - LOG.debug("Pre-condition action: %s", self.name) - self.action.pre_condition() - - def do_execute(self, *args, **kwargs): - LOG.debug("Running action: %s", self.name) - - # NOTE: For result is False, set action state fail - result = self.action.execute() - if result is False: - self.engine.notify(self._db_action, - objects.action.State.FAILED) - else: - self.engine.notify(self._db_action, - objects.action.State.SUCCEEDED) - - def do_post_execute(self): - LOG.debug("Post-condition action: %s", self.name) - self.action.post_condition() - - def do_revert(self, *args, **kwargs): - LOG.warning("Revert action: %s", self.name) - try: - # TODO(jed): do we need to update the states in case of failure? - self.action.revert() - except Exception as e: - LOG.exception(e) - LOG.critical("Oops! 
We need a disaster recover plan.") - - def do_abort(self, *args, **kwargs): - LOG.warning("Aborting action: %s", self.name) - try: - result = self.action.abort() - if result: - # Aborted the action. - self.engine.notify(self._db_action, - objects.action.State.CANCELLED) - else: - self.engine.notify(self._db_action, - objects.action.State.SUCCEEDED) - except Exception as e: - self.engine.notify(self._db_action, objects.action.State.FAILED) - LOG.exception(e) - - -class TaskFlowNop(flow_task.Task): - """This class is used in case of the workflow have only one Action. - - We need at least two atoms to create a link. - """ - def execute(self): - pass diff --git a/watcher/cmd/__init__.py b/watcher/cmd/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/cmd/api.py b/watcher/cmd/api.py deleted file mode 100644 index 58c27e2..0000000 --- a/watcher/cmd/api.py +++ /dev/null @@ -1,53 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""Starter script for the Watcher API service.""" - -import sys - -from oslo_config import cfg -from oslo_log import log as logging - -from watcher.api import scheduling -from watcher.common import service -from watcher import conf - -LOG = logging.getLogger(__name__) -CONF = conf.CONF - - -def main(): - service.prepare_service(sys.argv, CONF) - - host, port = cfg.CONF.api.host, cfg.CONF.api.port - protocol = "http" if not CONF.api.enable_ssl_api else "https" - # Build and start the WSGI app - server = service.WSGIService('watcher-api', CONF.api.enable_ssl_api) - - if host == '127.0.0.1': - LOG.info('serving on 127.0.0.1:%(port)s, ' - 'view at %(protocol)s://127.0.0.1:%(port)s' % - dict(protocol=protocol, port=port)) - else: - LOG.info('serving on %(protocol)s://%(host)s:%(port)s' % - dict(protocol=protocol, host=host, port=port)) - - api_schedule = scheduling.APISchedulingService() - api_schedule.start() - - launcher = service.launch(CONF, server, workers=server.workers) - launcher.wait() diff --git a/watcher/cmd/applier.py b/watcher/cmd/applier.py deleted file mode 100644 index 364a9ba..0000000 --- a/watcher/cmd/applier.py +++ /dev/null @@ -1,42 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""Starter script for the Applier service.""" - -import os -import sys - -from oslo_log import log as logging - -from watcher.applier import manager -from watcher.common import service as watcher_service -from watcher import conf - -LOG = logging.getLogger(__name__) -CONF = conf.CONF - - -def main(): - watcher_service.prepare_service(sys.argv, CONF) - - LOG.info('Starting Watcher Applier service in PID %s', os.getpid()) - - applier_service = watcher_service.Service(manager.ApplierManager) - - # Only 1 process - launcher = watcher_service.launch(CONF, applier_service) - launcher.wait() diff --git a/watcher/cmd/dbmanage.py b/watcher/cmd/dbmanage.py deleted file mode 100644 index 883efaa..0000000 --- a/watcher/cmd/dbmanage.py +++ /dev/null @@ -1,157 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Run storage database migration. 
-""" - -import sys - -from oslo_config import cfg - -from watcher.common import service -from watcher import conf -from watcher.db import migration -from watcher.db import purge - -CONF = conf.CONF - - -class DBCommand(object): - - @staticmethod - def upgrade(): - migration.upgrade(CONF.command.revision) - - @staticmethod - def downgrade(): - migration.downgrade(CONF.command.revision) - - @staticmethod - def revision(): - migration.revision(CONF.command.message, CONF.command.autogenerate) - - @staticmethod - def stamp(): - migration.stamp(CONF.command.revision) - - @staticmethod - def version(): - print(migration.version()) - - @staticmethod - def create_schema(): - migration.create_schema() - - @staticmethod - def purge(): - purge.purge(CONF.command.age_in_days, CONF.command.max_number, - CONF.command.goal, CONF.command.exclude_orphans, - CONF.command.dry_run) - - -def add_command_parsers(subparsers): - parser = subparsers.add_parser( - 'upgrade', - help="Upgrade the database schema to the latest version. " - "Optionally, use --revision to specify an alembic revision " - "string to upgrade to.") - parser.set_defaults(func=DBCommand.upgrade) - parser.add_argument('--revision', nargs='?') - - parser = subparsers.add_parser( - 'downgrade', - help="Downgrade the database schema to the oldest revision. " - "While optional, one should generally use --revision to " - "specify the alembic revision string to downgrade to.") - parser.set_defaults(func=DBCommand.downgrade) - parser.add_argument('--revision', nargs='?') - - parser = subparsers.add_parser('stamp') - parser.add_argument('revision', nargs='?') - parser.set_defaults(func=DBCommand.stamp) - - parser = subparsers.add_parser( - 'revision', - help="Create a new alembic revision. 
" - "Use --message to set the message string.") - parser.add_argument('-m', '--message') - parser.add_argument('--autogenerate', action='store_true') - parser.set_defaults(func=DBCommand.revision) - - parser = subparsers.add_parser( - 'version', - help="Print the current version information and exit.") - parser.set_defaults(func=DBCommand.version) - - parser = subparsers.add_parser( - 'create_schema', - help="Create the database schema.") - parser.set_defaults(func=DBCommand.create_schema) - - parser = subparsers.add_parser( - 'purge', - help="Purge the database.") - parser.add_argument('-d', '--age-in-days', - help="Number of days since deletion (from today) " - "to exclude from the purge. If None, everything " - "will be purged.", - type=int, default=None, nargs='?') - parser.add_argument('-n', '--max-number', - help="Max number of objects expected to be deleted. " - "Prevents the deletion if exceeded. No limit if " - "set to None.", - type=int, default=None, nargs='?') - parser.add_argument('-t', '--goal', - help="UUID or name of the goal to purge.", - type=str, default=None, nargs='?') - parser.add_argument('-e', '--exclude-orphans', action='store_true', - help="Flag to indicate whether or not you want to " - "exclude orphans from deletion (default: False).", - default=False) - parser.add_argument('--dry-run', action='store_true', - help="Flag to indicate whether or not you want to " - "perform a dry run (no deletion).", - default=False) - - parser.set_defaults(func=DBCommand.purge) - - -command_opt = cfg.SubCommandOpt('command', - title='Command', - help='Available commands', - handler=add_command_parsers) - - -def register_sub_command_opts(): - cfg.CONF.register_cli_opt(command_opt) - - -def main(): - register_sub_command_opts() - # this is hack to work with previous usage of watcher-dbsync - # pls change it to watcher-dbsync upgrade - valid_commands = set([ - 'upgrade', 'downgrade', 'revision', - 'version', 'stamp', 'create_schema', - 'purge', - ]) - if not 
set(sys.argv).intersection(valid_commands): - sys.argv.append('upgrade') - - service.prepare_service(sys.argv, CONF) - CONF.command.func() diff --git a/watcher/cmd/decisionengine.py b/watcher/cmd/decisionengine.py deleted file mode 100644 index ac172ee..0000000 --- a/watcher/cmd/decisionengine.py +++ /dev/null @@ -1,53 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Starter script for the Decision Engine manager service.""" - -import os -import sys - -from oslo_log import log as logging - -from watcher.common import service as watcher_service -from watcher import conf -from watcher.decision_engine import gmr -from watcher.decision_engine import manager -from watcher.decision_engine import scheduling -from watcher.decision_engine import sync - -LOG = logging.getLogger(__name__) -CONF = conf.CONF - - -def main(): - watcher_service.prepare_service(sys.argv, CONF) - gmr.register_gmr_plugins() - - LOG.info('Starting Watcher Decision Engine service in PID %s', - os.getpid()) - - syncer = sync.Syncer() - syncer.sync() - - de_service = watcher_service.Service(manager.DecisionEngineManager) - bg_scheduler_service = scheduling.DecisionEngineSchedulingService() - - # Only 1 process - launcher = watcher_service.launch(CONF, de_service) - launcher.launch_service(bg_scheduler_service) - - launcher.wait() diff --git a/watcher/cmd/sync.py 
b/watcher/cmd/sync.py deleted file mode 100644 index c0cbf38..0000000 --- a/watcher/cmd/sync.py +++ /dev/null @@ -1,40 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Copyright (c) 2016 Intel -# -# Authors: Tomasz Kaczynski -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Script for the sync tool.""" - -import sys - -from oslo_log import log as logging - -from watcher.common import service as service -from watcher import conf -from watcher.decision_engine import sync - -LOG = logging.getLogger(__name__) -CONF = conf.CONF - - -def main(): - LOG.info('Watcher sync started.') - - service.prepare_service(sys.argv, CONF) - syncer = sync.Syncer() - syncer.sync() - - LOG.info('Watcher sync finished.') diff --git a/watcher/common/__init__.py b/watcher/common/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/common/cinder_helper.py b/watcher/common/cinder_helper.py deleted file mode 100644 index 72058f8..0000000 --- a/watcher/common/cinder_helper.py +++ /dev/null @@ -1,79 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# - - -from oslo_log import log - -from watcher.common import clients -from watcher.common import exception - -LOG = log.getLogger(__name__) - - -class CinderHelper(object): - - def __init__(self, osc=None): - """:param osc: an OpenStackClients instance""" - self.osc = osc if osc else clients.OpenStackClients() - self.cinder = self.osc.cinder() - - def get_storage_node_list(self): - return list(self.cinder.services.list(binary='cinder-volume')) - - def get_storage_node_by_name(self, name): - """Get storage node by name(host@backendname)""" - try: - storages = list(filter(lambda storage: - storage.host == name, - self.get_storage_node_list())) - if len(storages) != 1: - raise exception.StorageNodeNotFound(name=name) - return storages[0] - except Exception as exc: - LOG.exception(exc) - raise exception.StorageNodeNotFound(name=name) - - def get_storage_pool_list(self): - return self.cinder.pools.list(detailed=True) - - def get_storage_pool_by_name(self, name): - """Get pool by name(host@backend#poolname)""" - try: - pools = list(filter(lambda pool: - pool.name == name, - self.get_storage_pool_list())) - if len(pools) != 1: - raise exception.PoolNotFound(name=name) - return pools[0] - except Exception as exc: - LOG.exception(exc) - raise exception.PoolNotFound(name=name) - - def get_volume_list(self): - return self.cinder.volumes.list(search_opts={'all_tenants': True}) - - def get_volume_type_list(self): - return self.cinder.volume_types.list() - - def get_volume_type_by_backendname(self, backendname): - volume_type_list = self.get_volume_type_list() - - volume_type = list(filter( - lambda volume_type: - volume_type.extra_specs.get( - 'volume_backend_name') == backendname, volume_type_list)) - if volume_type: - return volume_type[0].name - else: - return "" diff --git a/watcher/common/clients.py b/watcher/common/clients.py deleted file mode 100755 index 
a9f0bc7..0000000 --- a/watcher/common/clients.py +++ /dev/null @@ -1,204 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from ceilometerclient import client as ceclient -from cinderclient import client as ciclient -from glanceclient import client as glclient -from gnocchiclient import client as gnclient -from ironicclient import client as irclient -from keystoneauth1 import loading as ka_loading -from keystoneclient import client as keyclient -from monascaclient import client as monclient -from neutronclient.neutron import client as netclient -from novaclient import client as nvclient - -from watcher.common import exception - -from watcher import conf - -CONF = conf.CONF - -_CLIENTS_AUTH_GROUP = 'watcher_clients_auth' - - -class OpenStackClients(object): - """Convenience class to create and cache client instances.""" - - def __init__(self): - self.reset_clients() - - def reset_clients(self): - self._session = None - self._keystone = None - self._nova = None - self._glance = None - self._gnocchi = None - self._cinder = None - self._ceilometer = None - self._monasca = None - self._neutron = None - self._ironic = None - - def _get_keystone_session(self): - auth = ka_loading.load_auth_from_conf_options(CONF, - _CLIENTS_AUTH_GROUP) - sess = ka_loading.load_session_from_conf_options(CONF, - _CLIENTS_AUTH_GROUP, - auth=auth) - return sess - - @property - def auth_url(self): - return self.keystone().auth_url - - @property - def session(self): - if not self._session: - 
self._session = self._get_keystone_session() - return self._session - - def _get_client_option(self, client, option): - return getattr(getattr(CONF, '%s_client' % client), option) - - @exception.wrap_keystone_exception - def keystone(self): - if not self._keystone: - self._keystone = keyclient.Client(session=self.session) - - return self._keystone - - @exception.wrap_keystone_exception - def nova(self): - if self._nova: - return self._nova - - novaclient_version = self._get_client_option('nova', 'api_version') - nova_endpoint_type = self._get_client_option('nova', 'endpoint_type') - self._nova = nvclient.Client(novaclient_version, - endpoint_type=nova_endpoint_type, - session=self.session) - return self._nova - - @exception.wrap_keystone_exception - def glance(self): - if self._glance: - return self._glance - - glanceclient_version = self._get_client_option('glance', 'api_version') - glance_endpoint_type = self._get_client_option('glance', - 'endpoint_type') - self._glance = glclient.Client(glanceclient_version, - interface=glance_endpoint_type, - session=self.session) - return self._glance - - @exception.wrap_keystone_exception - def gnocchi(self): - if self._gnocchi: - return self._gnocchi - - gnocchiclient_version = self._get_client_option('gnocchi', - 'api_version') - gnocchiclient_interface = self._get_client_option('gnocchi', - 'endpoint_type') - self._gnocchi = gnclient.Client(gnocchiclient_version, - interface=gnocchiclient_interface, - session=self.session) - return self._gnocchi - - @exception.wrap_keystone_exception - def cinder(self): - if self._cinder: - return self._cinder - - cinderclient_version = self._get_client_option('cinder', 'api_version') - cinder_endpoint_type = self._get_client_option('cinder', - 'endpoint_type') - self._cinder = ciclient.Client(cinderclient_version, - endpoint_type=cinder_endpoint_type, - session=self.session) - return self._cinder - - @exception.wrap_keystone_exception - def ceilometer(self): - if self._ceilometer: - 
return self._ceilometer - - ceilometerclient_version = self._get_client_option('ceilometer', - 'api_version') - ceilometer_endpoint_type = self._get_client_option('ceilometer', - 'endpoint_type') - self._ceilometer = ceclient.get_client( - ceilometerclient_version, - endpoint_type=ceilometer_endpoint_type, - session=self.session) - return self._ceilometer - - @exception.wrap_keystone_exception - def monasca(self): - if self._monasca: - return self._monasca - - monascaclient_version = self._get_client_option( - 'monasca', 'api_version') - monascaclient_interface = self._get_client_option( - 'monasca', 'interface') - token = self.session.get_token() - watcher_clients_auth_config = CONF.get(_CLIENTS_AUTH_GROUP) - service_type = 'monitoring' - monasca_kwargs = { - 'auth_url': watcher_clients_auth_config.auth_url, - 'cert_file': watcher_clients_auth_config.certfile, - 'insecure': watcher_clients_auth_config.insecure, - 'key_file': watcher_clients_auth_config.keyfile, - 'keystone_timeout': watcher_clients_auth_config.timeout, - 'os_cacert': watcher_clients_auth_config.cafile, - 'service_type': service_type, - 'token': token, - 'username': watcher_clients_auth_config.username, - 'password': watcher_clients_auth_config.password, - } - endpoint = self.session.get_endpoint(service_type=service_type, - interface=monascaclient_interface) - - self._monasca = monclient.Client( - monascaclient_version, endpoint, **monasca_kwargs) - - return self._monasca - - @exception.wrap_keystone_exception - def neutron(self): - if self._neutron: - return self._neutron - - neutronclient_version = self._get_client_option('neutron', - 'api_version') - neutron_endpoint_type = self._get_client_option('neutron', - 'endpoint_type') - - self._neutron = netclient.Client(neutronclient_version, - endpoint_type=neutron_endpoint_type, - session=self.session) - self._neutron.format = 'json' - return self._neutron - - @exception.wrap_keystone_exception - def ironic(self): - if self._ironic: - return 
self._ironic - - ironicclient_version = self._get_client_option('ironic', 'api_version') - endpoint_type = self._get_client_option('ironic', 'endpoint_type') - self._ironic = irclient.get_client(ironicclient_version, - ironic_url=endpoint_type, - session=self.session) - return self._ironic diff --git a/watcher/common/config.py b/watcher/common/config.py deleted file mode 100644 index 5ca04e3..0000000 --- a/watcher/common/config.py +++ /dev/null @@ -1,32 +0,0 @@ -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# Copyright 2012 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import cfg - -from watcher.common import rpc -from watcher import version - - -def parse_args(argv, default_config_files=None): - default_config_files = (default_config_files or - cfg.find_config_files(project='watcher')) - rpc.set_defaults(control_exchange='watcher') - cfg.CONF(argv[1:], - project='python-watcher', - version=version.version_info.release_string(), - default_config_files=default_config_files) - rpc.init(cfg.CONF) diff --git a/watcher/common/context.py b/watcher/common/context.py deleted file mode 100644 index 3da7b22..0000000 --- a/watcher/common/context.py +++ /dev/null @@ -1,120 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_context import context -from oslo_log import log as logging -from oslo_utils import timeutils -import six - -from watcher.common import utils - -LOG = logging.getLogger(__name__) - - -class RequestContext(context.RequestContext): - """Extends security contexts from the OpenStack common library.""" - - def __init__(self, user_id=None, project_id=None, is_admin=None, - roles=None, timestamp=None, request_id=None, auth_token=None, - auth_url=None, overwrite=True, user_name=None, - project_name=None, domain_name=None, domain_id=None, - auth_token_info=None, **kwargs): - """Stores several additional request parameters: - - :param domain_id: The ID of the domain. - :param domain_name: The name of the domain. - :param is_public_api: Specifies whether the request should be processed - without authentication. 
- - """ - user = kwargs.pop('user', None) - tenant = kwargs.pop('tenant', None) - super(RequestContext, self).__init__( - auth_token=auth_token, - user=user_id or user, - tenant=project_id or tenant, - domain=kwargs.pop('domain', None) or domain_name or domain_id, - user_domain=kwargs.pop('user_domain', None), - project_domain=kwargs.pop('project_domain', None), - is_admin=is_admin, - read_only=kwargs.pop('read_only', False), - show_deleted=kwargs.pop('show_deleted', False), - request_id=request_id, - resource_uuid=kwargs.pop('resource_uuid', None), - is_admin_project=kwargs.pop('is_admin_project', True), - overwrite=overwrite, - roles=roles) - - self.remote_address = kwargs.pop('remote_address', None) - self.instance_lock_checked = kwargs.pop('instance_lock_checked', None) - self.read_deleted = kwargs.pop('read_deleted', None) - self.service_catalog = kwargs.pop('service_catalog', None) - self.quota_class = kwargs.pop('quota_class', None) - - # oslo_context's RequestContext.to_dict() generates this field, we can - # safely ignore this as we don't use it. - kwargs.pop('user_identity', None) - kwargs.pop('global_request_id', None) - if kwargs: - LOG.warning('Arguments dropped when creating context: %s', - str(kwargs)) - - # FIXME(dims): user_id and project_id duplicate information that is - # already present in the oslo_context's RequestContext. We need to - # get rid of them. 
- self.auth_url = auth_url - self.domain_name = domain_name - self.domain_id = domain_id - self.auth_token_info = auth_token_info - self.user_id = user_id or user - self.project_id = project_id - if not timestamp: - timestamp = timeutils.utcnow() - if isinstance(timestamp, six.string_types): - timestamp = timeutils.parse_isotime(timestamp) - self.timestamp = timestamp - self.user_name = user_name - self.project_name = project_name - self.is_admin = is_admin - # if self.is_admin is None: - # self.is_admin = policy.check_is_admin(self) - - def to_dict(self): - values = super(RequestContext, self).to_dict() - # FIXME(dims): defensive hasattr() checks need to be - # removed once we figure out why we are seeing stack - # traces - values.update({ - 'user_id': getattr(self, 'user_id', None), - 'user_name': getattr(self, 'user_name', None), - 'project_id': getattr(self, 'project_id', None), - 'project_name': getattr(self, 'project_name', None), - 'domain_id': getattr(self, 'domain_id', None), - 'domain_name': getattr(self, 'domain_name', None), - 'auth_token_info': getattr(self, 'auth_token_info', None), - 'is_admin': getattr(self, 'is_admin', None), - 'timestamp': utils.strtime(self.timestamp) if hasattr( - self, 'timestamp') else None, - 'request_id': getattr(self, 'request_id', None), - }) - return values - - @classmethod - def from_dict(cls, values): - return cls(**values) - - def __str__(self): - return "" % self.to_dict() - - -def make_context(*args, **kwargs): - return RequestContext(*args, **kwargs) diff --git a/watcher/common/exception.py b/watcher/common/exception.py deleted file mode 100644 index 22f1bd3..0000000 --- a/watcher/common/exception.py +++ /dev/null @@ -1,477 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Watcher base exception handling. - -Includes decorator for re-raising Watcher-type exceptions. - -SHOULD include dedicated exception logging. - -""" - -import functools -import sys - -from keystoneclient import exceptions as keystone_exceptions -from oslo_log import log as logging -import six - -from watcher._i18n import _ - -from watcher import conf - -LOG = logging.getLogger(__name__) - -CONF = conf.CONF - - -def wrap_keystone_exception(func): - """Wrap keystone exceptions and throw Watcher specific exceptions.""" - @functools.wraps(func) - def wrapped(*args, **kw): - try: - return func(*args, **kw) - except keystone_exceptions.AuthorizationFailure: - raise AuthorizationFailure( - client=func.__name__, reason=sys.exc_info()[1]) - except keystone_exceptions.ClientException: - raise AuthorizationFailure( - client=func.__name__, - reason=(_('Unexpected keystone client error occurred: %s') - % sys.exc_info()[1])) - return wrapped - - -class WatcherException(Exception): - """Base Watcher Exception - - To correctly use this class, inherit from it and define - a 'msg_fmt' property. That msg_fmt will get printf'd - with the keyword arguments provided to the constructor. 
- - """ - msg_fmt = _("An unknown exception occurred") - code = 500 - headers = {} - safe = False - - def __init__(self, message=None, **kwargs): - self.kwargs = kwargs - - if 'code' not in self.kwargs: - try: - self.kwargs['code'] = self.code - except AttributeError: - pass - - if not message: - try: - message = self.msg_fmt % kwargs - except Exception: - # kwargs doesn't match a variable in msg_fmt - # log the issue and the kwargs - LOG.exception('Exception in string format operation') - for name, value in kwargs.items(): - LOG.error("%(name)s: %(value)s", - {'name': name, 'value': value}) - - if CONF.fatal_exception_format_errors: - raise - else: - # at least get the core msg_fmt out if something happened - message = self.msg_fmt - - super(WatcherException, self).__init__(message) - - def __str__(self): - """Encode to utf-8 then wsme api can consume it as well""" - if not six.PY3: - return six.text_type(self.args[0]).encode('utf-8') - else: - return self.args[0] - - def __unicode__(self): - return six.text_type(self.args[0]) - - def format_message(self): - if self.__class__.__name__.endswith('_Remote'): - return self.args[0] - else: - return six.text_type(self) - - -class UnsupportedError(WatcherException): - msg_fmt = _("Not supported") - - -class NotAuthorized(WatcherException): - msg_fmt = _("Not authorized") - code = 403 - - -class PolicyNotAuthorized(NotAuthorized): - msg_fmt = _("Policy doesn't allow %(action)s to be performed.") - - -class OperationNotPermitted(NotAuthorized): - msg_fmt = _("Operation not permitted") - - -class Invalid(WatcherException, ValueError): - msg_fmt = _("Unacceptable parameters") - code = 400 - - -class ObjectNotFound(WatcherException): - msg_fmt = _("The %(name)s %(id)s could not be found") - - -class Conflict(WatcherException): - msg_fmt = _('Conflict') - code = 409 - - -class ResourceNotFound(ObjectNotFound): - msg_fmt = _("The %(name)s resource %(id)s could not be found") - code = 404 - - -class InvalidParameter(Invalid): - 
msg_fmt = _("%(parameter)s has to be of type %(parameter_type)s") - - -class InvalidIdentity(Invalid): - msg_fmt = _("Expected a uuid or int but received %(identity)s") - - -class InvalidOperator(Invalid): - msg_fmt = _("Filter operator is not valid: %(operator)s not " - "in %(valid_operators)s") - - -class InvalidGoal(Invalid): - msg_fmt = _("Goal %(goal)s is invalid") - - -class InvalidStrategy(Invalid): - msg_fmt = _("Strategy %(strategy)s is invalid") - - -class InvalidAudit(Invalid): - msg_fmt = _("Audit %(audit)s is invalid") - - -class EagerlyLoadedAuditRequired(InvalidAudit): - msg_fmt = _("Audit %(audit)s was not eagerly loaded") - - -class InvalidActionPlan(Invalid): - msg_fmt = _("Action plan %(action_plan)s is invalid") - - -class EagerlyLoadedActionPlanRequired(InvalidActionPlan): - msg_fmt = _("Action plan %(action_plan)s was not eagerly loaded") - - -class EagerlyLoadedActionRequired(InvalidActionPlan): - msg_fmt = _("Action %(action)s was not eagerly loaded") - - -class InvalidUUID(Invalid): - msg_fmt = _("Expected a uuid but received %(uuid)s") - - -class InvalidName(Invalid): - msg_fmt = _("Expected a logical name but received %(name)s") - - -class InvalidUuidOrName(Invalid): - msg_fmt = _("Expected a logical name or uuid but received %(name)s") - - -class InvalidIntervalOrCron(Invalid): - msg_fmt = _("Expected an interval or cron syntax but received %(name)s") - - -class GoalNotFound(ResourceNotFound): - msg_fmt = _("Goal %(goal)s could not be found") - - -class GoalAlreadyExists(Conflict): - msg_fmt = _("A goal with UUID %(uuid)s already exists") - - -class StrategyNotFound(ResourceNotFound): - msg_fmt = _("Strategy %(strategy)s could not be found") - - -class StrategyAlreadyExists(Conflict): - msg_fmt = _("A strategy with UUID %(uuid)s already exists") - - -class AuditTemplateNotFound(ResourceNotFound): - msg_fmt = _("AuditTemplate %(audit_template)s could not be found") - - -class AuditTemplateAlreadyExists(Conflict): - msg_fmt = _("An 
audit_template with UUID or name %(audit_template)s " - "already exists") - - -class AuditTemplateReferenced(Invalid): - msg_fmt = _("AuditTemplate %(audit_template)s is referenced by one or " - "multiple audits") - - -class AuditTypeNotFound(Invalid): - msg_fmt = _("Audit type %(audit_type)s could not be found") - - -class AuditParameterNotAllowed(Invalid): - msg_fmt = _("Audit parameter %(parameter)s are not allowed") - - -class AuditNotFound(ResourceNotFound): - msg_fmt = _("Audit %(audit)s could not be found") - - -class AuditAlreadyExists(Conflict): - msg_fmt = _("An audit with UUID %(uuid)s already exists") - - -class AuditIntervalNotSpecified(Invalid): - msg_fmt = _("Interval of audit must be specified for %(audit_type)s.") - - -class AuditIntervalNotAllowed(Invalid): - msg_fmt = _("Interval of audit must not be set for %(audit_type)s.") - - -class AuditReferenced(Invalid): - msg_fmt = _("Audit %(audit)s is referenced by one or multiple action " - "plans") - - -class ActionPlanNotFound(ResourceNotFound): - msg_fmt = _("ActionPlan %(action_plan)s could not be found") - - -class ActionPlanAlreadyExists(Conflict): - msg_fmt = _("An action plan with UUID %(uuid)s already exists") - - -class ActionPlanReferenced(Invalid): - msg_fmt = _("Action Plan %(action_plan)s is referenced by one or " - "multiple actions") - - -class ActionPlanCancelled(WatcherException): - msg_fmt = _("Action Plan with UUID %(uuid)s is cancelled by user") - - -class ActionPlanIsOngoing(Conflict): - msg_fmt = _("Action Plan %(action_plan)s is currently running.") - - -class ActionNotFound(ResourceNotFound): - msg_fmt = _("Action %(action)s could not be found") - - -class ActionAlreadyExists(Conflict): - msg_fmt = _("An action with UUID %(uuid)s already exists") - - -class ActionReferenced(Invalid): - msg_fmt = _("Action plan %(action_plan)s is referenced by one or " - "multiple goals") - - -class ActionFilterCombinationProhibited(Invalid): - msg_fmt = _("Filtering actions on both audit and 
action-plan is " - "prohibited") - - -class UnsupportedActionType(UnsupportedError): - msg_fmt = _("Provided %(action_type) is not supported yet") - - -class EfficacyIndicatorNotFound(ResourceNotFound): - msg_fmt = _("Efficacy indicator %(efficacy_indicator)s could not be found") - - -class EfficacyIndicatorAlreadyExists(Conflict): - msg_fmt = _("An action with UUID %(uuid)s already exists") - - -class ScoringEngineAlreadyExists(Conflict): - msg_fmt = _("A scoring engine with UUID %(uuid)s already exists") - - -class ScoringEngineNotFound(ResourceNotFound): - msg_fmt = _("ScoringEngine %(scoring_engine)s could not be found") - - -class HTTPNotFound(ResourceNotFound): - pass - - -class PatchError(Invalid): - msg_fmt = _("Couldn't apply patch '%(patch)s'. Reason: %(reason)s") - - -# decision engine - -class WorkflowExecutionException(WatcherException): - msg_fmt = _('Workflow execution error: %(error)s') - - -class IllegalArgumentException(WatcherException): - msg_fmt = _('Illegal argument') - - -class NoSuchMetric(WatcherException): - msg_fmt = _('No such metric') - - -class NoDataFound(WatcherException): - msg_fmt = _('No rows were returned') - - -class AuthorizationFailure(WatcherException): - msg_fmt = _('%(client)s connection failed. 
Reason: %(reason)s') - - -class KeystoneFailure(WatcherException): - msg_fmt = _("Keystone API endpoint is missing") - - -class ClusterEmpty(WatcherException): - msg_fmt = _("The list of compute node(s) in the cluster is empty") - - -class MetricCollectorNotDefined(WatcherException): - msg_fmt = _("The metrics resource collector is not defined") - - -class ClusterStateStale(WatcherException): - msg_fmt = _("The cluster state is stale") - - -class ClusterDataModelCollectionError(WatcherException): - msg_fmt = _("The cluster data model '%(cdm)s' could not be built") - - -class ClusterStateNotDefined(WatcherException): - msg_fmt = _("The cluster state is not defined") - - -class CapacityNotDefined(WatcherException): - msg_fmt = _("The capacity %(capacity)s is not defined for '%(resource)s'") - - -class NoAvailableStrategyForGoal(WatcherException): - msg_fmt = _("No strategy could be found to achieve the '%(goal)s' goal.") - - -class InvalidIndicatorValue(WatcherException): - msg_fmt = _("The indicator '%(name)s' with value '%(value)s' " - "and spec type '%(spec_type)s' is invalid.") - - -class GlobalEfficacyComputationError(WatcherException): - msg_fmt = _("Could not compute the global efficacy for the '%(goal)s' " - "goal using the '%(strategy)s' strategy.") - - -class NoMetricValuesForInstance(WatcherException): - msg_fmt = _("No values returned by %(resource_id)s for %(metric_name)s.") - - -class UnsupportedDataSource(UnsupportedError): - msg_fmt = _("Datasource %(datasource)s is not supported " - "by strategy %(strategy)s") - - -class NoSuchMetricForHost(WatcherException): - msg_fmt = _("No %(metric)s metric for %(host)s found.") - - -class ServiceAlreadyExists(Conflict): - msg_fmt = _("A service with name %(name)s is already working on %(host)s.") - - -class ServiceNotFound(ResourceNotFound): - msg_fmt = _("The service %(service)s cannot be found.") - - -class WildcardCharacterIsUsed(WatcherException): - msg_fmt = _("You shouldn't use any other IDs of 
%(resource)s if you use " - "wildcard character.") - - -class CronFormatIsInvalid(WatcherException): - msg_fmt = _("Provided cron is invalid: %(message)s") - - -# Model - -class ComputeResourceNotFound(WatcherException): - msg_fmt = _("The compute resource '%(name)s' could not be found") - - -class InstanceNotFound(ComputeResourceNotFound): - msg_fmt = _("The instance '%(name)s' could not be found") - - -class ComputeNodeNotFound(ComputeResourceNotFound): - msg_fmt = _("The compute node %(name)s could not be found") - - -class StorageResourceNotFound(WatcherException): - msg_fmt = _("The storage resource '%(name)s' could not be found") - - -class StorageNodeNotFound(StorageResourceNotFound): - msg_fmt = _("The storage node %(name)s could not be found") - - -class PoolNotFound(StorageResourceNotFound): - msg_fmt = _("The pool %(name)s could not be found") - - -class VolumeNotFound(StorageResourceNotFound): - msg_fmt = _("The volume '%(name)s' could not be found") - - -class LoadingError(WatcherException): - msg_fmt = _("Error loading plugin '%(name)s'") - - -class ReservedWord(WatcherException): - msg_fmt = _("The identifier '%(name)s' is a reserved word") - - -class NotSoftDeletedStateError(WatcherException): - msg_fmt = _("The %(name)s resource %(id)s is not soft deleted") - - -class NegativeLimitError(WatcherException): - msg_fmt = _("Limit should be positive") - - -class NotificationPayloadError(WatcherException): - _msg_fmt = _("Payload not populated when trying to send notification " - "\"%(class_name)s\"") diff --git a/watcher/common/loader/__init__.py b/watcher/common/loader/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/common/loader/base.py b/watcher/common/loader/base.py deleted file mode 100644 index 322cb43..0000000 --- a/watcher/common/loader/base.py +++ /dev/null @@ -1,32 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not 
use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import unicode_literals - -import abc -import six - - -@six.add_metaclass(abc.ABCMeta) -class BaseLoader(object): - - @abc.abstractmethod - def list_available(self): - raise NotImplementedError() - - @abc.abstractmethod - def load(self, name): - raise NotImplementedError() diff --git a/watcher/common/loader/default.py b/watcher/common/loader/default.py deleted file mode 100644 index 3ef63bc..0000000 --- a/watcher/common/loader/default.py +++ /dev/null @@ -1,96 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from __future__ import unicode_literals - -from oslo_config import cfg -from oslo_log import log -from stevedore import driver as drivermanager -from stevedore import extension as extensionmanager - -from watcher.common import exception -from watcher.common.loader import base -from watcher.common import utils - -LOG = log.getLogger(__name__) - - -class DefaultLoader(base.BaseLoader): - - def __init__(self, namespace, conf=cfg.CONF): - """Entry point loader for Watcher using Stevedore - - :param namespace: namespace of the entry point(s) to load or list - :type namespace: str - :param conf: ConfigOpts instance, defaults to cfg.CONF - """ - super(DefaultLoader, self).__init__() - self.namespace = namespace - self.conf = conf - - def load(self, name, **kwargs): - try: - LOG.debug("Loading in namespace %s => %s ", self.namespace, name) - driver_manager = drivermanager.DriverManager( - namespace=self.namespace, - name=name, - invoke_on_load=False, - ) - - driver_cls = driver_manager.driver - config = self._load_plugin_config(name, driver_cls) - - driver = driver_cls(config, **kwargs) - except Exception as exc: - LOG.exception(exc) - raise exception.LoadingError(name=name) - - return driver - - def _reload_config(self): - self.conf(default_config_files=self.conf.default_config_files) - - def get_entry_name(self, name): - return ".".join([self.namespace, name]) - - def _load_plugin_config(self, name, driver_cls): - """Load the config of the plugin""" - config = utils.Struct() - config_opts = driver_cls.get_config_opts() - if not config_opts: - return config - - group_name = self.get_entry_name(name) - self.conf.register_opts(config_opts, group=group_name) - - # Finalise the opt import by re-checking the configuration - # against the provided config files - self._reload_config() - - config_group = self.conf.get(group_name) - if not config_group: - raise exception.LoadingError(name=name) - - config.update({ - name: value for name, value in config_group.items() - }) - - 
return config - - def list_available(self): - extension_manager = extensionmanager.ExtensionManager( - namespace=self.namespace) - return {ext.name: ext.plugin for ext in extension_manager.extensions} diff --git a/watcher/common/loader/loadable.py b/watcher/common/loader/loadable.py deleted file mode 100644 index c234274..0000000 --- a/watcher/common/loader/loadable.py +++ /dev/null @@ -1,72 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import abc - -import six - -from watcher.common import service - - -@six.add_metaclass(abc.ABCMeta) -class Loadable(object): - """Generic interface for dynamically loading a driver/entry point. - - This defines the contract in order to let the loader manager inject - the configuration parameters during the loading. - """ - - def __init__(self, config): - super(Loadable, self).__init__() - self.config = config - - @classmethod - @abc.abstractmethod - def get_config_opts(cls): - """Defines the configuration options to be associated to this loadable - - :return: A list of configuration options relative to this Loadable - :rtype: list of :class:`oslo_config.cfg.Opt` instances - """ - raise NotImplementedError - - -LoadableSingletonMeta = type( - "LoadableSingletonMeta", (abc.ABCMeta, service.Singleton), {}) - - -@six.add_metaclass(LoadableSingletonMeta) -class LoadableSingleton(object): - """Generic interface for dynamically loading a driver as a singleton. 
- - This defines the contract in order to let the loader manager inject - the configuration parameters during the loading. Classes inheriting from - this class will be singletons. - """ - - def __init__(self, config): - super(LoadableSingleton, self).__init__() - self.config = config - - @classmethod - @abc.abstractmethod - def get_config_opts(cls): - """Defines the configuration options to be associated to this loadable - - :return: A list of configuration options relative to this Loadable - :rtype: list of :class:`oslo_config.cfg.Opt` instances - """ - raise NotImplementedError diff --git a/watcher/common/nova_helper.py b/watcher/common/nova_helper.py deleted file mode 100644 index 52994f4..0000000 --- a/watcher/common/nova_helper.py +++ /dev/null @@ -1,866 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Authors: Jean-Emile DARTOIS -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - -import random -import time - -from oslo_log import log - -import cinderclient.exceptions as ciexceptions -import glanceclient.exc as glexceptions -import novaclient.exceptions as nvexceptions - -from watcher.common import clients -from watcher.common import exception -from watcher.common import utils - -LOG = log.getLogger(__name__) - - -class NovaHelper(object): - - def __init__(self, osc=None): - """:param osc: an OpenStackClients instance""" - self.osc = osc if osc else clients.OpenStackClients() - self.neutron = self.osc.neutron() - self.cinder = self.osc.cinder() - self.nova = self.osc.nova() - self.glance = self.osc.glance() - - def get_compute_node_list(self): - return self.nova.hypervisors.list() - - def get_compute_node_by_id(self, node_id): - """Get compute node by ID (*not* UUID)""" - # We need to pass an object with an 'id' attribute to make it work - return self.nova.hypervisors.get(utils.Struct(id=node_id)) - - def get_compute_node_by_hostname(self, node_hostname): - """Get compute node by ID (*not* UUID)""" - # We need to pass an object with an 'id' attribute to make it work - try: - compute_nodes = self.nova.hypervisors.search(node_hostname) - if len(compute_nodes) != 1: - raise exception.ComputeNodeNotFound(name=node_hostname) - - return self.get_compute_node_by_id(compute_nodes[0].id) - except Exception as exc: - LOG.exception(exc) - raise exception.ComputeNodeNotFound(name=node_hostname) - - def get_instance_list(self): - return self.nova.servers.list(search_opts={'all_tenants': True}) - - def get_service(self, service_id): - return self.nova.services.find(id=service_id) - - def get_flavor(self, flavor_id): - return self.nova.flavors.get(flavor_id) - - def get_aggregate_list(self): - return self.nova.aggregates.list() - - def get_aggregate_detail(self, aggregate_id): - return self.nova.aggregates.get(aggregate_id) - - def get_availability_zone_list(self): - return self.nova.availability_zones.list(detailed=True) - - def find_instance(self, 
instance_id): - return self.nova.servers.get(instance_id) - - def confirm_resize(self, instance, previous_status, retry=60): - instance.confirm_resize() - instance = self.nova.servers.get(instance.id) - while instance.status != previous_status and retry: - instance = self.nova.servers.get(instance.id) - retry -= 1 - time.sleep(1) - if instance.status == previous_status: - return True - else: - LOG.debug("confirm resize failed for the " - "instance %s" % instance.id) - return False - - def wait_for_volume_status(self, volume, status, timeout=60, - poll_interval=1): - """Wait until volume reaches given status. - - :param volume: volume resource - :param status: expected status of volume - :param timeout: timeout in seconds - :param poll_interval: poll interval in seconds - """ - start_time = time.time() - while time.time() - start_time < timeout: - volume = self.cinder.volumes.get(volume.id) - if volume.status == status: - break - time.sleep(poll_interval) - else: - raise Exception("Volume %s did not reach status %s after %d s" - % (volume.id, status, timeout)) - return volume.status == status - - def watcher_non_live_migrate_instance(self, instance_id, dest_hostname, - keep_original_image_name=True, - retry=120): - """This method migrates a given instance - - using an image of this instance and creating a new instance - from this image. It saves some configuration information - about the original instance : security group, list of networks, - list of attached volumes, floating IP, ... - in order to apply the same settings to the new instance. - At the end of the process the original instance is deleted. - It returns True if the migration was successful, - False otherwise. - - if destination hostname not given, this method calls nova api - to migrate the instance. - - :param instance_id: the unique id of the instance to migrate. 
- :param keep_original_image_name: flag indicating whether the - image name from which the original instance was built must be - used as the name of the intermediate image used for migration. - If this flag is False, a temporary image name is built - """ - new_image_name = "" - LOG.debug( - "Trying a non-live migrate of instance '%s' " % instance_id) - - # Looking for the instance to migrate - instance = self.find_instance(instance_id) - if not instance: - LOG.debug("Instance %s not found !" % instance_id) - return False - else: - # NOTE: If destination node is None call Nova API to migrate - # instance - host_name = getattr(instance, "OS-EXT-SRV-ATTR:host") - LOG.debug( - "Instance %s found on host '%s'." % (instance_id, host_name)) - - if dest_hostname is None: - previous_status = getattr(instance, 'status') - - instance.migrate() - instance = self.nova.servers.get(instance_id) - while (getattr(instance, 'status') not in - ["VERIFY_RESIZE", "ERROR"] and retry): - instance = self.nova.servers.get(instance.id) - time.sleep(2) - retry -= 1 - new_hostname = getattr(instance, 'OS-EXT-SRV-ATTR:host') - - if (host_name != new_hostname and - instance.status == 'VERIFY_RESIZE'): - if not self.confirm_resize(instance, previous_status): - return False - LOG.debug( - "cold migration succeeded : " - "instance %s is now on host '%s'." % ( - instance_id, new_hostname)) - return True - else: - LOG.debug( - "cold migration for instance %s failed" % instance_id) - return False - - if not keep_original_image_name: - # randrange gives you an integral value - irand = random.randint(0, 1000) - - # Building the temporary image name - # which will be used for the migration - new_image_name = "tmp-migrate-%s-%s" % (instance_id, irand) - else: - # Get the image name of the current instance. - # We'll use the same name for the new instance. 
- imagedict = getattr(instance, "image") - image_id = imagedict["id"] - image = self.glance.images.get(image_id) - new_image_name = getattr(image, "name") - - instance_name = getattr(instance, "name") - flavordict = getattr(instance, "flavor") - # a_dict = dict([flavorstr.strip('{}').split(":"),]) - flavor_id = flavordict["id"] - flavor = self.nova.flavors.get(flavor_id) - flavor_name = getattr(flavor, "name") - keypair_name = getattr(instance, "key_name") - - addresses = getattr(instance, "addresses") - - floating_ip = "" - network_names_list = [] - - for network_name, network_conf_obj in addresses.items(): - LOG.debug( - "Extracting network configuration for network '%s'" % - network_name) - - network_names_list.append(network_name) - - for net_conf_item in network_conf_obj: - if net_conf_item['OS-EXT-IPS:type'] == "floating": - floating_ip = net_conf_item['addr'] - break - - sec_groups_list = getattr(instance, "security_groups") - sec_groups = [] - - for sec_group_dict in sec_groups_list: - sec_groups.append(sec_group_dict['name']) - - # Stopping the old instance properly so - # that no new data is sent to it and to its attached volumes - stopped_ok = self.stop_instance(instance_id) - - if not stopped_ok: - LOG.debug("Could not stop instance: %s" % instance_id) - return False - - # Building the temporary image which will be used - # to re-build the same instance on another target host - image_uuid = self.create_image_from_instance(instance_id, - new_image_name) - - if not image_uuid: - LOG.debug( - "Could not build temporary image of instance: %s" % - instance_id) - return False - - # - # We need to get the list of attached volumes and detach - # them from the instance in order to attache them later - # to the new instance - # - blocks = [] - - # Looks like this : - # os-extended-volumes:volumes_attached | - # [{u'id': u'c5c3245f-dd59-4d4f-8d3a-89d80135859a'}] - attached_volumes = getattr(instance, - "os-extended-volumes:volumes_attached") - - for 
attached_volume in attached_volumes: - volume_id = attached_volume['id'] - - try: - volume = self.cinder.volumes.get(volume_id) - - attachments_list = getattr(volume, "attachments") - - device_name = attachments_list[0]['device'] - # When a volume is attached to an instance - # it contains the following property : - # attachments = [{u'device': u'/dev/vdb', - # u'server_id': u'742cc508-a2f2-4769-a794-bcdad777e814', - # u'id': u'f6d62785-04b8-400d-9626-88640610f65e', - # u'host_name': None, u'volume_id': - # u'f6d62785-04b8-400d-9626-88640610f65e'}] - - # boot_index indicates a number - # designating the boot order of the device. - # Use -1 for the boot volume, - # choose 0 for an attached volume. - block_device_mapping_v2_item = {"device_name": device_name, - "source_type": "volume", - "destination_type": - "volume", - "uuid": volume_id, - "boot_index": "0"} - - blocks.append( - block_device_mapping_v2_item) - - LOG.debug("Detaching volume %s from instance: %s" % ( - volume_id, instance_id)) - # volume.detach() - self.nova.volumes.delete_server_volume(instance_id, - volume_id) - - if not self.wait_for_volume_status(volume, "available", 5, - 10): - LOG.debug( - "Could not detach volume %s from instance: %s" % ( - volume_id, instance_id)) - return False - except ciexceptions.NotFound: - LOG.debug("Volume '%s' not found " % image_id) - return False - - # We create the new instance from - # the intermediate image of the original instance - new_instance = self. 
\ - create_instance(dest_hostname, - instance_name, - image_uuid, - flavor_name, - sec_groups, - network_names_list=network_names_list, - keypair_name=keypair_name, - create_new_floating_ip=False, - block_device_mapping_v2=blocks) - - if not new_instance: - LOG.debug( - "Could not create new instance " - "for non-live migration of instance %s" % instance_id) - return False - - try: - LOG.debug("Detaching floating ip '%s' from instance %s" % ( - floating_ip, instance_id)) - # We detach the floating ip from the current instance - instance.remove_floating_ip(floating_ip) - - LOG.debug( - "Attaching floating ip '%s' to the new instance %s" % ( - floating_ip, new_instance.id)) - - # We attach the same floating ip to the new instance - new_instance.add_floating_ip(floating_ip) - except Exception as e: - LOG.debug(e) - - new_host_name = getattr(new_instance, "OS-EXT-SRV-ATTR:host") - - # Deleting the old instance (because no more useful) - delete_ok = self.delete_instance(instance_id) - if not delete_ok: - LOG.debug("Could not delete instance: %s" % instance_id) - return False - - LOG.debug( - "Instance %s has been successfully migrated " - "to new host '%s' and its new id is %s." % ( - instance_id, new_host_name, new_instance.id)) - - return True - - def resize_instance(self, instance_id, flavor, retry=120): - """This method resizes given instance with specified flavor. - - This method uses the Nova built-in resize() - action to do a resize of a given instance. - - It returns True if the resize was successful, - False otherwise. - - :param instance_id: the unique id of the instance to resize. - :param flavor: the name or ID of the flavor to resize to. 
- """ - LOG.debug("Trying a resize of instance %s to flavor '%s'" % ( - instance_id, flavor)) - - # Looking for the instance to resize - instance = self.find_instance(instance_id) - - flavor_id = None - - try: - flavor_id = self.nova.flavors.get(flavor) - except nvexceptions.NotFound: - flavor_id = [f.id for f in self.nova.flavors.list() if - f.name == flavor][0] - except nvexceptions.ClientException as e: - LOG.debug("Nova client exception occurred while resizing " - "instance %s. Exception: %s", instance_id, e) - - if not flavor_id: - LOG.debug("Flavor not found: %s" % flavor) - return False - - if not instance: - LOG.debug("Instance not found: %s" % instance_id) - return False - - instance_status = getattr(instance, 'OS-EXT-STS:vm_state') - LOG.debug( - "Instance %s is in '%s' status." % (instance_id, - instance_status)) - - instance.resize(flavor=flavor_id) - while getattr(instance, - 'OS-EXT-STS:vm_state') != 'resized' \ - and retry: - instance = self.nova.servers.get(instance.id) - LOG.debug( - 'Waiting the resize of {0} to {1}'.format( - instance, flavor_id)) - time.sleep(1) - retry -= 1 - - instance_status = getattr(instance, 'status') - if instance_status != 'VERIFY_RESIZE': - return False - - instance.confirm_resize() - - LOG.debug("Resizing succeeded : instance %s is now on flavor " - "'%s'.", instance_id, flavor_id) - - return True - - def live_migrate_instance(self, instance_id, dest_hostname, - block_migration=False, retry=120): - """This method does a live migration of a given instance - - This method uses the Nova built-in live_migrate() - action to do a live migration of a given instance. - - It returns True if the migration was successful, - False otherwise. - - :param instance_id: the unique id of the instance to migrate. - :param dest_hostname: the name of the destination compute node, if - destination_node is None, nova scheduler choose - the destination host - :param block_migration: No shared storage is required. 
- """ - LOG.debug("Trying to live migrate instance %s " % (instance_id)) - - # Looking for the instance to migrate - instance = self.find_instance(instance_id) - if not instance: - LOG.debug("Instance not found: %s" % instance_id) - return False - else: - host_name = getattr(instance, 'OS-EXT-SRV-ATTR:host') - LOG.debug( - "Instance %s found on host '%s'." % (instance_id, host_name)) - - instance.live_migrate(host=dest_hostname, - block_migration=block_migration, - disk_over_commit=True) - - instance = self.nova.servers.get(instance_id) - - # NOTE: If destination host is not specified for live migration - # let nova scheduler choose the destination host. - if dest_hostname is None: - while (instance.status not in ['ACTIVE', 'ERROR'] and retry): - instance = self.nova.servers.get(instance.id) - LOG.debug( - 'Waiting the migration of {0}'.format(instance.id)) - time.sleep(1) - retry -= 1 - new_hostname = getattr(instance, 'OS-EXT-SRV-ATTR:host') - - if host_name != new_hostname and instance.status == 'ACTIVE': - LOG.debug( - "Live migration succeeded : " - "instance %s is now on host '%s'." % ( - instance_id, new_hostname)) - return True - else: - return False - - while getattr(instance, - 'OS-EXT-SRV-ATTR:host') != dest_hostname \ - and retry: - instance = self.nova.servers.get(instance.id) - LOG.debug( - 'Waiting the migration of {0} to {1}'.format( - instance, - getattr(instance, - 'OS-EXT-SRV-ATTR:host'))) - time.sleep(1) - retry -= 1 - - host_name = getattr(instance, 'OS-EXT-SRV-ATTR:host') - if host_name != dest_hostname: - return False - - LOG.debug( - "Live migration succeeded : " - "instance %s is now on host '%s'." 
% ( - instance_id, host_name)) - - return True - - def abort_live_migrate(self, instance_id, source, destination, retry=240): - LOG.debug("Aborting live migration of instance %s" % instance_id) - migration = self.get_running_migration(instance_id) - if migration: - migration_id = getattr(migration[0], "id") - try: - self.nova.server_migrations.live_migration_abort( - server=instance_id, migration=migration_id) - except exception as e: - # Note: Does not return from here, as abort request can't be - # accepted but migration still going on. - LOG.exception(e) - else: - LOG.debug( - "No running migrations found for instance %s" % instance_id) - - while retry: - instance = self.nova.servers.get(instance_id) - if (getattr(instance, 'OS-EXT-STS:task_state') is None and - getattr(instance, 'status') in ['ACTIVE', 'ERROR']): - break - time.sleep(2) - retry -= 1 - instance_host = getattr(instance, 'OS-EXT-SRV-ATTR:host') - instance_status = getattr(instance, 'status') - - # Abort live migration successfull, action is cancelled - if instance_host == source and instance_status == 'ACTIVE': - return True - # Nova Unable to abort live migration, action is succeded - elif instance_host == destination and instance_status == 'ACTIVE': - return False - - else: - raise Exception("Live migration execution and abort both failed " - "for the instance %s" % instance_id) - - def enable_service_nova_compute(self, hostname): - if self.nova.services.enable(host=hostname, - binary='nova-compute'). \ - status == 'enabled': - return True - else: - return False - - def disable_service_nova_compute(self, hostname): - if self.nova.services.disable(host=hostname, - binary='nova-compute'). 
\ - status == 'disabled': - return True - else: - return False - - def set_host_offline(self, hostname): - # See API on http://developer.openstack.org/api-ref-compute-v2.1.html - # especially the PUT request - # regarding this resource : /v2.1/os-hosts/​{host_name}​ - # - # The following body should be sent : - # { - # "host": { - # "host": "65c5d5b7e3bd44308e67fc50f362aee6", - # "maintenance_mode": "off_maintenance", - # "status": "enabled" - # } - # } - - # Voir ici - # https://github.com/openstack/nova/ - # blob/master/nova/virt/xenapi/host.py - # set_host_enabled(self, enabled): - # Sets the compute host's ability to accept new instances. - # host_maintenance_mode(self, host, mode): - # Start/Stop host maintenance window. - # On start, it triggers guest instances evacuation. - host = self.nova.hosts.get(hostname) - - if not host: - LOG.debug("host not found: %s" % hostname) - return False - else: - host[0].update( - {"maintenance_mode": "disable", "status": "disable"}) - return True - - def create_image_from_instance(self, instance_id, image_name, - metadata={"reason": "instance_migrate"}): - """This method creates a new image from a given instance. - - It waits for this image to be in 'active' state before returning. - It returns the unique UUID of the created image if successful, - None otherwise. - - :param instance_id: the uniqueid of - the instance to backup as an image. - :param image_name: the name of the image to create. - :param metadata: a dictionary containing the list of - key-value pairs to associate to the image as metadata. - """ - LOG.debug( - "Trying to create an image from instance %s ..." % instance_id) - - # Looking for the instance - instance = self.find_instance(instance_id) - - if not instance: - LOG.debug("Instance not found: %s" % instance_id) - return None - else: - host_name = getattr(instance, 'OS-EXT-SRV-ATTR:host') - LOG.debug( - "Instance %s found on host '%s'." 
% (instance_id, host_name)) - - # We need to wait for an appropriate status - # of the instance before we can build an image from it - if self.wait_for_instance_status(instance, ('ACTIVE', 'SHUTOFF'), - 5, - 10): - image_uuid = self.nova.servers.create_image(instance_id, - image_name, - metadata) - - image = self.glance.images.get(image_uuid) - if not image: - return None - - # Waiting for the new image to be officially in ACTIVE state - # in order to make sure it can be used - status = image.status - retry = 10 - while status != 'active' and status != 'error' and retry: - time.sleep(5) - retry -= 1 - # Retrieve the instance again so the status field updates - image = self.glance.images.get(image_uuid) - if not image: - break - status = image.status - LOG.debug("Current image status: %s" % status) - - if not image: - LOG.debug("Image not found: %s" % image_uuid) - else: - LOG.debug( - "Image %s successfully created for instance %s" % ( - image_uuid, instance_id)) - return image_uuid - return None - - def delete_instance(self, instance_id): - """This method deletes a given instance. - - :param instance_id: the unique id of the instance to delete. - """ - LOG.debug("Trying to remove instance %s ..." % instance_id) - - instance = self.find_instance(instance_id) - - if not instance: - LOG.debug("Instance not found: %s" % instance_id) - return False - else: - self.nova.servers.delete(instance_id) - LOG.debug("Instance %s removed." % instance_id) - return True - - def stop_instance(self, instance_id): - """This method stops a given instance. - - :param instance_id: the unique id of the instance to stop. - """ - LOG.debug("Trying to stop instance %s ..." 
% instance_id) - - instance = self.find_instance(instance_id) - - if not instance: - LOG.debug("Instance not found: %s" % instance_id) - return False - elif getattr(instance, 'OS-EXT-STS:vm_state') == "stopped": - LOG.debug("Instance has been stopped: %s" % instance_id) - return True - else: - self.nova.servers.stop(instance_id) - - if self.wait_for_instance_state(instance, "stopped", 8, 10): - LOG.debug("Instance %s stopped." % instance_id) - return True - else: - return False - - def wait_for_instance_state(self, server, state, retry, sleep): - """Waits for server to be in a specific state - - The state can be one of the following : - active, stopped - - :param server: server object. - :param state: for which state we are waiting for - :param retry: how many times to retry - :param sleep: seconds to sleep between the retries - """ - if not server: - return False - - while getattr(server, 'OS-EXT-STS:vm_state') != state and retry: - time.sleep(sleep) - server = self.nova.servers.get(server) - retry -= 1 - return getattr(server, 'OS-EXT-STS:vm_state') == state - - def wait_for_instance_status(self, instance, status_list, retry, sleep): - """Waits for instance to be in a specific status - - The status can be one of the following - : BUILD, ACTIVE, ERROR, VERIFY_RESIZE, SHUTOFF - - :param instance: instance object. 
- :param status_list: tuple containing the list of - status we are waiting for - :param retry: how many times to retry - :param sleep: seconds to sleep between the retries - """ - if not instance: - return False - - while instance.status not in status_list and retry: - LOG.debug("Current instance status: %s" % instance.status) - time.sleep(sleep) - instance = self.nova.servers.get(instance.id) - retry -= 1 - LOG.debug("Current instance status: %s" % instance.status) - return instance.status in status_list - - def create_instance(self, node_id, inst_name="test", image_id=None, - flavor_name="m1.tiny", - sec_group_list=["default"], - network_names_list=["demo-net"], keypair_name="mykeys", - create_new_floating_ip=True, - block_device_mapping_v2=None): - """This method creates a new instance - - It also creates, if requested, a new floating IP and associates - it with the new instance - It returns the unique id of the created instance. - """ - LOG.debug( - "Trying to create new instance '%s' " - "from image '%s' with flavor '%s' ..." 
% ( - inst_name, image_id, flavor_name)) - - try: - self.nova.keypairs.findall(name=keypair_name) - except nvexceptions.NotFound: - LOG.debug("Key pair '%s' not found " % keypair_name) - return - - try: - image = self.glance.images.get(image_id) - except glexceptions.NotFound: - LOG.debug("Image '%s' not found " % image_id) - return - - try: - flavor = self.nova.flavors.find(name=flavor_name) - except nvexceptions.NotFound: - LOG.debug("Flavor '%s' not found " % flavor_name) - return - - # Make sure all security groups exist - for sec_group_name in sec_group_list: - try: - self.nova.security_groups.find(name=sec_group_name) - - except nvexceptions.NotFound: - LOG.debug("Security group '%s' not found " % sec_group_name) - return - - net_list = list() - - for network_name in network_names_list: - nic_id = self.get_network_id_from_name(network_name) - - if not nic_id: - LOG.debug("Network '%s' not found " % network_name) - return - net_obj = {"net-id": nic_id} - net_list.append(net_obj) - - instance = self.nova.servers.create( - inst_name, image, - flavor=flavor, - key_name=keypair_name, - security_groups=sec_group_list, - nics=net_list, - block_device_mapping_v2=block_device_mapping_v2, - availability_zone="nova:%s" % node_id) - - # Poll at 5 second intervals, until the status is no longer 'BUILD' - if instance: - if self.wait_for_instance_status(instance, - ('ACTIVE', 'ERROR'), 5, 10): - instance = self.nova.servers.get(instance.id) - - if create_new_floating_ip and instance.status == 'ACTIVE': - LOG.debug( - "Creating a new floating IP" - " for instance '%s'" % instance.id) - # Creating floating IP for the new instance - floating_ip = self.nova.floating_ips.create() - - instance.add_floating_ip(floating_ip) - - LOG.debug("Instance %s associated to Floating IP '%s'" % ( - instance.id, floating_ip.ip)) - - return instance - - def get_network_id_from_name(self, net_name="private"): - """This method returns the unique id of the provided network name""" - networks = 
self.neutron.list_networks(name=net_name) - - # LOG.debug(networks) - network_id = networks['networks'][0]['id'] - - return network_id - - def get_instance_by_uuid(self, instance_uuid): - return [instance for instance in - self.nova.servers.list(search_opts={"all_tenants": True, - "uuid": instance_uuid})] - - def get_instance_by_name(self, instance_name): - return [instance for instance in - self.nova.servers.list(search_opts={"all_tenants": True, - "name": instance_name})] - - def get_instances_by_node(self, host): - return [instance for instance in - self.nova.servers.list(search_opts={"all_tenants": True}) - if self.get_hostname(instance) == host] - - def get_hostname(self, instance): - return str(getattr(instance, 'OS-EXT-SRV-ATTR:host')) - - def get_flavor_instance(self, instance, cache): - fid = instance.flavor['id'] - if fid in cache: - flavor = cache.get(fid) - else: - try: - flavor = self.nova.flavors.get(fid) - except ciexceptions.NotFound: - flavor = None - cache[fid] = flavor - attr_defaults = [('name', 'unknown-id-%s' % fid), - ('vcpus', 0), ('ram', 0), ('disk', 0), - ('ephemeral', 0), ('extra_specs', {})] - for attr, default in attr_defaults: - if not flavor: - instance.flavor[attr] = default - continue - instance.flavor[attr] = getattr(flavor, attr, default) - - def get_running_migration(self, instance_id): - return self.nova.server_migrations.list(server=instance_id) diff --git a/watcher/common/observable.py b/watcher/common/observable.py deleted file mode 100644 index 3f08185..0000000 --- a/watcher/common/observable.py +++ /dev/null @@ -1,57 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from watcher.common import synchronization - - -class Observable(synchronization.Synchronization): - def __init__(self): - super(Observable, self).__init__() - self.__observers = [] - self.changed = 0 - - def set_changed(self): - self.changed = 1 - - def clear_changed(self): - self.changed = 0 - - def has_changed(self): - return self.changed - - def register_observer(self, observer): - if observer not in self.__observers: - self.__observers.append(observer) - - def unregister_observer(self, observer): - try: - self.__observers.remove(observer) - except ValueError: - pass - - def notify(self, ctx=None, publisherid=None, event_type=None, - metadata=None, payload=None, modifier=None): - self.mutex.acquire() - try: - if not self.changed: - return - for observer in self.__observers: - if modifier != observer: - observer.update(self, ctx, metadata, publisherid, - event_type, payload) - self.clear_changed() - finally: - self.mutex.release() diff --git a/watcher/common/paths.py b/watcher/common/paths.py deleted file mode 100644 index ff05291..0000000 --- a/watcher/common/paths.py +++ /dev/null @@ -1,37 +0,0 @@ -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# Copyright 2012 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os - -from watcher import conf - -CONF = conf.CONF - - -def basedir_rel(*args): - """Return a path relative to $pybasedir.""" - return os.path.join(CONF.pybasedir, *args) - - -def bindir_rel(*args): - """Return a path relative to $bindir.""" - return os.path.join(CONF.bindir, *args) - - -def state_path_rel(*args): - """Return a path relative to $state_path.""" - return os.path.join(CONF.state_path, *args) diff --git a/watcher/common/policy.py b/watcher/common/policy.py deleted file mode 100644 index 30caafd..0000000 --- a/watcher/common/policy.py +++ /dev/null @@ -1,94 +0,0 @@ -# Copyright (c) 2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Policy Engine For Watcher.""" - -from oslo_config import cfg -from oslo_policy import policy - -from watcher.common import exception - - -_ENFORCER = None -CONF = cfg.CONF - - -# we can get a policy enforcer by this init. -# oslo policy support change policy rule dynamically. 
-# at present, policy.enforce will reload the policy rules when it checks -# the policy files have been touched. -def init(policy_file=None, rules=None, - default_rule=None, use_conf=True, overwrite=True): - """Init an Enforcer class. - - :param policy_file: Custom policy file to use, if none is - specified, ``conf.policy_file`` will be - used. - :param rules: Default dictionary / Rules to use. It will be - considered just in the first instantiation. If - :meth:`load_rules` with ``force_reload=True``, - :meth:`clear` or :meth:`set_rules` with - ``overwrite=True`` is called this will be overwritten. - :param default_rule: Default rule to use, conf.default_rule will - be used if none is specified. - :param use_conf: Whether to load rules from cache or config file. - :param overwrite: Whether to overwrite existing rules when reload rules - from config file. - """ - global _ENFORCER - if not _ENFORCER: - # http://docs.openstack.org/developer/oslo.policy/usage.html - _ENFORCER = policy.Enforcer(CONF, - policy_file=policy_file, - rules=rules, - default_rule=default_rule, - use_conf=use_conf, - overwrite=overwrite) - return _ENFORCER - - -def enforce(context, rule=None, target=None, - do_raise=True, exc=None, *args, **kwargs): - - """Checks authorization of a rule against the target and credentials. - - :param dict context: As much information about the user performing the - action as possible. - :param rule: The rule to evaluate. - :param dict target: As much information about the object being operated - on as possible. - :param do_raise: Whether to raise an exception or not if check - fails. - :param exc: Class of the exception to raise if the check fails. - Any remaining arguments passed to :meth:`enforce` (both - positional and keyword arguments) will be passed to - the exception class. If not specified, - :class:`PolicyNotAuthorized` will be used. 
- - :return: ``False`` if the policy does not allow the action and `exc` is - not provided; otherwise, returns a value that evaluates to - ``True``. Note: for rules using the "case" expression, this - ``True`` value will be the specified string from the - expression. - """ - enforcer = init() - credentials = context.to_dict() - if not exc: - exc = exception.PolicyNotAuthorized - if target is None: - target = {'project_id': context.project_id, - 'user_id': context.user_id} - return enforcer.enforce(rule, target, credentials, - do_raise=do_raise, exc=exc, *args, **kwargs) diff --git a/watcher/common/rpc.py b/watcher/common/rpc.py deleted file mode 100644 index 49197a0..0000000 --- a/watcher/common/rpc.py +++ /dev/null @@ -1,144 +0,0 @@ -# Copyright 2014 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from oslo_config import cfg -from oslo_log import log -import oslo_messaging as messaging - -from oslo_messaging.rpc import dispatcher - -from watcher.common import context as watcher_context -from watcher.common import exception - -__all__ = [ - 'init', - 'cleanup', - 'set_defaults', - 'add_extra_exmods', - 'clear_extra_exmods', - 'get_allowed_exmods', - 'RequestContextSerializer', - 'get_client', - 'get_server', - 'get_notifier', -] - -CONF = cfg.CONF -LOG = log.getLogger(__name__) -TRANSPORT = None -NOTIFICATION_TRANSPORT = None -NOTIFIER = None - -ALLOWED_EXMODS = [ - exception.__name__, -] -EXTRA_EXMODS = [] - - -JsonPayloadSerializer = messaging.JsonPayloadSerializer - - -def init(conf): - global TRANSPORT, NOTIFICATION_TRANSPORT, NOTIFIER - exmods = get_allowed_exmods() - TRANSPORT = messaging.get_rpc_transport( - conf, allowed_remote_exmods=exmods) - NOTIFICATION_TRANSPORT = messaging.get_notification_transport( - conf, allowed_remote_exmods=exmods) - - serializer = RequestContextSerializer(JsonPayloadSerializer()) - if not conf.notification_level: - NOTIFIER = messaging.Notifier( - NOTIFICATION_TRANSPORT, serializer=serializer, driver='noop') - else: - NOTIFIER = messaging.Notifier(NOTIFICATION_TRANSPORT, - serializer=serializer) - - -def initialized(): - return None not in [TRANSPORT, NOTIFIER] - - -def cleanup(): - global TRANSPORT, NOTIFICATION_TRANSPORT, NOTIFIER - if NOTIFIER is None: - LOG.exception("RPC cleanup: NOTIFIER is None") - TRANSPORT.cleanup() - NOTIFICATION_TRANSPORT.cleanup() - TRANSPORT = NOTIFICATION_TRANSPORT = NOTIFIER = None - - -def set_defaults(control_exchange): - messaging.set_transport_defaults(control_exchange) - - -def add_extra_exmods(*args): - EXTRA_EXMODS.extend(args) - - -def clear_extra_exmods(): - del EXTRA_EXMODS[:] - - -def get_allowed_exmods(): - return ALLOWED_EXMODS + EXTRA_EXMODS - - -class RequestContextSerializer(messaging.Serializer): - - def __init__(self, base): - self._base = base - - def 
serialize_entity(self, context, entity): - if not self._base: - return entity - return self._base.serialize_entity(context, entity) - - def deserialize_entity(self, context, entity): - if not self._base: - return entity - return self._base.deserialize_entity(context, entity) - - def serialize_context(self, context): - return context.to_dict() - - def deserialize_context(self, context): - return watcher_context.RequestContext.from_dict(context) - - -def get_client(target, version_cap=None, serializer=None): - assert TRANSPORT is not None - serializer = RequestContextSerializer(serializer) - return messaging.RPCClient(TRANSPORT, - target, - version_cap=version_cap, - serializer=serializer) - - -def get_server(target, endpoints, serializer=None): - assert TRANSPORT is not None - access_policy = dispatcher.DefaultRPCAccessPolicy - serializer = RequestContextSerializer(serializer) - return messaging.get_rpc_server(TRANSPORT, - target, - endpoints, - executor='eventlet', - serializer=serializer, - access_policy=access_policy) - - -def get_notifier(publisher_id): - assert NOTIFIER is not None - return NOTIFIER.prepare(publisher_id=publisher_id) diff --git a/watcher/common/scheduling.py b/watcher/common/scheduling.py deleted file mode 100644 index 90884d1..0000000 --- a/watcher/common/scheduling.py +++ /dev/null @@ -1,44 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Authors: Vincent FRANCOISE -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from apscheduler import events -from apscheduler.schedulers import background -from oslo_service import service - -job_events = events - - -class BackgroundSchedulerService(service.ServiceBase, - background.BackgroundScheduler): - - def start(self): - """Start service.""" - background.BackgroundScheduler.start(self) - - def stop(self): - """Stop service.""" - self.shutdown() - - def wait(self): - """Wait for service to complete.""" - - def reset(self): - """Reset service. - - Called in case service running in daemon mode receives SIGHUP. - """ diff --git a/watcher/common/service.py b/watcher/common/service.py deleted file mode 100644 index 61bdd2e..0000000 --- a/watcher/common/service.py +++ /dev/null @@ -1,308 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Copyright © 2012 eNovance -## -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import datetime -import socket - -import eventlet -from oslo_concurrency import processutils -from oslo_config import cfg -from oslo_log import _options -from oslo_log import log -import oslo_messaging as om -from oslo_reports import guru_meditation_report as gmr -from oslo_reports import opts as gmr_opts -from oslo_service import service -from oslo_service import wsgi - -from oslo_messaging.rpc import dispatcher - -from watcher._i18n import _ -from watcher.api import app -from watcher.common import config -from watcher.common import context -from watcher.common import rpc -from watcher.common import scheduling -from watcher.conf import plugins as plugins_conf -from watcher import objects -from watcher.objects import base -from watcher.objects import fields as wfields -from watcher import version - -# NOTE: -# Ubuntu 14.04 forces librabbitmq when kombu is used -# Unfortunately it forces a version that has a crash -# bug. Calling eventlet.monkey_patch() tells kombu -# to use libamqp instead. -eventlet.monkey_patch() - -NOTIFICATION_OPTS = [ - cfg.StrOpt('notification_level', - choices=[''] + list(wfields.NotificationPriority.ALL), - default=wfields.NotificationPriority.INFO, - help=_('Specifies the minimum level for which to send ' - 'notifications. If not set, no notifications will ' - 'be sent. 
The default is for this option to be at the ' - '`INFO` level.')) -] -cfg.CONF.register_opts(NOTIFICATION_OPTS) - - -CONF = cfg.CONF -LOG = log.getLogger(__name__) - -_DEFAULT_LOG_LEVELS = ['amqp=WARN', 'amqplib=WARN', 'qpid.messaging=INFO', - 'oslo.messaging=INFO', 'sqlalchemy=WARN', - 'keystoneclient=INFO', 'stevedore=INFO', - 'eventlet.wsgi.server=WARN', 'iso8601=WARN', - 'paramiko=WARN', 'requests=WARN', 'neutronclient=WARN', - 'glanceclient=WARN', 'watcher.openstack.common=WARN'] - -Singleton = service.Singleton - - -class WSGIService(service.ServiceBase): - """Provides ability to launch Watcher API from wsgi app.""" - - def __init__(self, service_name, use_ssl=False): - """Initialize, but do not start the WSGI server. - - :param service_name: The service name of the WSGI server. - :param use_ssl: Wraps the socket in an SSL context if True. - """ - self.service_name = service_name - self.app = app.VersionSelectorApplication() - self.workers = (CONF.api.workers or - processutils.get_worker_count()) - self.server = wsgi.Server(CONF, self.service_name, self.app, - host=CONF.api.host, - port=CONF.api.port, - use_ssl=use_ssl, - logger_name=self.service_name) - - def start(self): - """Start serving this service using loaded configuration""" - self.server.start() - - def stop(self): - """Stop serving this API""" - self.server.stop() - - def wait(self): - """Wait for the service to stop serving this API""" - self.server.wait() - - def reset(self): - """Reset server greenpool size to default""" - self.server.reset() - - -class ServiceHeartbeat(scheduling.BackgroundSchedulerService): - - service_name = None - - def __init__(self, gconfig=None, service_name=None, **kwargs): - gconfig = None or {} - super(ServiceHeartbeat, self).__init__(gconfig, **kwargs) - ServiceHeartbeat.service_name = service_name - self.context = context.make_context() - self.send_beat() - - def send_beat(self): - host = CONF.host - watcher_list = objects.Service.list( - self.context, 
filters={'name': ServiceHeartbeat.service_name, - 'host': host}) - if watcher_list: - watcher_service = watcher_list[0] - watcher_service.last_seen_up = datetime.datetime.utcnow() - watcher_service.save() - else: - watcher_service = objects.Service(self.context) - watcher_service.name = ServiceHeartbeat.service_name - watcher_service.host = host - watcher_service.create() - - def add_heartbeat_job(self): - self.add_job(self.send_beat, 'interval', seconds=60, - next_run_time=datetime.datetime.now()) - - @classmethod - def get_service_name(cls): - return CONF.host, cls.service_name - - def start(self): - """Start service.""" - self.add_heartbeat_job() - super(ServiceHeartbeat, self).start() - - def stop(self): - """Stop service.""" - self.shutdown() - - def wait(self): - """Wait for service to complete.""" - - def reset(self): - """Reset service. - - Called in case service running in daemon mode receives SIGHUP. - """ - - -class Service(service.ServiceBase): - - API_VERSION = '1.0' - - def __init__(self, manager_class): - super(Service, self).__init__() - self.manager = manager_class() - - self.publisher_id = self.manager.publisher_id - self.api_version = self.manager.api_version - - self.conductor_topic = self.manager.conductor_topic - self.notification_topics = self.manager.notification_topics - - self.heartbeat = None - - self.service_name = self.manager.service_name - if self.service_name: - self.heartbeat = ServiceHeartbeat( - service_name=self.manager.service_name) - - self.conductor_endpoints = [ - ep(self) for ep in self.manager.conductor_endpoints - ] - self.notification_endpoints = self.manager.notification_endpoints - - self.serializer = rpc.RequestContextSerializer( - base.WatcherObjectSerializer()) - - self._transport = None - self._notification_transport = None - self._conductor_client = None - - self.conductor_topic_handler = None - self.notification_handler = None - - if self.conductor_topic and self.conductor_endpoints: - self.conductor_topic_handler 
= self.build_topic_handler( - self.conductor_topic, self.conductor_endpoints) - if self.notification_topics and self.notification_endpoints: - self.notification_handler = self.build_notification_handler( - self.notification_topics, self.notification_endpoints - ) - - @property - def transport(self): - if self._transport is None: - self._transport = om.get_rpc_transport(CONF) - return self._transport - - @property - def notification_transport(self): - if self._notification_transport is None: - self._notification_transport = om.get_notification_transport(CONF) - return self._notification_transport - - @property - def conductor_client(self): - if self._conductor_client is None: - target = om.Target( - topic=self.conductor_topic, - version=self.API_VERSION, - ) - self._conductor_client = om.RPCClient( - self.transport, target, serializer=self.serializer) - return self._conductor_client - - @conductor_client.setter - def conductor_client(self, c): - self.conductor_client = c - - def build_topic_handler(self, topic_name, endpoints=()): - access_policy = dispatcher.DefaultRPCAccessPolicy - serializer = rpc.RequestContextSerializer(rpc.JsonPayloadSerializer()) - target = om.Target( - topic=topic_name, - # For compatibility, we can override it with 'host' opt - server=CONF.host or socket.gethostname(), - version=self.api_version, - ) - return om.get_rpc_server( - self.transport, target, endpoints, - executor='eventlet', serializer=serializer, - access_policy=access_policy) - - def build_notification_handler(self, topic_names, endpoints=()): - serializer = rpc.RequestContextSerializer(rpc.JsonPayloadSerializer()) - targets = [om.Target(topic=topic_name) for topic_name in topic_names] - return om.get_notification_listener( - self.notification_transport, targets, endpoints, - executor='eventlet', serializer=serializer, - allow_requeue=False) - - def start(self): - LOG.debug("Connecting to '%s' (%s)", - CONF.transport_url, CONF.rpc_backend) - if self.conductor_topic_handler: - 
self.conductor_topic_handler.start() - if self.notification_handler: - self.notification_handler.start() - if self.heartbeat: - self.heartbeat.start() - - def stop(self): - LOG.debug("Disconnecting from '%s' (%s)", - CONF.transport_url, CONF.rpc_backend) - if self.conductor_topic_handler: - self.conductor_topic_handler.stop() - if self.notification_handler: - self.notification_handler.stop() - if self.heartbeat: - self.heartbeat.stop() - - def reset(self): - """Reset a service in case it received a SIGHUP.""" - - def wait(self): - """Wait for service to complete.""" - - def check_api_version(self, ctx): - api_manager_version = self.conductor_client.call( - ctx, 'check_api_version', api_version=self.api_version) - return api_manager_version - - -def launch(conf, service_, workers=1, restart_method='reload'): - return service.launch(conf, service_, workers, restart_method) - - -def prepare_service(argv=(), conf=cfg.CONF): - log.register_options(conf) - gmr_opts.set_defaults(conf) - - config.parse_args(argv) - cfg.set_defaults(_options.log_opts, - default_log_levels=_DEFAULT_LOG_LEVELS) - log.setup(conf, 'python-watcher') - conf.log_opt_values(LOG, log.DEBUG) - objects.register_all() - - gmr.TextGuruMeditation.register_section( - _('Plugins'), plugins_conf.show_plugins) - gmr.TextGuruMeditation.setup_autorun(version, conf=conf) diff --git a/watcher/common/service_manager.py b/watcher/common/service_manager.py deleted file mode 100644 index b87240c..0000000 --- a/watcher/common/service_manager.py +++ /dev/null @@ -1,50 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Copyright © 2016 Servionica -## -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import abc -import six - - -@six.add_metaclass(abc.ABCMeta) -class ServiceManager(object): - - @abc.abstractproperty - def service_name(self): - raise NotImplementedError() - - @abc.abstractproperty - def api_version(self): - raise NotImplementedError() - - @abc.abstractproperty - def publisher_id(self): - raise NotImplementedError() - - @abc.abstractproperty - def conductor_topic(self): - raise NotImplementedError() - - @abc.abstractproperty - def notification_topics(self): - raise NotImplementedError() - - @abc.abstractproperty - def conductor_endpoints(self): - raise NotImplementedError() - - @abc.abstractproperty - def notification_endpoints(self): - raise NotImplementedError() diff --git a/watcher/common/synchronization.py b/watcher/common/synchronization.py deleted file mode 100644 index ffeccd1..0000000 --- a/watcher/common/synchronization.py +++ /dev/null @@ -1,22 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import threading - - -class Synchronization(object): - def __init__(self): - self.mutex = threading.RLock() diff --git a/watcher/common/utils.py b/watcher/common/utils.py deleted file mode 100644 index be345af..0000000 --- a/watcher/common/utils.py +++ /dev/null @@ -1,160 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Utilities and helper functions.""" - -import datetime -import re - -from croniter import croniter - -from jsonschema import validators -from oslo_log import log as logging -from oslo_utils import strutils -from oslo_utils import timeutils -from oslo_utils import uuidutils -import six - -from watcher.common import exception - -from watcher import conf - -CONF = conf.CONF - -LOG = logging.getLogger(__name__) - - -class Struct(dict): - """Specialized dict where you access an item like an attribute - - >>> struct = Struct() - >>> struct['a'] = 1 - >>> struct.b = 2 - >>> assert struct.a == 1 - >>> assert struct['b'] == 2 - """ - - def __getattr__(self, name): - try: - return self[name] - except KeyError: - raise AttributeError(name) - - def __setattr__(self, name, value): - try: - self[name] = value - except KeyError: - raise AttributeError(name) - - -generate_uuid = uuidutils.generate_uuid -is_uuid_like = uuidutils.is_uuid_like -is_int_like = strutils.is_int_like -strtime = timeutils.strtime - - -def is_cron_like(value): - """Return True is submitted value is like cron syntax""" - 
try: - croniter(value, datetime.datetime.now()) - except Exception as e: - raise exception.CronFormatIsInvalid(message=str(e)) - return True - - -def safe_rstrip(value, chars=None): - """Removes trailing characters from a string if that does not make it empty - - :param value: A string value that will be stripped. - :param chars: Characters to remove. - :return: Stripped value. - - """ - if not isinstance(value, six.string_types): - LOG.warning( - "Failed to remove trailing character. Returning original object." - "Supplied object is not a string: %s,", value) - return value - - return value.rstrip(chars) or value - - -def is_hostname_safe(hostname): - """Determine if the supplied hostname is RFC compliant. - - Check that the supplied hostname conforms to: - * http://en.wikipedia.org/wiki/Hostname - * http://tools.ietf.org/html/rfc952 - * http://tools.ietf.org/html/rfc1123 - - :param hostname: The hostname to be validated. - :returns: True if valid. False if not. - - """ - m = r'^[a-z0-9]([a-z0-9\-]{0,61}[a-z0-9])?$' - return (isinstance(hostname, six.string_types) and - (re.match(m, hostname) is not None)) - - -def get_cls_import_path(cls): - """Return the import path of a given class""" - module = cls.__module__ - if module is None or module == str.__module__: - return cls.__name__ - return module + '.' 
+ cls.__name__ - - -# Default value feedback extension as jsonschema doesn't support it -def extend_with_default(validator_class): - validate_properties = validator_class.VALIDATORS["properties"] - - def set_defaults(validator, properties, instance, schema): - for prop, subschema in properties.items(): - if "default" in subschema and instance is not None: - instance.setdefault(prop, subschema["default"]) - - for error in validate_properties( - validator, properties, instance, schema - ): - yield error - - return validators.extend(validator_class, - {"properties": set_defaults}) - - -# Parameter strict check extension as jsonschema doesn't support it -def extend_with_strict_schema(validator_class): - validate_properties = validator_class.VALIDATORS["properties"] - - def strict_schema(validator, properties, instance, schema): - if instance is None: - return - - for para in instance.keys(): - if para not in properties.keys(): - raise exception.AuditParameterNotAllowed(parameter=para) - - for error in validate_properties( - validator, properties, instance, schema - ): - yield error - - return validators.extend(validator_class, {"properties": strict_schema}) - -StrictDefaultValidatingDraft4Validator = extend_with_default( - extend_with_strict_schema(validators.Draft4Validator)) - -Draft4Validator = validators.Draft4Validator diff --git a/watcher/conf/__init__.py b/watcher/conf/__init__.py deleted file mode 100755 index 625401b..0000000 --- a/watcher/conf/__init__.py +++ /dev/null @@ -1,60 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# Copyright (c) 2016 Intel Corp -# -# Authors: Vincent FRANCOISE -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from oslo_config import cfg - -from watcher.conf import api -from watcher.conf import applier -from watcher.conf import ceilometer_client -from watcher.conf import cinder_client -from watcher.conf import clients_auth -from watcher.conf import db -from watcher.conf import decision_engine -from watcher.conf import exception -from watcher.conf import glance_client -from watcher.conf import gnocchi_client -from watcher.conf import ironic_client -from watcher.conf import monasca_client -from watcher.conf import neutron_client -from watcher.conf import nova_client -from watcher.conf import paths -from watcher.conf import planner -from watcher.conf import service -from watcher.conf import utils - -CONF = cfg.CONF - -service.register_opts(CONF) -api.register_opts(CONF) -utils.register_opts(CONF) -paths.register_opts(CONF) -exception.register_opts(CONF) -db.register_opts(CONF) -planner.register_opts(CONF) -applier.register_opts(CONF) -decision_engine.register_opts(CONF) -monasca_client.register_opts(CONF) -nova_client.register_opts(CONF) -glance_client.register_opts(CONF) -gnocchi_client.register_opts(CONF) -cinder_client.register_opts(CONF) -ceilometer_client.register_opts(CONF) -neutron_client.register_opts(CONF) -clients_auth.register_opts(CONF) -ironic_client.register_opts(CONF) diff --git a/watcher/conf/_opts.py b/watcher/conf/_opts.py deleted file mode 100644 index 73398f1..0000000 --- a/watcher/conf/_opts.py +++ /dev/null @@ -1,60 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright 2014 -# The Cloudscaling Group, Inc. 
-# Copyright (c) 2016 Intel Corp -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from keystoneauth1 import loading as ka_loading - -from watcher.conf import api as conf_api -from watcher.conf import applier as conf_applier -from watcher.conf import ceilometer_client as conf_ceilometer_client -from watcher.conf import cinder_client as conf_cinder_client -from watcher.conf import db -from watcher.conf import decision_engine as conf_de -from watcher.conf import exception -from watcher.conf import glance_client as conf_glance_client -from watcher.conf import neutron_client as conf_neutron_client -from watcher.conf import nova_client as conf_nova_client -from watcher.conf import paths -from watcher.conf import planner as conf_planner -from watcher.conf import utils - - -def list_opts(): - """Legacy aggregation of all the watcher config options""" - return [ - ('DEFAULT', - (conf_api.AUTH_OPTS + - exception.EXC_LOG_OPTS + - paths.PATH_OPTS + - utils.UTILS_OPTS)), - ('api', conf_api.API_SERVICE_OPTS), - ('database', db.SQL_OPTS), - ('watcher_planner', conf_planner.WATCHER_PLANNER_OPTS), - ('watcher_applier', conf_applier.APPLIER_MANAGER_OPTS), - ('watcher_decision_engine', - (conf_de.WATCHER_DECISION_ENGINE_OPTS + - conf_de.WATCHER_CONTINUOUS_OPTS)), - ('nova_client', conf_nova_client.NOVA_CLIENT_OPTS), - ('glance_client', conf_glance_client.GLANCE_CLIENT_OPTS), - ('cinder_client', conf_cinder_client.CINDER_CLIENT_OPTS), - ('ceilometer_client', 
conf_ceilometer_client.CEILOMETER_CLIENT_OPTS), - ('neutron_client', conf_neutron_client.NEUTRON_CLIENT_OPTS), - ('watcher_clients_auth', - (ka_loading.get_auth_common_conf_options() + - ka_loading.get_auth_plugin_conf_options('password') + - ka_loading.get_session_conf_options())) - ] diff --git a/watcher/conf/api.py b/watcher/conf/api.py deleted file mode 100644 index 4531eca..0000000 --- a/watcher/conf/api.py +++ /dev/null @@ -1,68 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 Intel Corp -# -# Authors: Prudhvi Rao Shedimbi -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from oslo_config import cfg - -api = cfg.OptGroup(name='api', - title='Options for the Watcher API service') - -AUTH_OPTS = [ - cfg.BoolOpt('enable_authentication', - default=True, - help='This option enables or disables user authentication ' - 'via keystone. Default value is True.'), -] - -API_SERVICE_OPTS = [ - cfg.PortOpt('port', - default=9322, - help='The port for the watcher API server'), - cfg.HostAddressOpt('host', - default='127.0.0.1', - help='The listen IP address for the watcher API server' - ), - cfg.IntOpt('max_limit', - default=1000, - help='The maximum number of items returned in a single ' - 'response from a collection resource'), - cfg.IntOpt('workers', - min=1, - help='Number of workers for Watcher API service. 
' - 'The default is equal to the number of CPUs available ' - 'if that can be determined, else a default worker ' - 'count of 1 is returned.'), - - cfg.BoolOpt('enable_ssl_api', - default=False, - help="Enable the integrated stand-alone API to service " - "requests via HTTPS instead of HTTP. If there is a " - "front-end service performing HTTPS offloading from " - "the service, this option should be False; note, you " - "will want to change public API endpoint to represent " - "SSL termination URL with 'public_endpoint' option."), -] - - -def register_opts(conf): - conf.register_group(api) - conf.register_opts(API_SERVICE_OPTS, group=api) - conf.register_opts(AUTH_OPTS) - - -def list_opts(): - return [('api', API_SERVICE_OPTS), ('DEFAULT', AUTH_OPTS)] diff --git a/watcher/conf/applier.py b/watcher/conf/applier.py deleted file mode 100644 index ec1bf38..0000000 --- a/watcher/conf/applier.py +++ /dev/null @@ -1,53 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 Intel Corp -# -# Authors: Prudhvi Rao Shedimbi -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from oslo_config import cfg - -watcher_applier = cfg.OptGroup(name='watcher_applier', - title='Options for the Applier messaging' - 'core') - -APPLIER_MANAGER_OPTS = [ - cfg.IntOpt('workers', - default='1', - min=1, - required=True, - help='Number of workers for applier, default value is 1.'), - cfg.StrOpt('conductor_topic', - default='watcher.applier.control', - help='The topic name used for' - 'control events, this topic ' - 'used for rpc call '), - cfg.StrOpt('publisher_id', - default='watcher.applier.api', - help='The identifier used by watcher ' - 'module on the message broker'), - cfg.StrOpt('workflow_engine', - default='taskflow', - required=True, - help='Select the engine to use to execute the workflow'), -] - - -def register_opts(conf): - conf.register_group(watcher_applier) - conf.register_opts(APPLIER_MANAGER_OPTS, group=watcher_applier) - - -def list_opts(): - return [('watcher_applier', APPLIER_MANAGER_OPTS)] diff --git a/watcher/conf/ceilometer_client.py b/watcher/conf/ceilometer_client.py deleted file mode 100644 index 48fdf88..0000000 --- a/watcher/conf/ceilometer_client.py +++ /dev/null @@ -1,42 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 Intel Corp -# -# Authors: Prudhvi Rao Shedimbi -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from oslo_config import cfg - -ceilometer_client = cfg.OptGroup(name='ceilometer_client', - title='Configuration Options for Ceilometer') - -CEILOMETER_CLIENT_OPTS = [ - cfg.StrOpt('api_version', - default='2', - help='Version of Ceilometer API to use in ' - 'ceilometerclient.'), - cfg.StrOpt('endpoint_type', - default='internalURL', - help='Type of endpoint to use in ceilometerclient.' - 'Supported values: internalURL, publicURL, adminURL' - 'The default is internalURL.')] - - -def register_opts(conf): - conf.register_group(ceilometer_client) - conf.register_opts(CEILOMETER_CLIENT_OPTS, group=ceilometer_client) - - -def list_opts(): - return [('ceilometer_client', CEILOMETER_CLIENT_OPTS)] diff --git a/watcher/conf/cinder_client.py b/watcher/conf/cinder_client.py deleted file mode 100644 index 687c4ce..0000000 --- a/watcher/conf/cinder_client.py +++ /dev/null @@ -1,41 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 Intel Corp -# -# Authors: Prudhvi Rao Shedimbi -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from oslo_config import cfg - -cinder_client = cfg.OptGroup(name='cinder_client', - title='Configuration Options for Cinder') - -CINDER_CLIENT_OPTS = [ - cfg.StrOpt('api_version', - default='3', - help='Version of Cinder API to use in cinderclient.'), - cfg.StrOpt('endpoint_type', - default='publicURL', - help='Type of endpoint to use in cinderclient.' 
- 'Supported values: internalURL, publicURL, adminURL' - 'The default is publicURL.')] - - -def register_opts(conf): - conf.register_group(cinder_client) - conf.register_opts(CINDER_CLIENT_OPTS, group=cinder_client) - - -def list_opts(): - return [('cinder_client', CINDER_CLIENT_OPTS)] diff --git a/watcher/conf/clients_auth.py b/watcher/conf/clients_auth.py deleted file mode 100644 index 8e959fc..0000000 --- a/watcher/conf/clients_auth.py +++ /dev/null @@ -1,31 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 Intel Corp -# -# Authors: Prudhvi Rao Shedimbi -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from keystoneauth1 import loading as ka_loading - -WATCHER_CLIENTS_AUTH = 'watcher_clients_auth' - - -def register_opts(conf): - ka_loading.register_session_conf_options(conf, WATCHER_CLIENTS_AUTH) - ka_loading.register_auth_conf_options(conf, WATCHER_CLIENTS_AUTH) - - -def list_opts(): - return [('watcher_clients_auth', ka_loading.get_session_conf_options() + - ka_loading.get_auth_common_conf_options())] diff --git a/watcher/conf/db.py b/watcher/conf/db.py deleted file mode 100644 index 8989687..0000000 --- a/watcher/conf/db.py +++ /dev/null @@ -1,44 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 Intel Corp -# -# Authors: Prudhvi Rao Shedimbi -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from oslo_config import cfg -from oslo_db import options as oslo_db_options - -from watcher.conf import paths - -_DEFAULT_SQL_CONNECTION = 'sqlite:///{0}'.format( - paths.state_path_def('watcher.sqlite')) - -database = cfg.OptGroup(name='database', - title='Configuration Options for database') - -SQL_OPTS = [ - cfg.StrOpt('mysql_engine', - default='InnoDB', - help='MySQL engine to use.') -] - - -def register_opts(conf): - oslo_db_options.set_defaults(conf, connection=_DEFAULT_SQL_CONNECTION) - conf.register_group(database) - conf.register_opts(SQL_OPTS, group=database) - - -def list_opts(): - return [('database', SQL_OPTS)] diff --git a/watcher/conf/decision_engine.py b/watcher/conf/decision_engine.py deleted file mode 100644 index 162dc29..0000000 --- a/watcher/conf/decision_engine.py +++ /dev/null @@ -1,73 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 Intel Corp -# -# Authors: Prudhvi Rao Shedimbi -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from oslo_config import cfg - - -watcher_decision_engine = cfg.OptGroup(name='watcher_decision_engine', - title='Defines the parameters of ' - 'the module decision engine') - -WATCHER_DECISION_ENGINE_OPTS = [ - cfg.StrOpt('conductor_topic', - default='watcher.decision.control', - help='The topic name used for ' - 'control events, this topic ' - 'used for RPC calls'), - cfg.ListOpt('notification_topics', - default=['versioned_notifications', 'watcher_notifications'], - help='The topic names from which notification events ' - 'will be listened to'), - cfg.StrOpt('publisher_id', - default='watcher.decision.api', - help='The identifier used by the Watcher ' - 'module on the message broker'), - cfg.IntOpt('max_workers', - default=2, - required=True, - help='The maximum number of threads that can be used to ' - 'execute strategies'), - cfg.IntOpt('action_plan_expiry', - default=24, - help='An expiry timespan(hours). Watcher invalidates any ' - 'action plan for which its creation time ' - '-whose number of hours has been offset by this value-' - ' is older that the current time.'), - cfg.IntOpt('check_periodic_interval', - default=30*60, - help='Interval (in seconds) for checking action plan expiry.') -] - -WATCHER_CONTINUOUS_OPTS = [ - cfg.IntOpt('continuous_audit_interval', - default=10, - help='Interval (in seconds) for checking newly created ' - 'continuous audits.') -] - - -def register_opts(conf): - conf.register_group(watcher_decision_engine) - conf.register_opts(WATCHER_DECISION_ENGINE_OPTS, - group=watcher_decision_engine) - conf.register_opts(WATCHER_CONTINUOUS_OPTS, group=watcher_decision_engine) - - -def list_opts(): - return [('watcher_decision_engine', WATCHER_DECISION_ENGINE_OPTS), - ('watcher_decision_engine', WATCHER_CONTINUOUS_OPTS)] diff --git a/watcher/conf/exception.py b/watcher/conf/exception.py deleted file mode 100644 index 3d9f67d..0000000 --- a/watcher/conf/exception.py +++ /dev/null @@ -1,33 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 
2016 Intel Corp -# -# Authors: Prudhvi Rao Shedimbi -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from oslo_config import cfg - -EXC_LOG_OPTS = [ - cfg.BoolOpt('fatal_exception_format_errors', - default=False, - help='Make exception message format errors fatal.'), -] - - -def register_opts(conf): - conf.register_opts(EXC_LOG_OPTS) - - -def list_opts(): - return [('DEFAULT', EXC_LOG_OPTS)] diff --git a/watcher/conf/glance_client.py b/watcher/conf/glance_client.py deleted file mode 100644 index 015a09c..0000000 --- a/watcher/conf/glance_client.py +++ /dev/null @@ -1,41 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 Intel Corp -# -# Authors: Prudhvi Rao Shedimbi -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from oslo_config import cfg - -glance_client = cfg.OptGroup(name='glance_client', - title='Configuration Options for Glance') - -GLANCE_CLIENT_OPTS = [ - cfg.StrOpt('api_version', - default='2', - help='Version of Glance API to use in glanceclient.'), - cfg.StrOpt('endpoint_type', - default='publicURL', - help='Type of endpoint to use in glanceclient.' - 'Supported values: internalURL, publicURL, adminURL' - 'The default is publicURL.')] - - -def register_opts(conf): - conf.register_group(glance_client) - conf.register_opts(GLANCE_CLIENT_OPTS, group=glance_client) - - -def list_opts(): - return [('glance_client', GLANCE_CLIENT_OPTS)] diff --git a/watcher/conf/gnocchi_client.py b/watcher/conf/gnocchi_client.py deleted file mode 100644 index 0e3acce..0000000 --- a/watcher/conf/gnocchi_client.py +++ /dev/null @@ -1,47 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2017 Servionica -# -# Authors: Alexander Chadin -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from oslo_config import cfg - -gnocchi_client = cfg.OptGroup(name='gnocchi_client', - title='Configuration Options for Gnocchi') - -GNOCCHI_CLIENT_OPTS = [ - cfg.StrOpt('api_version', - default='1', - help='Version of Gnocchi API to use in gnocchiclient.'), - cfg.StrOpt('endpoint_type', - default='internalURL', - help='Type of endpoint to use in gnocchi client.' 
- 'Supported values: internalURL, publicURL, adminURL' - 'The default is internalURL.'), - cfg.IntOpt('query_max_retries', - default=10, - help='How many times Watcher is trying to query again'), - cfg.IntOpt('query_timeout', - default=1, - help='How many seconds Watcher should wait to do query again')] - - -def register_opts(conf): - conf.register_group(gnocchi_client) - conf.register_opts(GNOCCHI_CLIENT_OPTS, group=gnocchi_client) - - -def list_opts(): - return [('gnocchi_client', GNOCCHI_CLIENT_OPTS)] diff --git a/watcher/conf/ironic_client.py b/watcher/conf/ironic_client.py deleted file mode 100755 index fc4940e..0000000 --- a/watcher/conf/ironic_client.py +++ /dev/null @@ -1,41 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2017 ZTE Corp -# -# Authors: Prudhvi Rao Shedimbi -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from oslo_config import cfg - -ironic_client = cfg.OptGroup(name='ironic_client', - title='Configuration Options for Ironic') - -IRONIC_CLIENT_OPTS = [ - cfg.StrOpt('api_version', - default=1, - help='Version of Ironic API to use in ironicclient.'), - cfg.StrOpt('endpoint_type', - default='publicURL', - help='Type of endpoint to use in ironicclient.' 
- 'Supported values: internalURL, publicURL, adminURL' - 'The default is publicURL.')] - - -def register_opts(conf): - conf.register_group(ironic_client) - conf.register_opts(IRONIC_CLIENT_OPTS, group=ironic_client) - - -def list_opts(): - return [('ironic_client', IRONIC_CLIENT_OPTS)] diff --git a/watcher/conf/monasca_client.py b/watcher/conf/monasca_client.py deleted file mode 100644 index 26d54f4..0000000 --- a/watcher/conf/monasca_client.py +++ /dev/null @@ -1,41 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 Intel Corp -# -# Authors: Prudhvi Rao Shedimbi -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from oslo_config import cfg - -monasca_client = cfg.OptGroup(name='monasca_client', - title='Configuration Options for Monasca') - -MONASCA_CLIENT_OPTS = [ - cfg.StrOpt('api_version', - default='2_0', - help='Version of Monasca API to use in monascaclient.'), - cfg.StrOpt('interface', - default='internal', - help='Type of interface used for monasca endpoint.' 
- 'Supported values: internal, public, admin' - 'The default is internal.')] - - -def register_opts(conf): - conf.register_group(monasca_client) - conf.register_opts(MONASCA_CLIENT_OPTS, group=monasca_client) - - -def list_opts(): - return [('monasca_client', MONASCA_CLIENT_OPTS)] diff --git a/watcher/conf/neutron_client.py b/watcher/conf/neutron_client.py deleted file mode 100644 index 4a8888d..0000000 --- a/watcher/conf/neutron_client.py +++ /dev/null @@ -1,41 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 Intel Corp -# -# Authors: Prudhvi Rao Shedimbi -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from oslo_config import cfg - -neutron_client = cfg.OptGroup(name='neutron_client', - title='Configuration Options for Neutron') - -NEUTRON_CLIENT_OPTS = [ - cfg.StrOpt('api_version', - default='2.0', - help='Version of Neutron API to use in neutronclient.'), - cfg.StrOpt('endpoint_type', - default='publicURL', - help='Type of endpoint to use in neutronclient.' 
- 'Supported values: internalURL, publicURL, adminURL' - 'The default is publicURL.')] - - -def register_opts(conf): - conf.register_group(neutron_client) - conf.register_opts(NEUTRON_CLIENT_OPTS, group=neutron_client) - - -def list_opts(): - return [('neutron_client', NEUTRON_CLIENT_OPTS)] diff --git a/watcher/conf/nova_client.py b/watcher/conf/nova_client.py deleted file mode 100755 index 35d55fd..0000000 --- a/watcher/conf/nova_client.py +++ /dev/null @@ -1,41 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 Intel Corp -# -# Authors: Prudhvi Rao Shedimbi -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from oslo_config import cfg - -nova_client = cfg.OptGroup(name='nova_client', - title='Configuration Options for Nova') - -NOVA_CLIENT_OPTS = [ - cfg.StrOpt('api_version', - default='2', - help='Version of Nova API to use in novaclient.'), - cfg.StrOpt('endpoint_type', - default='publicURL', - help='Type of endpoint to use in novaclient.' - 'Supported values: internalURL, publicURL, adminURL' - 'The default is publicURL.')] - - -def register_opts(conf): - conf.register_group(nova_client) - conf.register_opts(NOVA_CLIENT_OPTS, group=nova_client) - - -def list_opts(): - return [('nova_client', NOVA_CLIENT_OPTS)] diff --git a/watcher/conf/opts.py b/watcher/conf/opts.py deleted file mode 100644 index 5af0314..0000000 --- a/watcher/conf/opts.py +++ /dev/null @@ -1,95 +0,0 @@ -# Copyright 2016 OpenStack Foundation -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -This is the single point of entry to generate the sample configuration -file for Watcher. It collects all the necessary info from the other modules -in this package. It is assumed that: - -* every other module in this package has a 'list_opts' function which - return a dict where - * the keys are strings which are the group names - * the value of each key is a list of config options for that group -* the watcher.conf package doesn't have further packages with config options -* this module is only used in the context of sample file generation -""" - -import collections -import importlib -import os -import pkgutil - -LIST_OPTS_FUNC_NAME = "list_opts" - - -def _tupleize(dct): - """Take the dict of options and convert to the 2-tuple format.""" - return [(key, val) for key, val in dct.items()] - - -def list_opts(): - """Grouped list of all the Watcher-specific configuration options - - :return: A list of ``(group, [opt_1, opt_2])`` tuple pairs, where ``group`` - is either a group name as a string or an OptGroup object. 
- """ - opts = collections.defaultdict(list) - module_names = _list_module_names() - imported_modules = _import_modules(module_names) - _append_config_options(imported_modules, opts) - return _tupleize(opts) - - -def _list_module_names(): - module_names = [] - package_path = os.path.dirname(os.path.abspath(__file__)) - for __, modname, ispkg in pkgutil.iter_modules(path=[package_path]): - if modname == "opts" or ispkg: - continue - else: - module_names.append(modname) - return module_names - - -def _import_modules(module_names): - imported_modules = [] - for modname in module_names: - mod = importlib.import_module("watcher.conf." + modname) - if not hasattr(mod, LIST_OPTS_FUNC_NAME): - msg = "The module 'watcher.conf.%s' should have a '%s' "\ - "function which returns the config options." % \ - (modname, LIST_OPTS_FUNC_NAME) - raise Exception(msg) - else: - imported_modules.append(mod) - return imported_modules - - -def _process_old_opts(configs): - """Convert old-style 2-tuple configs to dicts.""" - if isinstance(configs, tuple): - configs = [configs] - return {label: options for label, options in configs} - - -def _append_config_options(imported_modules, config_options): - for mod in imported_modules: - configs = mod.list_opts() - # TODO(markus_z): Remove this compatibility shim once all list_opts() - # functions have been updated to return dicts. - if not isinstance(configs, dict): - configs = _process_old_opts(configs) - for key, val in configs.items(): - config_options[key].extend(val) diff --git a/watcher/conf/paths.py b/watcher/conf/paths.py deleted file mode 100644 index a499614..0000000 --- a/watcher/conf/paths.py +++ /dev/null @@ -1,57 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 Intel Corp -# -# Authors: Prudhvi Rao Shedimbi -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from oslo_config import cfg - -import os - -PATH_OPTS = [ - cfg.StrOpt('pybasedir', - default=os.path.abspath(os.path.join(os.path.dirname(__file__), - '../')), - help='Directory where the watcher python module is installed.'), - cfg.StrOpt('bindir', - default='$pybasedir/bin', - help='Directory where watcher binaries are installed.'), - cfg.StrOpt('state_path', - default='$pybasedir', - help="Top-level directory for maintaining watcher's state."), -] - - -def basedir_def(*args): - """Return an uninterpolated path relative to $pybasedir.""" - return os.path.join('$pybasedir', *args) - - -def bindir_def(*args): - """Return an uninterpolated path relative to $bindir.""" - return os.path.join('$bindir', *args) - - -def state_path_def(*args): - """Return an uninterpolated path relative to $state_path.""" - return os.path.join('$state_path', *args) - - -def register_opts(conf): - conf.register_opts(PATH_OPTS) - - -def list_opts(): - return [('DEFAULT', PATH_OPTS)] diff --git a/watcher/conf/planner.py b/watcher/conf/planner.py deleted file mode 100644 index 1386c2f..0000000 --- a/watcher/conf/planner.py +++ /dev/null @@ -1,41 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 Intel Corp -# -# Authors: Prudhvi Rao Shedimbi -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from oslo_config import cfg - -watcher_planner = cfg.OptGroup(name='watcher_planner', - title='Defines the parameters of ' - 'the planner') - -default_planner = 'weight' - -WATCHER_PLANNER_OPTS = { - cfg.StrOpt('planner', - default=default_planner, - required=True, - help='The selected planner used to schedule the actions') -} - - -def register_opts(conf): - conf.register_group(watcher_planner) - conf.register_opts(WATCHER_PLANNER_OPTS, group=watcher_planner) - - -def list_opts(): - return [('watcher_planner', WATCHER_PLANNER_OPTS)] diff --git a/watcher/conf/plugins.py b/watcher/conf/plugins.py deleted file mode 100644 index d770520..0000000 --- a/watcher/conf/plugins.py +++ /dev/null @@ -1,72 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Authors: Vincent FRANCOISE -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import prettytable as ptable - -from watcher.applier.loading import default as applier_loader -from watcher.common import utils -from watcher.decision_engine.loading import default as decision_engine_loader - -PLUGIN_LOADERS = ( - applier_loader.DefaultActionLoader, - decision_engine_loader.DefaultPlannerLoader, - decision_engine_loader.DefaultScoringLoader, - decision_engine_loader.DefaultScoringContainerLoader, - decision_engine_loader.DefaultStrategyLoader, - decision_engine_loader.ClusterDataModelCollectorLoader, - applier_loader.DefaultWorkFlowEngineLoader, -) - - -def list_opts(): - """Load config options for all Watcher plugins""" - plugins_opts = [] - for plugin_loader_cls in PLUGIN_LOADERS: - plugin_loader = plugin_loader_cls() - plugins_map = plugin_loader.list_available() - - for plugin_name, plugin_cls in plugins_map.items(): - plugin_opts = plugin_cls.get_config_opts() - if plugin_opts: - plugins_opts.append( - (plugin_loader.get_entry_name(plugin_name), plugin_opts)) - - return plugins_opts - - -def _show_plugins_ascii_table(rows): - headers = ["Namespace", "Plugin name", "Import path"] - table = ptable.PrettyTable(field_names=headers) - for row in rows: - table.add_row(row) - return table.get_string() - - -def show_plugins(): - rows = [] - for plugin_loader_cls in PLUGIN_LOADERS: - plugin_loader = plugin_loader_cls() - plugins_map = plugin_loader.list_available() - - rows += [ - (plugin_loader.get_entry_name(plugin_name), - plugin_name, - utils.get_cls_import_path(plugin_cls)) - for plugin_name, plugin_cls in plugins_map.items()] - - return _show_plugins_ascii_table(rows) diff --git a/watcher/conf/service.py b/watcher/conf/service.py deleted file mode 100644 index 0f18d3a..0000000 --- a/watcher/conf/service.py +++ /dev/null @@ -1,50 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Authors: Vincent FRANCOISE -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance 
with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import socket - -from oslo_config import cfg - -from watcher._i18n import _ - -SERVICE_OPTS = [ - cfg.IntOpt('periodic_interval', - default=60, - help=_('Seconds between running periodic tasks.')), - cfg.HostAddressOpt('host', - default=socket.gethostname(), - help=_('Name of this node. This can be an opaque ' - 'identifier. It is not necessarily a hostname, ' - 'FQDN, or IP address. However, the node name ' - 'must be valid within an AMQP key, and if using ' - 'ZeroMQ, a valid hostname, FQDN, or IP address.') - ), - cfg.IntOpt('service_down_time', - default=90, - help=_('Maximum time since last check-in for up service.')) -] - - -def register_opts(conf): - conf.register_opts(SERVICE_OPTS) - - -def list_opts(): - return [ - ('DEFAULT', SERVICE_OPTS), - ] diff --git a/watcher/conf/utils.py b/watcher/conf/utils.py deleted file mode 100644 index 7c2981c..0000000 --- a/watcher/conf/utils.py +++ /dev/null @@ -1,36 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 Intel Corp -# -# Authors: Prudhvi Rao Shedimbi -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -from oslo_config import cfg - -UTILS_OPTS = [ - cfg.StrOpt('rootwrap_config', - default="/etc/watcher/rootwrap.conf", - help='Path to the rootwrap configuration file to use for ' - 'running commands as root.'), - cfg.StrOpt('tempdir', - help='Explicitly specify the temporary working directory.'), -] - - -def register_opts(conf): - conf.register_opts(UTILS_OPTS) - - -def list_opts(): - return [('DEFAULT', UTILS_OPTS)] diff --git a/watcher/datasource/__init__.py b/watcher/datasource/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/datasource/ceilometer.py b/watcher/datasource/ceilometer.py deleted file mode 100644 index a71fb3f..0000000 --- a/watcher/datasource/ceilometer.py +++ /dev/null @@ -1,184 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Authors: Jean-Emile DARTOIS -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import datetime - -from ceilometerclient import exc -from oslo_utils import timeutils - -from watcher._i18n import _ -from watcher.common import clients -from watcher.common import exception - - -class CeilometerHelper(object): - def __init__(self, osc=None): - """:param osc: an OpenStackClients instance""" - self.osc = osc if osc else clients.OpenStackClients() - self.ceilometer = self.osc.ceilometer() - - @staticmethod - def format_query(user_id, tenant_id, resource_id, - user_ids, tenant_ids, resource_ids): - query = [] - - def query_append(query, _id, _ids, field): - if _id: - _ids = [_id] - for x_id in _ids: - query.append({"field": field, "op": "eq", "value": x_id}) - - query_append(query, user_id, (user_ids or []), "user_id") - query_append(query, tenant_id, (tenant_ids or []), "project_id") - query_append(query, resource_id, (resource_ids or []), "resource_id") - - return query - - def _timestamps(self, start_time, end_time): - - def _format_timestamp(_time): - if _time: - if isinstance(_time, datetime.datetime): - return _time.isoformat() - return _time - return None - - start_timestamp = _format_timestamp(start_time) - end_timestamp = _format_timestamp(end_time) - - if ((start_timestamp is not None) and (end_timestamp is not None) and - (timeutils.parse_isotime(start_timestamp) > - timeutils.parse_isotime(end_timestamp))): - raise exception.Invalid( - _("Invalid query: %(start_time)s > %(end_time)s") % dict( - start_time=start_timestamp, end_time=end_timestamp)) - return start_timestamp, end_timestamp - - def build_query(self, user_id=None, tenant_id=None, resource_id=None, - user_ids=None, tenant_ids=None, resource_ids=None, - start_time=None, end_time=None): - """Returns query built from given parameters. - - This query can be then used for querying resources, meters and - statistics. 
- :param user_id: user_id, has a priority over list of ids - :param tenant_id: tenant_id, has a priority over list of ids - :param resource_id: resource_id, has a priority over list of ids - :param user_ids: list of user_ids - :param tenant_ids: list of tenant_ids - :param resource_ids: list of resource_ids - :param start_time: datetime from which measurements should be collected - :param end_time: datetime until which measurements should be collected - """ - - query = self.format_query(user_id, tenant_id, resource_id, - user_ids, tenant_ids, resource_ids) - - start_timestamp, end_timestamp = self._timestamps(start_time, - end_time) - - if start_timestamp: - query.append({"field": "timestamp", "op": "ge", - "value": start_timestamp}) - if end_timestamp: - query.append({"field": "timestamp", "op": "le", - "value": end_timestamp}) - return query - - def query_retry(self, f, *args, **kargs): - try: - return f(*args, **kargs) - except exc.HTTPUnauthorized: - self.osc.reset_clients() - self.ceilometer = self.osc.ceilometer() - return f(*args, **kargs) - except Exception: - raise - - def query_sample(self, meter_name, query, limit=1): - return self.query_retry(f=self.ceilometer.samples.list, - meter_name=meter_name, - limit=limit, - q=query) - - def statistic_list(self, meter_name, query=None, period=None): - """List of statistics.""" - statistics = self.ceilometer.statistics.list( - meter_name=meter_name, - q=query, - period=period) - return statistics - - def meter_list(self, query=None): - """List the user's meters.""" - meters = self.query_retry(f=self.ceilometer.meters.list, - query=query) - return meters - - def statistic_aggregation(self, - resource_id, - meter_name, - period, - aggregate='avg'): - """Representing a statistic aggregate by operators - - :param resource_id: id of resource to list statistics for. - :param meter_name: Name of meter to list statistics for. - :param period: Period in seconds over which to group samples. 
- :param aggregate: Available aggregates are: count, cardinality, - min, max, sum, stddev, avg. Defaults to avg. - :return: Return the latest statistical data, None if no data. - """ - - end_time = datetime.datetime.utcnow() - start_time = end_time - datetime.timedelta(seconds=int(period)) - query = self.build_query( - resource_id=resource_id, start_time=start_time, end_time=end_time) - statistic = self.query_retry(f=self.ceilometer.statistics.list, - meter_name=meter_name, - q=query, - period=period, - aggregates=[ - {'func': aggregate}]) - - item_value = None - if statistic: - item_value = statistic[-1]._info.get('aggregate').get(aggregate) - return item_value - - def get_last_sample_values(self, resource_id, meter_name, limit=1): - samples = self.query_sample( - meter_name=meter_name, - query=self.build_query(resource_id=resource_id), - limit=limit) - values = [] - for index, sample in enumerate(samples): - values.append( - {'sample_%s' % index: { - 'timestamp': sample._info['timestamp'], - 'value': sample._info['counter_volume']}}) - return values - - def get_last_sample_value(self, resource_id, meter_name): - samples = self.query_sample( - meter_name=meter_name, - query=self.build_query(resource_id=resource_id)) - if samples: - return samples[-1]._info['counter_volume'] - else: - return False diff --git a/watcher/datasource/gnocchi.py b/watcher/datasource/gnocchi.py deleted file mode 100644 index 539fa34..0000000 --- a/watcher/datasource/gnocchi.py +++ /dev/null @@ -1,92 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2017 Servionica -# -# Authors: Alexander Chadin -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from datetime import datetime -import time - -from oslo_config import cfg -from oslo_log import log - -from watcher.common import clients -from watcher.common import exception - -CONF = cfg.CONF -LOG = log.getLogger(__name__) - - -class GnocchiHelper(object): - - def __init__(self, osc=None): - """:param osc: an OpenStackClients instance""" - self.osc = osc if osc else clients.OpenStackClients() - self.gnocchi = self.osc.gnocchi() - - def query_retry(self, f, *args, **kwargs): - for i in range(CONF.gnocchi_client.query_max_retries): - try: - return f(*args, **kwargs) - except Exception as e: - LOG.exception(e) - time.sleep(CONF.gnocchi_client.query_timeout) - raise - - def statistic_aggregation(self, - resource_id, - metric, - granularity, - start_time=None, - stop_time=None, - aggregation='mean'): - """Representing a statistic aggregate by operators - - :param metric: metric name of which we want the statistics - :param resource_id: id of resource to list statistics for - :param start_time: Start datetime from which metrics will be used - :param stop_time: End datetime from which metrics will be used - :param granularity: frequency of marking metric point, in seconds - :param aggregation: Should be chosen in accordance with policy - aggregations - :return: value of aggregated metric - """ - - if start_time is not None and not isinstance(start_time, datetime): - raise exception.InvalidParameter(parameter='start_time', - parameter_type=datetime) - - if stop_time is not None and not isinstance(stop_time, datetime): - raise 
exception.InvalidParameter(parameter='stop_time', - parameter_type=datetime) - - raw_kwargs = dict( - metric=metric, - start=start_time, - stop=stop_time, - resource_id=resource_id, - granularity=granularity, - aggregation=aggregation, - ) - - kwargs = {k: v for k, v in raw_kwargs.items() if k and v} - - statistics = self.query_retry( - f=self.gnocchi.metric.get_measures, **kwargs) - - if statistics: - # return value of latest measure - # measure has structure [time, granularity, value] - return statistics[-1][2] diff --git a/watcher/datasource/monasca.py b/watcher/datasource/monasca.py deleted file mode 100644 index a85d06f..0000000 --- a/watcher/datasource/monasca.py +++ /dev/null @@ -1,124 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Authors: Vincent FRANCOISE -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import datetime - -from monascaclient import exc - -from watcher.common import clients - - -class MonascaHelper(object): - - def __init__(self, osc=None): - """:param osc: an OpenStackClients instance""" - self.osc = osc if osc else clients.OpenStackClients() - self.monasca = self.osc.monasca() - - def query_retry(self, f, *args, **kwargs): - try: - return f(*args, **kwargs) - except exc.HTTPUnauthorized: - self.osc.reset_clients() - self.monasca = self.osc.monasca() - return f(*args, **kwargs) - except Exception: - raise - - def _format_time_params(self, start_time, end_time, period): - """Format time-related params to the correct Monasca format - - :param start_time: Start datetime from which metrics will be used - :param end_time: End datetime from which metrics will be used - :param period: interval in seconds (int) - :return: start ISO time, end ISO time, period - """ - - if not period: - period = int(datetime.timedelta(hours=3).total_seconds()) - if not start_time: - start_time = ( - datetime.datetime.utcnow() - - datetime.timedelta(seconds=period)) - - start_timestamp = None if not start_time else start_time.isoformat() - end_timestamp = None if not end_time else end_time.isoformat() - - return start_timestamp, end_timestamp, period - - def statistics_list(self, meter_name, dimensions, start_time=None, - end_time=None, period=None,): - """List of statistics.""" - start_timestamp, end_timestamp, period = self._format_time_params( - start_time, end_time, period - ) - raw_kwargs = dict( - name=meter_name, - start_time=start_timestamp, - end_time=end_timestamp, - dimensions=dimensions, - ) - - kwargs = {k: v for k, v in raw_kwargs.items() if k and v} - - statistics = self.query_retry( - f=self.monasca.metrics.list_measurements, **kwargs) - - return statistics - - def statistic_aggregation(self, - meter_name, - dimensions, - start_time=None, - end_time=None, - period=None, - aggregate='avg', - group_by='*'): - """Representing a statistic aggregate by operators 
- - :param meter_name: meter names of which we want the statistics - :param dimensions: dimensions (dict) - :param start_time: Start datetime from which metrics will be used - :param end_time: End datetime from which metrics will be used - :param period: Sampling `period`: In seconds. If no period is given, - only one aggregate statistic is returned. If given, a - faceted result will be returned, divided into given - periods. Periods with no data are ignored. - :param aggregate: Should be either 'avg', 'count', 'min' or 'max' - :return: A list of dict with each dict being a distinct result row - """ - start_timestamp, end_timestamp, period = self._format_time_params( - start_time, end_time, period - ) - - raw_kwargs = dict( - name=meter_name, - start_time=start_timestamp, - end_time=end_timestamp, - dimensions=dimensions, - period=period, - statistics=aggregate, - group_by=group_by, - ) - - kwargs = {k: v for k, v in raw_kwargs.items() if k and v} - - statistics = self.query_retry( - f=self.monasca.metrics.list_statistics, **kwargs) - - return statistics diff --git a/watcher/db/__init__.py b/watcher/db/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/db/api.py b/watcher/db/api.py deleted file mode 100644 index eb07493..0000000 --- a/watcher/db/api.py +++ /dev/null @@ -1,871 +0,0 @@ -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-""" -Base classes for storage engines -""" - -import abc -from oslo_config import cfg -from oslo_db import api as db_api -import six - -_BACKEND_MAPPING = {'sqlalchemy': 'watcher.db.sqlalchemy.api'} -IMPL = db_api.DBAPI.from_config(cfg.CONF, backend_mapping=_BACKEND_MAPPING, - lazy=True) - - -def get_instance(): - """Return a DB API instance.""" - return IMPL - - -@six.add_metaclass(abc.ABCMeta) -class BaseConnection(object): - """Base class for storage system connections.""" - - @abc.abstractmethod - def get_goal_list(self, context, filters=None, limit=None, - marker=None, sort_key=None, sort_dir=None, eager=False): - """Get specific columns for matching goals. - - Return a list of the specified columns for all goals that - match the specified filters. - - :param context: The security context - :param filters: Filters to apply. Defaults to None. - :param limit: Maximum number of goals to return. - :param marker: the last item of the previous page; we return the next - result set. - :param sort_key: Attribute by which results should be sorted. - :param sort_dir: direction in which results should be sorted. - (asc, desc) - :param eager: If True, also loads One-to-X data (Default: False) - :returns: A list of tuples of the specified columns. - """ - - @abc.abstractmethod - def create_goal(self, values): - """Create a new goal. - - :param values: A dict containing several items used to identify - and track the goal. For example: - - :: - - { - 'uuid': utils.generate_uuid(), - 'name': 'DUMMY', - 'display_name': 'Dummy', - } - :returns: A goal - :raises: :py:class:`~.GoalAlreadyExists` - """ - - @abc.abstractmethod - def get_goal_by_id(self, context, goal_id, eager=False): - """Return a goal given its ID. 
- - :param context: The security context - :param goal_id: The ID of a goal - :param eager: If True, also loads One-to-X data (Default: False) - :returns: A goal - :raises: :py:class:`~.GoalNotFound` - """ - - @abc.abstractmethod - def get_goal_by_uuid(self, context, goal_uuid, eager=False): - """Return a goal given its UUID. - - :param context: The security context - :param goal_uuid: The UUID of a goal - :param eager: If True, also loads One-to-X data (Default: False) - :returns: A goal - :raises: :py:class:`~.GoalNotFound` - """ - - @abc.abstractmethod - def get_goal_by_name(self, context, goal_name, eager=False): - """Return a goal given its name. - - :param context: The security context - :param goal_name: The name of a goal - :param eager: If True, also loads One-to-X data (Default: False) - :returns: A goal - :raises: :py:class:`~.GoalNotFound` - """ - - @abc.abstractmethod - def destroy_goal(self, goal_uuid): - """Destroy a goal. - - :param goal_uuid: The UUID of a goal - :raises: :py:class:`~.GoalNotFound` - """ - - @abc.abstractmethod - def update_goal(self, goal_uuid, values): - """Update properties of a goal. - - :param goal_uuid: The UUID of a goal - :param values: A dict containing several items used to identify - and track the goal. For example: - - :: - - { - 'uuid': utils.generate_uuid(), - 'name': 'DUMMY', - 'display_name': 'Dummy', - } - :returns: A goal - :raises: :py:class:`~.GoalNotFound` - :raises: :py:class:`~.Invalid` - """ - - def soft_delete_goal(self, goal_id): - """Soft delete a goal. - - :param goal_id: The id or uuid of a goal. - :raises: :py:class:`~.GoalNotFound` - """ - - @abc.abstractmethod - def get_strategy_list(self, context, filters=None, limit=None, - marker=None, sort_key=None, sort_dir=None, - eager=True): - """Get specific columns for matching strategies. - - Return a list of the specified columns for all strategies that - match the specified filters. 
- - :param context: The security context - :param filters: Filters to apply. Defaults to None. - - :param limit: Maximum number of strategies to return. - :param marker: The last item of the previous page; we return the next - result set. - :param sort_key: Attribute by which results should be sorted. - :param sort_dir: Direction in which results should be sorted. - (asc, desc) - :param eager: If True, also loads One-to-X data (Default: False) - :returns: A list of tuples of the specified columns. - """ - - @abc.abstractmethod - def create_strategy(self, values): - """Create a new strategy. - - :param values: A dict containing items used to identify - and track the strategy. For example: - - :: - - { - 'id': 1, - 'uuid': utils.generate_uuid(), - 'name': 'my_strategy', - 'display_name': 'My strategy', - 'goal_uuid': utils.generate_uuid(), - } - :returns: A strategy - :raises: :py:class:`~.StrategyAlreadyExists` - """ - - @abc.abstractmethod - def get_strategy_by_id(self, context, strategy_id, eager=False): - """Return a strategy given its ID. - - :param context: The security context - :param strategy_id: The ID of a strategy - :param eager: If True, also loads One-to-X data (Default: False) - :returns: A strategy - :raises: :py:class:`~.StrategyNotFound` - """ - - @abc.abstractmethod - def get_strategy_by_uuid(self, context, strategy_uuid, eager=False): - """Return a strategy given its UUID. - - :param context: The security context - :param strategy_uuid: The UUID of a strategy - :param eager: If True, also loads One-to-X data (Default: False) - :returns: A strategy - :raises: :py:class:`~.StrategyNotFound` - """ - - @abc.abstractmethod - def get_strategy_by_name(self, context, strategy_name, eager=False): - """Return a strategy given its name. 
- - :param context: The security context - :param strategy_name: The name of a strategy - :param eager: If True, also loads One-to-X data (Default: False) - :returns: A strategy - :raises: :py:class:`~.StrategyNotFound` - """ - - @abc.abstractmethod - def destroy_strategy(self, strategy_uuid): - """Destroy a strategy. - - :param strategy_uuid: The UUID of a strategy - :raises: :py:class:`~.StrategyNotFound` - """ - - @abc.abstractmethod - def update_strategy(self, strategy_uuid, values): - """Update properties of a strategy. - - :param strategy_uuid: The UUID of a strategy - :returns: A strategy - :raises: :py:class:`~.StrategyNotFound` - :raises: :py:class:`~.Invalid` - """ - - def soft_delete_strategy(self, strategy_id): - """Soft delete a strategy. - - :param strategy_id: The id or uuid of a strategy. - :raises: :py:class:`~.StrategyNotFound` - """ - - @abc.abstractmethod - def get_audit_template_list(self, context, filters=None, - limit=None, marker=None, sort_key=None, - sort_dir=None, eager=False): - """Get specific columns for matching audit templates. - - Return a list of the specified columns for all audit templates that - match the specified filters. - - :param context: The security context - :param filters: Filters to apply. Defaults to None. - :param limit: Maximum number of audit templates to return. - :param marker: the last item of the previous page; we return the next - result set. - :param sort_key: Attribute by which results should be sorted. - :param sort_dir: direction in which results should be sorted. - (asc, desc) - :param eager: If True, also loads One-to-X data (Default: False) - :returns: A list of tuples of the specified columns. - """ - - @abc.abstractmethod - def create_audit_template(self, values): - """Create a new audit template. - - :param values: A dict containing several items used to identify - and track the audit template. 
For example: - - :: - - { - 'uuid': utils.generate_uuid(), - 'name': 'example', - 'description': 'free text description' - 'goal': 'DUMMY' - } - :returns: An audit template. - :raises: :py:class:`~.AuditTemplateAlreadyExists` - """ - - @abc.abstractmethod - def get_audit_template_by_id(self, context, audit_template_id, - eager=False): - """Return an audit template. - - :param context: The security context - :param audit_template_id: The id of an audit template. - :param eager: If True, also loads One-to-X data (Default: False) - :returns: An audit template. - :raises: :py:class:`~.AuditTemplateNotFound` - """ - - @abc.abstractmethod - def get_audit_template_by_uuid(self, context, audit_template_uuid, - eager=False): - """Return an audit template. - - :param context: The security context - :param audit_template_uuid: The uuid of an audit template. - :param eager: If True, also loads One-to-X data (Default: False) - :returns: An audit template. - :raises: :py:class:`~.AuditTemplateNotFound` - """ - - def get_audit_template_by_name(self, context, audit_template_name, - eager=False): - """Return an audit template. - - :param context: The security context - :param audit_template_name: The name of an audit template. - :param eager: If True, also loads One-to-X data (Default: False) - :returns: An audit template. - :raises: :py:class:`~.AuditTemplateNotFound` - """ - - @abc.abstractmethod - def destroy_audit_template(self, audit_template_id): - """Destroy an audit template. - - :param audit_template_id: The id or uuid of an audit template. - :raises: :py:class:`~.AuditTemplateNotFound` - """ - - @abc.abstractmethod - def update_audit_template(self, audit_template_id, values): - """Update properties of an audit template. - - :param audit_template_id: The id or uuid of an audit template. - :returns: An audit template. 
- :raises: :py:class:`~.AuditTemplateNotFound` - :raises: :py:class:`~.Invalid` - """ - - @abc.abstractmethod - def soft_delete_audit_template(self, audit_template_id): - """Soft delete an audit template. - - :param audit_template_id: The id or uuid of an audit template. - :raises: :py:class:`~.AuditTemplateNotFound` - """ - - @abc.abstractmethod - def get_audit_list(self, context, filters=None, limit=None, - marker=None, sort_key=None, sort_dir=None, eager=False): - """Get specific columns for matching audits. - - Return a list of the specified columns for all audits that match the - specified filters. - - :param context: The security context - :param filters: Filters to apply. Defaults to None. - :param limit: Maximum number of audits to return. - :param marker: the last item of the previous page; we return the next - result set. - :param sort_key: Attribute by which results should be sorted. - :param sort_dir: direction in which results should be sorted. - (asc, desc) - :param eager: If True, also loads One-to-X data (Default: False) - :returns: A list of tuples of the specified columns. - """ - - @abc.abstractmethod - def create_audit(self, values): - """Create a new audit. - - :param values: A dict containing several items used to identify - and track the audit, and several dicts which are passed - into the Drivers when managing this audit. For example: - - :: - - { - 'uuid': utils.generate_uuid(), - 'type': 'ONESHOT', - } - :returns: An audit. - :raises: :py:class:`~.AuditAlreadyExists` - """ - - @abc.abstractmethod - def get_audit_by_id(self, context, audit_id, eager=False): - """Return an audit. - - :param context: The security context - :param audit_id: The id of an audit. - :param eager: If True, also loads One-to-X data (Default: False) - :returns: An audit. - :raises: :py:class:`~.AuditNotFound` - """ - - @abc.abstractmethod - def get_audit_by_uuid(self, context, audit_uuid, eager=False): - """Return an audit. 
- - :param context: The security context - :param audit_uuid: The uuid of an audit. - :param eager: If True, also loads One-to-X data (Default: False) - :returns: An audit. - :raises: :py:class:`~.AuditNotFound` - """ - - @abc.abstractmethod - def destroy_audit(self, audit_id): - """Destroy an audit and all associated action plans. - - :param audit_id: The id or uuid of an audit. - :raises: :py:class:`~.AuditNotFound` - """ - - @abc.abstractmethod - def update_audit(self, audit_id, values): - """Update properties of an audit. - - :param audit_id: The id or uuid of an audit. - :returns: An audit. - :raises: :py:class:`~.AuditNotFound` - :raises: :py:class:`~.Invalid` - """ - - def soft_delete_audit(self, audit_id): - """Soft delete an audit and all associated action plans. - - :param audit_id: The id or uuid of an audit. - :raises: :py:class:`~.AuditNotFound` - """ - - @abc.abstractmethod - def get_action_list(self, context, filters=None, limit=None, - marker=None, sort_key=None, sort_dir=None, - eager=False): - """Get specific columns for matching actions. - - Return a list of the specified columns for all actions that match the - specified filters. - - :param context: The security context - :param filters: Filters to apply. Defaults to None. - :param limit: Maximum number of actions to return. - :param marker: the last item of the previous page; we return the next - result set. - :param sort_key: Attribute by which results should be sorted. - :param sort_dir: direction in which results should be sorted. - (asc, desc) - :param eager: If True, also loads One-to-X data (Default: False) - :returns: A list of tuples of the specified columns. - """ - - @abc.abstractmethod - def create_action(self, values): - """Create a new action. - - :param values: A dict containing several items used to identify - and track the action, and several dicts which are passed - into the Drivers when managing this action. 
For example: - - :: - - { - 'uuid': utils.generate_uuid(), - 'name': 'example', - 'description': 'free text description' - 'aggregate': 'nova aggregate name or uuid' - } - :returns: A action. - :raises: :py:class:`~.ActionAlreadyExists` - """ - - @abc.abstractmethod - def get_action_by_id(self, context, action_id, eager=False): - """Return a action. - - :param context: The security context - :param action_id: The id of a action. - :param eager: If True, also loads One-to-X data (Default: False) - :returns: A action. - :raises: :py:class:`~.ActionNotFound` - """ - - @abc.abstractmethod - def get_action_by_uuid(self, context, action_uuid, eager=False): - """Return a action. - - :param context: The security context - :param action_uuid: The uuid of a action. - :param eager: If True, also loads One-to-X data (Default: False) - :returns: A action. - :raises: :py:class:`~.ActionNotFound` - """ - - @abc.abstractmethod - def destroy_action(self, action_id): - """Destroy a action and all associated interfaces. - - :param action_id: The id or uuid of a action. - :raises: :py:class:`~.ActionNotFound` - :raises: :py:class:`~.ActionReferenced` - """ - - @abc.abstractmethod - def update_action(self, action_id, values): - """Update properties of a action. - - :param action_id: The id or uuid of a action. - :returns: A action. - :raises: :py:class:`~.ActionNotFound` - :raises: :py:class:`~.ActionReferenced` - :raises: :py:class:`~.Invalid` - """ - - def soft_delete_action(self, action_id): - """Soft delete an action. - - :param action_id: The id or uuid of an action. - :raises: :py:class:`~.ActionNotFound` - """ - - @abc.abstractmethod - def get_action_plan_list( - self, context, filters=None, limit=None, - marker=None, sort_key=None, sort_dir=None, eager=False): - """Get specific columns for matching action plans. - - Return a list of the specified columns for all action plans that - match the specified filters. 
- - :param context: The security context - :param filters: Filters to apply. Defaults to None. - :param limit: Maximum number of audits to return. - :param marker: the last item of the previous page; we return the next - result set. - :param sort_key: Attribute by which results should be sorted. - :param sort_dir: direction in which results should be sorted. - (asc, desc) - :param eager: If True, also loads One-to-X data (Default: False) - :returns: A list of tuples of the specified columns. - """ - - @abc.abstractmethod - def create_action_plan(self, values): - """Create a new action plan. - - :param values: A dict containing several items used to identify - and track the action plan. - :returns: An action plan. - :raises: :py:class:`~.ActionPlanAlreadyExists` - """ - - @abc.abstractmethod - def get_action_plan_by_id(self, context, action_plan_id, eager=False): - """Return an action plan. - - :param context: The security context - :param action_plan_id: The id of an action plan. - :param eager: If True, also loads One-to-X data (Default: False) - :returns: An action plan. - :raises: :py:class:`~.ActionPlanNotFound` - """ - - @abc.abstractmethod - def get_action_plan_by_uuid(self, context, action_plan__uuid, eager=False): - """Return a action plan. - - :param context: The security context - :param action_plan__uuid: The uuid of an action plan. - :param eager: If True, also loads One-to-X data (Default: False) - :returns: An action plan. - :raises: :py:class:`~.ActionPlanNotFound` - """ - - @abc.abstractmethod - def destroy_action_plan(self, action_plan_id): - """Destroy an action plan and all associated interfaces. - - :param action_plan_id: The id or uuid of a action plan. - :raises: :py:class:`~.ActionPlanNotFound` - :raises: :py:class:`~.ActionPlanReferenced` - """ - - @abc.abstractmethod - def update_action_plan(self, action_plan_id, values): - """Update properties of an action plan. - - :param action_plan_id: The id or uuid of an action plan. 
- :returns: An action plan. - :raises: :py:class:`~.ActionPlanNotFound` - :raises: :py:class:`~.ActionPlanReferenced` - :raises: :py:class:`~.Invalid` - """ - - def soft_delete_action_plan(self, action_plan_id): - """Soft delete an action plan. - - :param action_plan_id: The id or uuid of an action plan. - :raises: :py:class:`~.ActionPlanNotFound` - """ - - @abc.abstractmethod - def get_efficacy_indicator_list(self, context, filters=None, limit=None, - marker=None, sort_key=None, sort_dir=None, - eager=False): - """Get specific columns for matching efficacy indicators. - - Return a list of the specified columns for all efficacy indicators that - match the specified filters. - - :param context: The security context - :param columns: List of column names to return. - Defaults to 'id' column when columns == None. - :param filters: Filters to apply. Defaults to None. - - :param limit: Maximum number of efficacy indicators to return. - :param marker: The last item of the previous page; we return the next - result set. - :param sort_key: Attribute by which results should be sorted. - :param sort_dir: Direction in which results should be sorted. - (asc, desc) - :param eager: If True, also loads One-to-X data (Default: False) - :returns: A list of tuples of the specified columns. - """ - - @abc.abstractmethod - def create_efficacy_indicator(self, values): - """Create a new efficacy indicator. - - :param values: A dict containing items used to identify - and track the efficacy indicator. For example: - - :: - - { - 'id': 1, - 'uuid': utils.generate_uuid(), - 'name': 'my_efficacy_indicator', - 'display_name': 'My efficacy indicator', - 'goal_uuid': utils.generate_uuid(), - } - :returns: An efficacy_indicator - :raises: :py:class:`~.EfficacyIndicatorAlreadyExists` - """ - - @abc.abstractmethod - def get_efficacy_indicator_by_id(self, context, efficacy_indicator_id, - eager=False): - """Return an efficacy indicator given its ID. 
- - :param context: The security context - :param efficacy_indicator_id: The ID of an efficacy indicator - :param eager: If True, also loads One-to-X data (Default: False) - :returns: An efficacy indicator - :raises: :py:class:`~.EfficacyIndicatorNotFound` - """ - - @abc.abstractmethod - def get_efficacy_indicator_by_uuid(self, context, efficacy_indicator_uuid, - eager=False): - """Return an efficacy indicator given its UUID. - - :param context: The security context - :param efficacy_indicator_uuid: The UUID of an efficacy indicator - :param eager: If True, also loads One-to-X data (Default: False) - :returns: An efficacy indicator - :raises: :py:class:`~.EfficacyIndicatorNotFound` - """ - - @abc.abstractmethod - def get_efficacy_indicator_by_name(self, context, efficacy_indicator_name, - eager=False): - """Return an efficacy indicator given its name. - - :param context: The security context - :param efficacy_indicator_name: The name of an efficacy indicator - :param eager: If True, also loads One-to-X data (Default: False) - :returns: An efficacy indicator - :raises: :py:class:`~.EfficacyIndicatorNotFound` - """ - - @abc.abstractmethod - def destroy_efficacy_indicator(self, efficacy_indicator_uuid): - """Destroy an efficacy indicator. - - :param efficacy_indicator_uuid: The UUID of an efficacy indicator - :raises: :py:class:`~.EfficacyIndicatorNotFound` - """ - - @abc.abstractmethod - def update_efficacy_indicator(self, efficacy_indicator_id, values): - """Update properties of an efficacy indicator. - - :param efficacy_indicator_id: The ID of an efficacy indicator - :returns: An efficacy indicator - :raises: :py:class:`~.EfficacyIndicatorNotFound` - :raises: :py:class:`~.Invalid` - """ - - @abc.abstractmethod - def get_scoring_engine_list( - self, context, columns=None, filters=None, limit=None, - marker=None, sort_key=None, sort_dir=None, eager=False): - """Get specific columns for matching scoring engines. 
- - Return a list of the specified columns for all scoring engines that - match the specified filters. - - :param context: The security context - :param columns: List of column names to return. - Defaults to 'id' column when columns == None. - :param filters: Filters to apply. Defaults to None. - :param limit: Maximum number of scoring engines to return. - :param marker: the last item of the previous page; we return the next - result set. - :param sort_key: Attribute by which results should be sorted. - :param sort_dir: direction in which results should be sorted. - (asc, desc) - :param eager: If True, also loads One-to-X data (Default: False) - :returns: A list of tuples of the specified columns. - """ - - @abc.abstractmethod - def create_scoring_engine(self, values): - """Create a new scoring engine. - - :param values: A dict containing several items used to identify - and track the scoring engine. - :returns: A scoring engine. - :raises: :py:class:`~.ScoringEngineAlreadyExists` - """ - - @abc.abstractmethod - def get_scoring_engine_by_id(self, context, scoring_engine_id, - eager=False): - """Return a scoring engine by its id. - - :param context: The security context - :param scoring_engine_id: The id of a scoring engine. - :param eager: If True, also loads One-to-X data (Default: False) - :returns: A scoring engine. - :raises: :py:class:`~.ScoringEngineNotFound` - """ - - @abc.abstractmethod - def get_scoring_engine_by_uuid(self, context, scoring_engine_uuid, - eager=False): - """Return a scoring engine by its uuid. - - :param context: The security context - :param scoring_engine_uuid: The uuid of a scoring engine. - :param eager: If True, also loads One-to-X data (Default: False) - :returns: A scoring engine. - :raises: :py:class:`~.ScoringEngineNotFound` - """ - - @abc.abstractmethod - def get_scoring_engine_by_name(self, context, scoring_engine_name, - eager=False): - """Return a scoring engine by its name. 
- - :param context: The security context - :param scoring_engine_name: The name of a scoring engine. - :param eager: If True, also loads One-to-X data (Default: False) - :returns: A scoring engine. - :raises: :py:class:`~.ScoringEngineNotFound` - """ - - @abc.abstractmethod - def destroy_scoring_engine(self, scoring_engine_id): - """Destroy a scoring engine. - - :param scoring_engine_id: The id of a scoring engine. - :raises: :py:class:`~.ScoringEngineNotFound` - """ - - @abc.abstractmethod - def update_scoring_engine(self, scoring_engine_id, values): - """Update properties of a scoring engine. - - :param scoring_engine_id: The id of a scoring engine. - :returns: A scoring engine. - :raises: :py:class:`~.ScoringEngineNotFound` - :raises: :py:class:`~.Invalid` - """ - - @abc.abstractmethod - def get_service_list(self, context, filters=None, limit=None, marker=None, - sort_key=None, sort_dir=None, eager=False): - """Get specific columns for matching services. - - Return a list of the specified columns for all services that - match the specified filters. - - :param context: The security context - :param filters: Filters to apply. Defaults to None. - - :param limit: Maximum number of services to return. - :param marker: The last item of the previous page; we return the next - result set. - :param sort_key: Attribute by which results should be sorted. - :param sort_dir: Direction in which results should be sorted. - (asc, desc) - :param eager: If True, also loads One-to-X data (Default: False) - :returns: A list of tuples of the specified columns. - """ - - @abc.abstractmethod - def create_service(self, values): - """Create a new service. - - :param values: A dict containing items used to identify - and track the service. 
For example: - - :: - - { - 'id': 1, - 'name': 'watcher-api', - 'status': 'ACTIVE', - 'host': 'controller' - } - :returns: A service - :raises: :py:class:`~.ServiceAlreadyExists` - """ - - @abc.abstractmethod - def get_service_by_id(self, context, service_id, eager=False): - """Return a service given its ID. - - :param context: The security context - :param service_id: The ID of a service - :param eager: If True, also loads One-to-X data (Default: False) - :returns: A service - :raises: :py:class:`~.ServiceNotFound` - """ - - @abc.abstractmethod - def get_service_by_name(self, context, service_name, eager=False): - """Return a service given its name. - - :param context: The security context - :param service_name: The name of a service - :param eager: If True, also loads One-to-X data (Default: False) - :returns: A service - :raises: :py:class:`~.ServiceNotFound` - """ - - @abc.abstractmethod - def destroy_service(self, service_id): - """Destroy a service. - - :param service_id: The ID of a service - :raises: :py:class:`~.ServiceNotFound` - """ - - @abc.abstractmethod - def update_service(self, service_id, values): - """Update properties of a service. - - :param service_id: The ID of a service - :returns: A service - :raises: :py:class:`~.ServiceyNotFound` - :raises: :py:class:`~.Invalid` - """ - - @abc.abstractmethod - def soft_delete_service(self, service_id): - """Soft delete a service. - - :param service_id: The id of a service. - :returns: A service. - :raises: :py:class:`~.ServiceNotFound` - """ diff --git a/watcher/db/migration.py b/watcher/db/migration.py deleted file mode 100644 index 1d65aa8..0000000 --- a/watcher/db/migration.py +++ /dev/null @@ -1,56 +0,0 @@ -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Database setup and migration commands.""" - -from oslo_config import cfg -from stevedore import driver - -_IMPL = None - - -def get_backend(): - global _IMPL - if not _IMPL: - cfg.CONF.import_opt('backend', 'oslo_db.options', group='database') - _IMPL = driver.DriverManager("watcher.database.migration_backend", - cfg.CONF.database.backend).driver - return _IMPL - - -def upgrade(version=None): - """Migrate the database to `version` or the most recent version.""" - return get_backend().upgrade(version) - - -def downgrade(version=None): - return get_backend().downgrade(version) - - -def version(): - return get_backend().version() - - -def stamp(version): - return get_backend().stamp(version) - - -def revision(message, autogenerate): - return get_backend().revision(message, autogenerate) - - -def create_schema(): - return get_backend().create_schema() diff --git a/watcher/db/purge.py b/watcher/db/purge.py deleted file mode 100644 index 4fb6e5e..0000000 --- a/watcher/db/purge.py +++ /dev/null @@ -1,476 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# - -from __future__ import print_function - -import collections -import datetime -import itertools -import sys - -from oslo_log import log -from oslo_utils import strutils -import prettytable as ptable -from six.moves import input - -from watcher._i18n import _ -from watcher._i18n import lazy_translation_enabled -from watcher.common import context -from watcher.common import exception -from watcher.common import utils -from watcher import objects - -LOG = log.getLogger(__name__) - - -class WatcherObjectsMap(object): - """Wrapper to deal with watcher objects per type - - This wrapper object contains a list of watcher objects per type. - Its main use is to simplify the merge of watcher objects by avoiding - duplicates, but also for representing the relationships between these - objects. - """ - - # This is for generating the .pot translations - keymap = collections.OrderedDict([ - ("goals", _("Goals")), - ("strategies", _("Strategies")), - ("audit_templates", _("Audit Templates")), - ("audits", _("Audits")), - ("action_plans", _("Action Plans")), - ("actions", _("Actions")), - ]) - - def __init__(self): - for attr_name in self.keys(): - setattr(self, attr_name, []) - - def values(self): - return (getattr(self, key) for key in self.keys()) - - @classmethod - def keys(cls): - return cls.keymap.keys() - - def __iter__(self): - return itertools.chain(*self.values()) - - def __add__(self, other): - new_map = self.__class__() - - # Merge the 2 items dicts into a new object (and avoid dupes) - for attr_name, initials, others in zip(self.keys(), self.values(), - other.values()): - # Creates a copy - merged = initials[:] - initials_ids = [item.id for item in initials] - non_dupes = [item for item in others - if item.id not in initials_ids] - merged += non_dupes - - setattr(new_map, attr_name, merged) - - return new_map - - def __str__(self): - out = "" - for key, vals in 
zip(self.keys(), self.values()): - ids = [val.id for val in vals] - out += "%(key)s: %(val)s" % (dict(key=key, val=ids)) - out += "\n" - return out - - def __len__(self): - return sum(len(getattr(self, key)) for key in self.keys()) - - def get_count_table(self): - headers = list(self.keymap.values()) - headers.append(_("Total")) # We also add a total count - translated_headers = [ - h.translate() if lazy_translation_enabled() else h - for h in headers - ] - - counters = [len(cat_vals) for cat_vals in self.values()] + [len(self)] - table = ptable.PrettyTable(field_names=translated_headers) - table.add_row(counters) - return table.get_string() - - -class PurgeCommand(object): - """Purges the DB by removing soft deleted entries - - The workflow for this purge is the following: - - # Find soft deleted objects which are expired - # Find orphan objects - # Find their related objects whether they are expired or not - # Merge them together - # If it does not exceed the limit, destroy them all - """ - - ctx = context.make_context(show_deleted=True) - - def __init__(self, age_in_days=None, max_number=None, - uuid=None, exclude_orphans=False, dry_run=None): - self.age_in_days = age_in_days - self.max_number = max_number - self.uuid = uuid - self.exclude_orphans = exclude_orphans - self.dry_run = dry_run - - self._delete_up_to_max = None - self._objects_map = WatcherObjectsMap() - - def get_expiry_date(self): - if not self.age_in_days: - return None - today = datetime.datetime.today() - expiry_date = today - datetime.timedelta(days=self.age_in_days) - return expiry_date - - @classmethod - def get_goal_uuid(cls, uuid_or_name): - if uuid_or_name is None: - return - - query_func = None - if not utils.is_uuid_like(uuid_or_name): - query_func = objects.Goal.get_by_name - else: - query_func = objects.Goal.get_by_uuid - - try: - goal = query_func(cls.ctx, uuid_or_name) - except Exception as exc: - LOG.exception(exc) - raise exception.GoalNotFound(goal=uuid_or_name) - - if not 
goal.deleted_at: - raise exception.NotSoftDeletedStateError( - name=_('Goal'), id=uuid_or_name) - - return goal.uuid - - def _find_goals(self, filters=None): - return objects.Goal.list(self.ctx, filters=filters) - - def _find_strategies(self, filters=None): - return objects.Strategy.list(self.ctx, filters=filters) - - def _find_audit_templates(self, filters=None): - return objects.AuditTemplate.list(self.ctx, filters=filters) - - def _find_audits(self, filters=None): - return objects.Audit.list(self.ctx, filters=filters) - - def _find_action_plans(self, filters=None): - return objects.ActionPlan.list(self.ctx, filters=filters) - - def _find_actions(self, filters=None): - return objects.Action.list(self.ctx, filters=filters) - - def _find_orphans(self): - orphans = WatcherObjectsMap() - - filters = dict(deleted=False) - goals = objects.Goal.list(self.ctx, filters=filters) - strategies = objects.Strategy.list(self.ctx, filters=filters) - audit_templates = objects.AuditTemplate.list(self.ctx, filters=filters) - audits = objects.Audit.list(self.ctx, filters=filters) - action_plans = objects.ActionPlan.list(self.ctx, filters=filters) - actions = objects.Action.list(self.ctx, filters=filters) - - goal_ids = set(g.id for g in goals) - orphans.strategies = [ - strategy for strategy in strategies - if strategy.goal_id not in goal_ids] - - strategy_ids = [s.id for s in (s for s in strategies - if s not in orphans.strategies)] - orphans.audit_templates = [ - audit_template for audit_template in audit_templates - if audit_template.goal_id not in goal_ids or - (audit_template.strategy_id and - audit_template.strategy_id not in strategy_ids)] - - orphans.audits = [ - audit for audit in audits - if audit.goal_id not in goal_ids or - (audit.strategy_id and - audit.strategy_id not in strategy_ids)] - - # Objects with orphan parents are themselves orphans - audit_ids = [audit.id for audit in audits - if audit not in orphans.audits] - orphans.action_plans = [ - ap for ap in 
action_plans - if ap.audit_id not in audit_ids or - ap.strategy_id not in strategy_ids] - - # Objects with orphan parents are themselves orphans - action_plan_ids = [ap.id for ap in action_plans - if ap not in orphans.action_plans] - orphans.actions = [ - action for action in actions - if action.action_plan_id not in action_plan_ids] - - LOG.debug("Orphans found:\n%s", orphans) - LOG.info("Orphans found:\n%s", orphans.get_count_table()) - - return orphans - - def _find_soft_deleted_objects(self): - to_be_deleted = WatcherObjectsMap() - expiry_date = self.get_expiry_date() - filters = dict(deleted=True) - - if self.uuid: - filters["uuid"] = self.uuid - if expiry_date: - filters.update(dict(deleted_at__lt=expiry_date)) - - to_be_deleted.goals.extend(self._find_goals(filters)) - to_be_deleted.strategies.extend(self._find_strategies(filters)) - to_be_deleted.audit_templates.extend( - self._find_audit_templates(filters)) - to_be_deleted.audits.extend(self._find_audits(filters)) - to_be_deleted.action_plans.extend( - self._find_action_plans(filters)) - to_be_deleted.actions.extend(self._find_actions(filters)) - - soft_deleted_objs = self._find_related_objects( - to_be_deleted, base_filters=dict(deleted=True)) - - LOG.debug("Soft deleted objects:\n%s", soft_deleted_objs) - - return soft_deleted_objs - - def _find_related_objects(self, objects_map, base_filters=None): - base_filters = base_filters or {} - - for goal in objects_map.goals: - filters = {} - filters.update(base_filters) - filters.update(dict(goal_id=goal.id)) - related_objs = WatcherObjectsMap() - related_objs.strategies = self._find_strategies(filters) - related_objs.audit_templates = self._find_audit_templates(filters) - related_objs.audits = self._find_audits(filters) - objects_map += related_objs - - for strategy in objects_map.strategies: - filters = {} - filters.update(base_filters) - filters.update(dict(strategy_id=strategy.id)) - related_objs = WatcherObjectsMap() - related_objs.audit_templates = 
self._find_audit_templates(filters) - related_objs.audits = self._find_audits(filters) - objects_map += related_objs - - for audit in objects_map.audits: - filters = {} - filters.update(base_filters) - filters.update(dict(audit_id=audit.id)) - related_objs = WatcherObjectsMap() - related_objs.action_plans = self._find_action_plans(filters) - objects_map += related_objs - - for action_plan in objects_map.action_plans: - filters = {} - filters.update(base_filters) - filters.update(dict(action_plan_id=action_plan.id)) - related_objs = WatcherObjectsMap() - related_objs.actions = self._find_actions(filters) - objects_map += related_objs - - return objects_map - - def confirmation_prompt(self): - print(self._objects_map.get_count_table()) - raw_val = input( - _("There are %(count)d objects set for deletion. " - "Continue? [y/N]") % dict(count=len(self._objects_map))) - - return strutils.bool_from_string(raw_val) - - def delete_up_to_max_prompt(self, objects_map): - print(objects_map.get_count_table()) - print(_("The number of objects (%(num)s) to delete from the database " - "exceeds the maximum number of objects (%(max_number)s) " - "specified.") % dict(max_number=self.max_number, - num=len(objects_map))) - raw_val = input( - _("Do you want to delete objects up to the specified maximum " - "number? 
[y/N]")) - - self._delete_up_to_max = strutils.bool_from_string(raw_val) - - return self._delete_up_to_max - - def _aggregate_objects(self): - """Objects aggregated on a 'per goal' basis""" - # todo: aggregate orphans as well - aggregate = [] - for goal in self._objects_map.goals: - related_objs = WatcherObjectsMap() - - # goals - related_objs.goals = [goal] - - # strategies - goal_ids = [goal.id] - related_objs.strategies = [ - strategy for strategy in self._objects_map.strategies - if strategy.goal_id in goal_ids - ] - - # audit templates - strategy_ids = [ - strategy.id for strategy in related_objs.strategies] - related_objs.audit_templates = [ - at for at in self._objects_map.audit_templates - if at.goal_id in goal_ids or - (at.strategy_id and at.strategy_id in strategy_ids) - ] - - # audits - related_objs.audits = [ - audit for audit in self._objects_map.audits - if audit.goal_id in goal_ids - ] - - # action plans - audit_ids = [audit.id for audit in related_objs.audits] - related_objs.action_plans = [ - action_plan for action_plan in self._objects_map.action_plans - if action_plan.audit_id in audit_ids - ] - - # actions - action_plan_ids = [ - action_plan.id for action_plan in related_objs.action_plans - ] - related_objs.actions = [ - action for action in self._objects_map.actions - if action.action_plan_id in action_plan_ids - ] - aggregate.append(related_objs) - - return aggregate - - def _get_objects_up_to_limit(self): - aggregated_objects = self._aggregate_objects() - to_be_deleted_subset = WatcherObjectsMap() - - for aggregate in aggregated_objects: - if len(aggregate) + len(to_be_deleted_subset) <= self.max_number: - to_be_deleted_subset += aggregate - else: - break - - LOG.debug(to_be_deleted_subset) - return to_be_deleted_subset - - def find_objects_to_delete(self): - """Finds all the objects to be purged - - :returns: A mapping with all the Watcher objects to purged - :rtype: :py:class:`~.WatcherObjectsMap` instance - """ - to_be_deleted = 
self._find_soft_deleted_objects() - - if not self.exclude_orphans: - to_be_deleted += self._find_orphans() - - LOG.debug("Objects to be deleted:\n%s", to_be_deleted) - - return to_be_deleted - - def do_delete(self): - LOG.info("Deleting...") - # Reversed to avoid errors with foreign keys - for entry in reversed(list(self._objects_map)): - entry.destroy() - - def execute(self): - LOG.info("Starting purge command") - self._objects_map = self.find_objects_to_delete() - - if (self.max_number is not None and - len(self._objects_map) > self.max_number): - if self.delete_up_to_max_prompt(self._objects_map): - self._objects_map = self._get_objects_up_to_limit() - else: - return - - _orphans_note = (_(" (orphans excluded)") if self.exclude_orphans - else _(" (may include orphans)")) - if not self.dry_run and self.confirmation_prompt(): - self.do_delete() - print(_("Purge results summary%s:") % _orphans_note) - LOG.info("Purge results summary%s:", _orphans_note) - else: - LOG.debug(self._objects_map) - print(_("Here below is a table containing the objects " - "that can be purged%s:") % _orphans_note) - - LOG.info("\n%s", self._objects_map.get_count_table()) - print(self._objects_map.get_count_table()) - LOG.info("Purge process completed") - - -def purge(age_in_days, max_number, goal, exclude_orphans, dry_run): - """Removes soft deleted objects from the database - - :param age_in_days: Number of days since deletion (from today) - to exclude from the purge. If None, everything will be purged. - :type age_in_days: int - :param max_number: Max number of objects expected to be deleted. - Prevents the deletion if exceeded. No limit if set to None. - :type max_number: int - :param goal: UUID or name of the goal to purge. - :type goal: str - :param exclude_orphans: Flag to indicate whether or not you want to - exclude orphans from deletion (default: False). - :type exclude_orphans: bool - :param dry_run: Flag to indicate whether or not you want to perform - a dry run (no deletion). 
- :type dry_run: bool - """ - try: - if max_number and max_number < 0: - raise exception.NegativeLimitError - - LOG.info("[options] age_in_days = %s", age_in_days) - LOG.info("[options] max_number = %s", max_number) - LOG.info("[options] goal = %s", goal) - LOG.info("[options] exclude_orphans = %s", exclude_orphans) - LOG.info("[options] dry_run = %s", dry_run) - - uuid = PurgeCommand.get_goal_uuid(goal) - - cmd = PurgeCommand(age_in_days, max_number, uuid, - exclude_orphans, dry_run) - - cmd.execute() - - except Exception as exc: - LOG.exception(exc) - print(exc) - sys.exit(1) diff --git a/watcher/db/sqlalchemy/__init__.py b/watcher/db/sqlalchemy/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/db/sqlalchemy/alembic.ini b/watcher/db/sqlalchemy/alembic.ini deleted file mode 100644 index a768980..0000000 --- a/watcher/db/sqlalchemy/alembic.ini +++ /dev/null @@ -1,54 +0,0 @@ -# A generic, single database configuration. - -[alembic] -# path to migration scripts -script_location = %(here)s/alembic - -# template used to generate migration files -# file_template = %%(rev)s_%%(slug)s - -# max length of characters to apply to the -# "slug" field -#truncate_slug_length = 40 - -# set to 'true' to run the environment during -# the 'revision' command, regardless of autogenerate -# revision_environment = false - -#sqlalchemy.url = driver://user:pass@localhost/dbname - - -# Logging configuration -[loggers] -keys = root,sqlalchemy,alembic - -[handlers] -keys = console - -[formatters] -keys = generic - -[logger_root] -level = WARN -handlers = console -qualname = - -[logger_sqlalchemy] -level = WARN -handlers = -qualname = sqlalchemy.engine - -[logger_alembic] -level = INFO -handlers = -qualname = alembic - -[handler_console] -class = StreamHandler -args = (sys.stderr,) -level = NOTSET -formatter = generic - -[formatter_generic] -format = %(levelname)-5.5s [%(name)s] %(message)s -datefmt = %H:%M:%S diff --git 
a/watcher/db/sqlalchemy/alembic/README.rst b/watcher/db/sqlalchemy/alembic/README.rst deleted file mode 100644 index 1faab71..0000000 --- a/watcher/db/sqlalchemy/alembic/README.rst +++ /dev/null @@ -1,62 +0,0 @@ -The migrations in the alembic/versions contain the changes needed to migrate -from older Watcher releases to newer versions. A migration occurs by executing -a script that details the changes needed to upgrade/downgrade the database. The -migration scripts are ordered so that multiple scripts can run sequentially to -update the database. The scripts are executed by Watcher's migration wrapper -which uses the Alembic library to manage the migration. Watcher supports -migration from Ocata or later. - - -If you are a deployer or developer and want to migrate from Ocata to later -release you must first add version tracking to the database:: - - $ watcher-db-manage --config-file /path/to/watcher.conf stamp ocata - - -You can upgrade to the latest database version via:: - - $ watcher-db-manage --config-file /path/to/watcher.conf upgrade head - - -To check the current database version:: - - $ watcher-db-manage --config-file /path/to/watcher.conf current - - -To create a script to run the migration offline:: - - $ watcher-db-manage --config-file /path/to/watcher.conf upgrade head --sql - - -To run the offline migration between specific migration versions:: - - $ watcher-db-manage --config-file /path/to/watcher.conf upgrade \ - : --sql - - -Upgrade the database incrementally:: - - $ watcher-db-manage --config-file /path/to/watcher.conf upgrade --revision \ - <# of revs> - - -Downgrade the database by a certain number of revisions:: - - $ watcher-db-manage --config-file /path/to/watcher.conf downgrade --revision \ - <# of revs> - - -Create new revision:: - - $ watcher-db-manage --config-file /path/to/watcher.conf revision \ - -m "description of revision" --autogenerate - - -Create a blank file:: - - $ watcher-db-manage --config-file /path/to/watcher.conf revision \ 
- -m "description of revision" - -Please see https://alembic.readthedocs.org/en/latest/index.html for general -documentation - diff --git a/watcher/db/sqlalchemy/alembic/env.py b/watcher/db/sqlalchemy/alembic/env.py deleted file mode 100644 index 474b1ca..0000000 --- a/watcher/db/sqlalchemy/alembic/env.py +++ /dev/null @@ -1,54 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from logging import config as log_config - -from alembic import context - -from watcher.db.sqlalchemy import api as sqla_api -from watcher.db.sqlalchemy import models - -# this is the Alembic Config object, which provides -# access to the values within the .ini file in use. -config = context.config - -# Interpret the config file for Python logging. -# This line sets up loggers basically. -log_config.fileConfig(config.config_file_name) - -# add your model's MetaData object here -# for 'autogenerate' support -# from myapp import mymodel -target_metadata = models.Base.metadata - -# other values from the config, defined by the needs of env.py, -# can be acquired: -# my_important_option = config.get_main_option("my_important_option") -# ... etc. - - -def run_migrations_online(): - """Run migrations in 'online' mode. - - In this scenario we need to create an Engine - and associate a connection with the context. 
- - """ - engine = sqla_api.get_engine() - with engine.connect() as connection: - context.configure(connection=connection, - target_metadata=target_metadata) - with context.begin_transaction(): - context.run_migrations() - - -run_migrations_online() diff --git a/watcher/db/sqlalchemy/alembic/script.py.mako b/watcher/db/sqlalchemy/alembic/script.py.mako deleted file mode 100644 index 9570201..0000000 --- a/watcher/db/sqlalchemy/alembic/script.py.mako +++ /dev/null @@ -1,22 +0,0 @@ -"""${message} - -Revision ID: ${up_revision} -Revises: ${down_revision} -Create Date: ${create_date} - -""" - -# revision identifiers, used by Alembic. -revision = ${repr(up_revision)} -down_revision = ${repr(down_revision)} - -from alembic import op -import sqlalchemy as sa -${imports if imports else ""} - -def upgrade(): - ${upgrades if upgrades else "pass"} - - -def downgrade(): - ${downgrades if downgrades else "pass"} diff --git a/watcher/db/sqlalchemy/alembic/versions/001_ocata.py b/watcher/db/sqlalchemy/alembic/versions/001_ocata.py deleted file mode 100644 index 22d7220..0000000 --- a/watcher/db/sqlalchemy/alembic/versions/001_ocata.py +++ /dev/null @@ -1,203 +0,0 @@ -"""ocata release - -Revision ID: 9894235b4278 -Revises: None -Create Date: 2017-02-01 09:40:05.065981 - -""" -from alembic import op -import oslo_db -import sqlalchemy as sa -from watcher.db.sqlalchemy import models - - -# revision identifiers, used by Alembic. 
-revision = '001' -down_revision = None - - -def upgrade(): - op.create_table( - 'goals', - sa.Column('created_at', sa.DateTime(), nullable=True), - sa.Column('updated_at', sa.DateTime(), nullable=True), - sa.Column('deleted_at', sa.DateTime(), nullable=True), - sa.Column('deleted', oslo_db.sqlalchemy.types.SoftDeleteInteger(), - nullable=True), - sa.Column('id', sa.Integer(), nullable=False), - sa.Column('uuid', sa.String(length=36), nullable=True), - sa.Column('name', sa.String(length=63), nullable=False), - sa.Column('display_name', sa.String(length=63), nullable=False), - sa.Column('efficacy_specification', models.JSONEncodedList(), - nullable=False), - sa.PrimaryKeyConstraint('id'), - sa.UniqueConstraint('name', 'deleted', name='uniq_goals0name'), - sa.UniqueConstraint('uuid', name='uniq_goals0uuid') - ) - - op.create_table( - 'scoring_engines', - sa.Column('created_at', sa.DateTime(), nullable=True), - sa.Column('updated_at', sa.DateTime(), nullable=True), - sa.Column('deleted_at', sa.DateTime(), nullable=True), - sa.Column('deleted', oslo_db.sqlalchemy.types.SoftDeleteInteger(), - nullable=True), - sa.Column('id', sa.Integer(), nullable=False), - sa.Column('uuid', sa.String(length=36), nullable=False), - sa.Column('name', sa.String(length=63), nullable=False), - sa.Column('description', sa.String(length=255), nullable=True), - sa.Column('metainfo', sa.Text(), nullable=True), - sa.PrimaryKeyConstraint('id'), - sa.UniqueConstraint('name', 'deleted', - name='uniq_scoring_engines0name'), - sa.UniqueConstraint('uuid', name='uniq_scoring_engines0uuid') - ) - - op.create_table( - 'services', - sa.Column('created_at', sa.DateTime(), nullable=True), - sa.Column('updated_at', sa.DateTime(), nullable=True), - sa.Column('deleted_at', sa.DateTime(), nullable=True), - sa.Column('deleted', oslo_db.sqlalchemy.types.SoftDeleteInteger(), - nullable=True), - sa.Column('id', sa.Integer(), nullable=False), - sa.Column('name', sa.String(length=255), nullable=False), - 
sa.Column('host', sa.String(length=255), nullable=False), - sa.Column('last_seen_up', sa.DateTime(), nullable=True), - sa.PrimaryKeyConstraint('id'), - sa.UniqueConstraint('host', 'name', 'deleted', - name='uniq_services0host0name0deleted') - ) - - op.create_table( - 'strategies', - sa.Column('created_at', sa.DateTime(), nullable=True), - sa.Column('updated_at', sa.DateTime(), nullable=True), - sa.Column('deleted_at', sa.DateTime(), nullable=True), - sa.Column('deleted', oslo_db.sqlalchemy.types.SoftDeleteInteger(), - nullable=True), - sa.Column('id', sa.Integer(), nullable=False), - sa.Column('uuid', sa.String(length=36), nullable=True), - sa.Column('name', sa.String(length=63), nullable=False), - sa.Column('display_name', sa.String(length=63), nullable=False), - sa.Column('goal_id', sa.Integer(), nullable=False), - sa.Column('parameters_spec', models.JSONEncodedDict(), - nullable=True), - sa.ForeignKeyConstraint(['goal_id'], ['goals.id'], ), - sa.PrimaryKeyConstraint('id'), - sa.UniqueConstraint('name', 'deleted', name='uniq_strategies0name'), - sa.UniqueConstraint('uuid', name='uniq_strategies0uuid') - ) - - op.create_table( - 'audit_templates', - sa.Column('created_at', sa.DateTime(), nullable=True), - sa.Column('updated_at', sa.DateTime(), nullable=True), - sa.Column('deleted_at', sa.DateTime(), nullable=True), - sa.Column('deleted', oslo_db.sqlalchemy.types.SoftDeleteInteger(), - nullable=True), - sa.Column('id', sa.Integer(), nullable=False), - sa.Column('uuid', sa.String(length=36), nullable=True), - sa.Column('name', sa.String(length=63), nullable=True), - sa.Column('description', sa.String(length=255), nullable=True), - sa.Column('goal_id', sa.Integer(), nullable=False), - sa.Column('strategy_id', sa.Integer(), nullable=True), - sa.Column('scope', models.JSONEncodedList(), - nullable=True), - sa.ForeignKeyConstraint(['goal_id'], ['goals.id'], ), - sa.ForeignKeyConstraint(['strategy_id'], ['strategies.id'], ), - sa.PrimaryKeyConstraint('id'), - 
sa.UniqueConstraint('name', 'deleted', - name='uniq_audit_templates0name'), - sa.UniqueConstraint('uuid', name='uniq_audit_templates0uuid') - ) - op.create_table( - 'audits', - sa.Column('created_at', sa.DateTime(), nullable=True), - sa.Column('updated_at', sa.DateTime(), nullable=True), - sa.Column('deleted_at', sa.DateTime(), nullable=True), - sa.Column('deleted', oslo_db.sqlalchemy.types.SoftDeleteInteger(), - nullable=True), - sa.Column('id', sa.Integer(), nullable=False), - sa.Column('uuid', sa.String(length=36), nullable=True), - sa.Column('audit_type', sa.String(length=20), nullable=True), - sa.Column('state', sa.String(length=20), nullable=True), - sa.Column('parameters', models.JSONEncodedDict(), nullable=True), - sa.Column('interval', sa.Integer(), nullable=True), - sa.Column('goal_id', sa.Integer(), nullable=False), - sa.Column('strategy_id', sa.Integer(), nullable=True), - sa.Column('scope', models.JSONEncodedList(), nullable=True), - sa.Column('auto_trigger', sa.Boolean(), nullable=False), - sa.ForeignKeyConstraint(['goal_id'], ['goals.id'], ), - sa.ForeignKeyConstraint(['strategy_id'], ['strategies.id'], ), - sa.PrimaryKeyConstraint('id'), - sa.UniqueConstraint('uuid', name='uniq_audits0uuid') - ) - op.create_table( - 'action_plans', - sa.Column('created_at', sa.DateTime(), nullable=True), - sa.Column('updated_at', sa.DateTime(), nullable=True), - sa.Column('deleted_at', sa.DateTime(), nullable=True), - sa.Column('deleted', oslo_db.sqlalchemy.types.SoftDeleteInteger(), - nullable=True), - sa.Column('id', sa.Integer(), nullable=False), - sa.Column('uuid', sa.String(length=36), nullable=True), - sa.Column('audit_id', sa.Integer(), nullable=False), - sa.Column('strategy_id', sa.Integer(), nullable=False), - sa.Column('state', sa.String(length=20), nullable=True), - sa.Column('global_efficacy', models.JSONEncodedDict(), nullable=True), - sa.ForeignKeyConstraint(['audit_id'], ['audits.id'], ), - sa.ForeignKeyConstraint(['strategy_id'], ['strategies.id'], 
), - sa.PrimaryKeyConstraint('id'), - sa.UniqueConstraint('uuid', name='uniq_action_plans0uuid') - ) - - op.create_table( - 'actions', - sa.Column('created_at', sa.DateTime(), nullable=True), - sa.Column('updated_at', sa.DateTime(), nullable=True), - sa.Column('deleted_at', sa.DateTime(), nullable=True), - sa.Column('deleted', oslo_db.sqlalchemy.types.SoftDeleteInteger(), - nullable=True), - sa.Column('id', sa.Integer(), nullable=False), - sa.Column('uuid', sa.String(length=36), nullable=False), - sa.Column('action_plan_id', sa.Integer(), nullable=False), - sa.Column('action_type', sa.String(length=255), nullable=False), - sa.Column('input_parameters', models.JSONEncodedDict(), nullable=True), - sa.Column('state', sa.String(length=20), nullable=True), - sa.Column('parents', models.JSONEncodedList(), nullable=True), - sa.ForeignKeyConstraint(['action_plan_id'], ['action_plans.id'], ), - sa.PrimaryKeyConstraint('id'), - sa.UniqueConstraint('uuid', name='uniq_actions0uuid') - ) - - op.create_table( - 'efficacy_indicators', - sa.Column('created_at', sa.DateTime(), nullable=True), - sa.Column('updated_at', sa.DateTime(), nullable=True), - sa.Column('deleted_at', sa.DateTime(), nullable=True), - sa.Column('deleted', oslo_db.sqlalchemy.types.SoftDeleteInteger(), - nullable=True), - sa.Column('id', sa.Integer(), nullable=False), - sa.Column('uuid', sa.String(length=36), nullable=True), - sa.Column('name', sa.String(length=63), nullable=True), - sa.Column('description', sa.String(length=255), nullable=True), - sa.Column('unit', sa.String(length=63), nullable=True), - sa.Column('value', sa.Numeric(), nullable=True), - sa.Column('action_plan_id', sa.Integer(), nullable=False), - sa.ForeignKeyConstraint(['action_plan_id'], ['action_plans.id'], ), - sa.PrimaryKeyConstraint('id'), - sa.UniqueConstraint('uuid', name='uniq_efficacy_indicators0uuid') - ) - - -def downgrade(): - op.drop_table('efficacy_indicators') - op.drop_table('actions') - op.drop_table('action_plans') - 
op.drop_table('audits') - op.drop_table('audit_templates') - op.drop_table('strategies') - op.drop_table('services') - op.drop_table('scoring_engines') - op.drop_table('goals') diff --git a/watcher/db/sqlalchemy/alembic/versions/0f6042416884_add_apscheduler_jobs.py b/watcher/db/sqlalchemy/alembic/versions/0f6042416884_add_apscheduler_jobs.py deleted file mode 100644 index 56f4c8c..0000000 --- a/watcher/db/sqlalchemy/alembic/versions/0f6042416884_add_apscheduler_jobs.py +++ /dev/null @@ -1,33 +0,0 @@ -"""Add apscheduler_jobs table to store background jobs - -Revision ID: 0f6042416884 -Revises: 001 -Create Date: 2017-03-24 11:21:29.036532 - -""" -from alembic import op -import sqlalchemy as sa - -from watcher.db.sqlalchemy import models - -# revision identifiers, used by Alembic. -revision = '0f6042416884' -down_revision = '001' - - -def upgrade(): - op.create_table( - 'apscheduler_jobs', - sa.Column('id', sa.Unicode(191, _warn_on_bytestring=False), - nullable=False), - sa.Column('next_run_time', sa.Float(25), index=True), - sa.Column('job_state', sa.LargeBinary, nullable=False), - sa.Column('service_id', sa.Integer(), nullable=False), - sa.Column('tag', models.JSONEncodedDict(), nullable=True), - sa.PrimaryKeyConstraint('id'), - sa.ForeignKeyConstraint(['service_id'], ['services.id']) - ) - - -def downgrade(): - op.drop_table('apscheduler_jobs') diff --git a/watcher/db/sqlalchemy/alembic/versions/d098df6021e2_cron_support_for_audit.py b/watcher/db/sqlalchemy/alembic/versions/d098df6021e2_cron_support_for_audit.py deleted file mode 100644 index 1fae4e8..0000000 --- a/watcher/db/sqlalchemy/alembic/versions/d098df6021e2_cron_support_for_audit.py +++ /dev/null @@ -1,26 +0,0 @@ -"""Add cron support for audit table - -Revision ID: d098df6021e2 -Revises: 0f6042416884 -Create Date: 2017-06-08 16:21:35.746752 - -""" -from alembic import op -import sqlalchemy as sa - -# revision identifiers, used by Alembic. 
-revision = 'd098df6021e2' -down_revision = '0f6042416884' - - -def upgrade(): - op.alter_column('audits', 'interval', existing_type=sa.String(36), - nullable=True) - op.add_column('audits', - sa.Column('next_run_time', sa.DateTime(), nullable=True)) - - -def downgrade(): - op.alter_column('audits', 'interval', existing_type=sa.Integer(), - nullable=True) - op.drop_column('audits', 'next_run_time') diff --git a/watcher/db/sqlalchemy/api.py b/watcher/db/sqlalchemy/api.py deleted file mode 100644 index ebe9197..0000000 --- a/watcher/db/sqlalchemy/api.py +++ /dev/null @@ -1,1129 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""SQLAlchemy storage backend.""" - -import collections -import datetime -import operator - -from oslo_config import cfg -from oslo_db import exception as db_exc -from oslo_db.sqlalchemy import session as db_session -from oslo_db.sqlalchemy import utils as db_utils -from oslo_utils import timeutils -from sqlalchemy.inspection import inspect -from sqlalchemy.orm import exc -from sqlalchemy.orm import joinedload - -from watcher._i18n import _ -from watcher.common import exception -from watcher.common import utils -from watcher.db import api -from watcher.db.sqlalchemy import models -from watcher import objects - -CONF = cfg.CONF - -_FACADE = None - - -def _create_facade_lazily(): - global _FACADE - if _FACADE is None: - _FACADE = db_session.EngineFacade.from_config(CONF) - return _FACADE - - -def get_engine(): - facade = _create_facade_lazily() - return facade.get_engine() - - -def get_session(**kwargs): - facade = _create_facade_lazily() - return facade.get_session(**kwargs) - - -def get_backend(): - """The backend is this module itself.""" - return Connection() - - -def model_query(model, *args, **kwargs): - """Query helper for simpler session usage. - - :param session: if present, the session to use - """ - session = kwargs.get('session') or get_session() - query = session.query(model, *args) - return query - - -def add_identity_filter(query, value): - """Adds an identity filter to a query. - - Filters results by ID, if supplied value is a valid integer. - Otherwise attempts to filter results by UUID. - - :param query: Initial query to add filter to. - :param value: Value for filtering results by. - :return: Modified query. 
- """ - if utils.is_int_like(value): - return query.filter_by(id=value) - elif utils.is_uuid_like(value): - return query.filter_by(uuid=value) - else: - raise exception.InvalidIdentity(identity=value) - - -def _paginate_query(model, limit=None, marker=None, sort_key=None, - sort_dir=None, query=None): - if not query: - query = model_query(model) - sort_keys = ['id'] - if sort_key and sort_key not in sort_keys: - sort_keys.insert(0, sort_key) - query = db_utils.paginate_query(query, model, limit, sort_keys, - marker=marker, sort_dir=sort_dir) - return query.all() - - -class JoinMap(utils.Struct): - """Mapping for the Join-based queries""" - - -NaturalJoinFilter = collections.namedtuple( - 'NaturalJoinFilter', ['join_fieldname', 'join_model']) - - -class Connection(api.BaseConnection): - """SqlAlchemy connection.""" - - valid_operators = { - "": operator.eq, - "eq": operator.eq, - "neq": operator.ne, - "gt": operator.gt, - "gte": operator.ge, - "lt": operator.lt, - "lte": operator.le, - "in": lambda field, choices: field.in_(choices), - "notin": lambda field, choices: field.notin_(choices), - } - - def __init__(self): - super(Connection, self).__init__() - - def __add_simple_filter(self, query, model, fieldname, value, operator_): - field = getattr(model, fieldname) - - if (fieldname != 'deleted' and value and - field.type.python_type is datetime.datetime): - if not isinstance(value, datetime.datetime): - value = timeutils.parse_isotime(value) - - return query.filter(self.valid_operators[operator_](field, value)) - - def __add_join_filter(self, query, model, fieldname, value, operator_): - query = query.join(model) - return self.__add_simple_filter(query, model, fieldname, - value, operator_) - - def __decompose_filter(self, raw_fieldname): - """Decompose a filter name into its 2 subparts - - A filter can take 2 forms: - - - "" which is a syntactic sugar for "__eq" - - "__" where is the comparison operator - to be used. 
- - Available operators are: - - - eq - - neq - - gt - - gte - - lt - - lte - - in - - notin - """ - separator = '__' - fieldname, separator, operator_ = raw_fieldname.partition(separator) - - if operator_ and operator_ not in self.valid_operators: - raise exception.InvalidOperator( - operator=operator_, valid_operators=self.valid_operators) - - return fieldname, operator_ - - def _add_filters(self, query, model, filters=None, - plain_fields=None, join_fieldmap=None): - """Generic way to add filters to a Watcher model - - Each filter key provided by the `filters` parameter will be decomposed - into 2 pieces: the field name and the comparison operator - - - "": By default, the "eq" is applied if no operator is provided - - "eq", which stands for "equal" : e.g. {"state__eq": "PENDING"} - will result in the "WHERE state = 'PENDING'" clause. - - "neq", which stands for "not equal" : e.g. {"state__neq": "PENDING"} - will result in the "WHERE state != 'PENDING'" clause. - - "gt", which stands for "greater than" : e.g. - {"created_at__gt": "2016-06-06T10:33:22.063176"} will result in the - "WHERE created_at > '2016-06-06T10:33:22.063176'" clause. - - "gte", which stands for "greater than or equal to" : e.g. - {"created_at__gte": "2016-06-06T10:33:22.063176"} will result in the - "WHERE created_at >= '2016-06-06T10:33:22.063176'" clause. - - "lt", which stands for "less than" : e.g. - {"created_at__lt": "2016-06-06T10:33:22.063176"} will result in the - "WHERE created_at < '2016-06-06T10:33:22.063176'" clause. - - "lte", which stands for "less than or equal to" : e.g. - {"created_at__lte": "2016-06-06T10:33:22.063176"} will result in the - "WHERE created_at <= '2016-06-06T10:33:22.063176'" clause. - - "in": e.g. {"state__in": ('SUCCEEDED', 'FAILED')} will result in the - "WHERE state IN ('SUCCEEDED', 'FAILED')" clause. 
- - :param query: a :py:class:`sqlalchemy.orm.query.Query` instance - :param model: the model class the filters should relate to - :param filters: dict with the following structure {"fieldname": value} - :param plain_fields: a :py:class:`sqlalchemy.orm.query.Query` instance - :param join_fieldmap: a :py:class:`sqlalchemy.orm.query.Query` instance - """ - soft_delete_mixin_fields = ['deleted', 'deleted_at'] - timestamp_mixin_fields = ['created_at', 'updated_at'] - filters = filters or {} - - # Special case for 'deleted' because it is a non-boolean flag - if 'deleted' in filters: - deleted_filter = filters.pop('deleted') - op = 'eq' if not bool(deleted_filter) else 'neq' - filters['deleted__%s' % op] = 0 - - plain_fields = tuple( - (list(plain_fields) or []) + - soft_delete_mixin_fields + - timestamp_mixin_fields) - join_fieldmap = join_fieldmap or {} - - for raw_fieldname, value in filters.items(): - fieldname, operator_ = self.__decompose_filter(raw_fieldname) - if fieldname in plain_fields: - query = self.__add_simple_filter( - query, model, fieldname, value, operator_) - elif fieldname in join_fieldmap: - join_field, join_model = join_fieldmap[fieldname] - query = self.__add_join_filter( - query, join_model, join_field, value, operator_) - - return query - - @staticmethod - def _get_relationships(model): - return inspect(model).relationships - - @staticmethod - def _set_eager_options(model, query): - relationships = inspect(model).relationships - for relationship in relationships: - if not relationship.uselist: - # We have a One-to-X relationship - query = query.options(joinedload(relationship.key)) - return query - - def _create(self, model, values): - obj = model() - cleaned_values = {k: v for k, v in values.items() - if k not in self._get_relationships(model)} - obj.update(cleaned_values) - obj.save() - return obj - - def _get(self, context, model, fieldname, value, eager): - query = model_query(model) - if eager: - query = self._set_eager_options(model, 
query) - - query = query.filter(getattr(model, fieldname) == value) - if not context.show_deleted: - query = query.filter(model.deleted_at.is_(None)) - - try: - obj = query.one() - except exc.NoResultFound: - raise exception.ResourceNotFound(name=model.__name__, id=value) - - return obj - - @staticmethod - def _update(model, id_, values): - session = get_session() - with session.begin(): - query = model_query(model, session=session) - query = add_identity_filter(query, id_) - try: - ref = query.with_lockmode('update').one() - except exc.NoResultFound: - raise exception.ResourceNotFound(name=model.__name__, id=id_) - - ref.update(values) - return ref - - @staticmethod - def _soft_delete(model, id_): - session = get_session() - with session.begin(): - query = model_query(model, session=session) - query = add_identity_filter(query, id_) - try: - row = query.one() - except exc.NoResultFound: - raise exception.ResourceNotFound(name=model.__name__, id=id_) - - row.soft_delete(session) - - return row - - @staticmethod - def _destroy(model, id_): - session = get_session() - with session.begin(): - query = model_query(model, session=session) - query = add_identity_filter(query, id_) - - try: - query.one() - except exc.NoResultFound: - raise exception.ResourceNotFound(name=model.__name__, id=id_) - - query.delete() - - def _add_goals_filters(self, query, filters): - if filters is None: - filters = {} - - plain_fields = ['uuid', 'name', 'display_name'] - - return self._add_filters( - query=query, model=models.Goal, filters=filters, - plain_fields=plain_fields) - - def _add_strategies_filters(self, query, filters): - plain_fields = ['uuid', 'name', 'display_name', 'goal_id'] - join_fieldmap = JoinMap( - goal_uuid=NaturalJoinFilter( - join_fieldname="uuid", join_model=models.Goal), - goal_name=NaturalJoinFilter( - join_fieldname="name", join_model=models.Goal)) - return self._add_filters( - query=query, model=models.Strategy, filters=filters, - plain_fields=plain_fields, 
join_fieldmap=join_fieldmap) - - def _add_audit_templates_filters(self, query, filters): - if filters is None: - filters = {} - - plain_fields = ['uuid', 'name', 'goal_id', 'strategy_id'] - join_fieldmap = JoinMap( - goal_uuid=NaturalJoinFilter( - join_fieldname="uuid", join_model=models.Goal), - goal_name=NaturalJoinFilter( - join_fieldname="name", join_model=models.Goal), - strategy_uuid=NaturalJoinFilter( - join_fieldname="uuid", join_model=models.Strategy), - strategy_name=NaturalJoinFilter( - join_fieldname="name", join_model=models.Strategy), - ) - - return self._add_filters( - query=query, model=models.AuditTemplate, filters=filters, - plain_fields=plain_fields, join_fieldmap=join_fieldmap) - - def _add_audits_filters(self, query, filters): - if filters is None: - filters = {} - - plain_fields = ['uuid', 'audit_type', 'state', 'goal_id', - 'strategy_id'] - join_fieldmap = { - 'goal_uuid': ("uuid", models.Goal), - 'goal_name': ("name", models.Goal), - 'strategy_uuid': ("uuid", models.Strategy), - 'strategy_name': ("name", models.Strategy), - } - - return self._add_filters( - query=query, model=models.Audit, filters=filters, - plain_fields=plain_fields, join_fieldmap=join_fieldmap) - - def _add_action_plans_filters(self, query, filters): - if filters is None: - filters = {} - - plain_fields = ['uuid', 'state', 'audit_id', 'strategy_id'] - join_fieldmap = JoinMap( - audit_uuid=NaturalJoinFilter( - join_fieldname="uuid", join_model=models.Audit), - strategy_uuid=NaturalJoinFilter( - join_fieldname="uuid", join_model=models.Strategy), - strategy_name=NaturalJoinFilter( - join_fieldname="name", join_model=models.Strategy), - ) - - return self._add_filters( - query=query, model=models.ActionPlan, filters=filters, - plain_fields=plain_fields, join_fieldmap=join_fieldmap) - - def _add_actions_filters(self, query, filters): - if filters is None: - filters = {} - - plain_fields = ['uuid', 'state', 'action_plan_id'] - join_fieldmap = { - 'action_plan_uuid': ("uuid", 
models.ActionPlan), - } - - query = self._add_filters( - query=query, model=models.Action, filters=filters, - plain_fields=plain_fields, join_fieldmap=join_fieldmap) - - if 'audit_uuid' in filters: - stmt = model_query(models.ActionPlan).join( - models.Audit, - models.Audit.id == models.ActionPlan.audit_id)\ - .filter_by(uuid=filters['audit_uuid']).subquery() - query = query.filter_by(action_plan_id=stmt.c.id) - - return query - - def _add_efficacy_indicators_filters(self, query, filters): - if filters is None: - filters = {} - - plain_fields = ['uuid', 'name', 'unit', 'schema', 'action_plan_id'] - join_fieldmap = JoinMap( - action_plan_uuid=NaturalJoinFilter( - join_fieldname="uuid", join_model=models.ActionPlan), - ) - - return self._add_filters( - query=query, model=models.EfficacyIndicator, filters=filters, - plain_fields=plain_fields, join_fieldmap=join_fieldmap) - - # ### GOALS ### # - - def get_goal_list(self, context, filters=None, limit=None, marker=None, - sort_key=None, sort_dir=None, eager=False): - query = model_query(models.Goal) - if eager: - query = self._set_eager_options(models.Goal, query) - query = self._add_goals_filters(query, filters) - if not context.show_deleted: - query = query.filter_by(deleted_at=None) - return _paginate_query(models.Goal, limit, marker, - sort_key, sort_dir, query) - - def create_goal(self, values): - # ensure defaults are present for new goals - if not values.get('uuid'): - values['uuid'] = utils.generate_uuid() - - try: - goal = self._create(models.Goal, values) - except db_exc.DBDuplicateEntry: - raise exception.GoalAlreadyExists(uuid=values['uuid']) - return goal - - def _get_goal(self, context, fieldname, value, eager): - try: - return self._get(context, model=models.Goal, - fieldname=fieldname, value=value, eager=eager) - except exception.ResourceNotFound: - raise exception.GoalNotFound(goal=value) - - def get_goal_by_id(self, context, goal_id, eager=False): - return self._get_goal( - context, fieldname="id", 
value=goal_id, eager=eager) - - def get_goal_by_uuid(self, context, goal_uuid, eager=False): - return self._get_goal( - context, fieldname="uuid", value=goal_uuid, eager=eager) - - def get_goal_by_name(self, context, goal_name, eager=False): - return self._get_goal( - context, fieldname="name", value=goal_name, eager=eager) - - def destroy_goal(self, goal_id): - try: - return self._destroy(models.Goal, goal_id) - except exception.ResourceNotFound: - raise exception.GoalNotFound(goal=goal_id) - - def update_goal(self, goal_id, values): - if 'uuid' in values: - raise exception.Invalid( - message=_("Cannot overwrite UUID for an existing Goal.")) - - try: - return self._update(models.Goal, goal_id, values) - except exception.ResourceNotFound: - raise exception.GoalNotFound(goal=goal_id) - - def soft_delete_goal(self, goal_id): - try: - return self._soft_delete(models.Goal, goal_id) - except exception.ResourceNotFound: - raise exception.GoalNotFound(goal=goal_id) - - # ### STRATEGIES ### # - - def get_strategy_list(self, context, filters=None, limit=None, - marker=None, sort_key=None, sort_dir=None, - eager=True): - query = model_query(models.Strategy) - if eager: - query = self._set_eager_options(models.Strategy, query) - query = self._add_strategies_filters(query, filters) - if not context.show_deleted: - query = query.filter_by(deleted_at=None) - return _paginate_query(models.Strategy, limit, marker, - sort_key, sort_dir, query) - - def create_strategy(self, values): - # ensure defaults are present for new strategies - if not values.get('uuid'): - values['uuid'] = utils.generate_uuid() - - try: - strategy = self._create(models.Strategy, values) - except db_exc.DBDuplicateEntry: - raise exception.StrategyAlreadyExists(uuid=values['uuid']) - return strategy - - def _get_strategy(self, context, fieldname, value, eager): - try: - return self._get(context, model=models.Strategy, - fieldname=fieldname, value=value, eager=eager) - except exception.ResourceNotFound: - raise 
exception.StrategyNotFound(strategy=value) - - def get_strategy_by_id(self, context, strategy_id, eager=False): - return self._get_strategy( - context, fieldname="id", value=strategy_id, eager=eager) - - def get_strategy_by_uuid(self, context, strategy_uuid, eager=False): - return self._get_strategy( - context, fieldname="uuid", value=strategy_uuid, eager=eager) - - def get_strategy_by_name(self, context, strategy_name, eager=False): - return self._get_strategy( - context, fieldname="name", value=strategy_name, eager=eager) - - def destroy_strategy(self, strategy_id): - try: - return self._destroy(models.Strategy, strategy_id) - except exception.ResourceNotFound: - raise exception.StrategyNotFound(strategy=strategy_id) - - def update_strategy(self, strategy_id, values): - if 'uuid' in values: - raise exception.Invalid( - message=_("Cannot overwrite UUID for an existing Strategy.")) - - try: - return self._update(models.Strategy, strategy_id, values) - except exception.ResourceNotFound: - raise exception.StrategyNotFound(strategy=strategy_id) - - def soft_delete_strategy(self, strategy_id): - try: - return self._soft_delete(models.Strategy, strategy_id) - except exception.ResourceNotFound: - raise exception.StrategyNotFound(strategy=strategy_id) - - # ### AUDIT TEMPLATES ### # - - def get_audit_template_list(self, context, filters=None, limit=None, - marker=None, sort_key=None, sort_dir=None, - eager=False): - - query = model_query(models.AuditTemplate) - if eager: - query = self._set_eager_options(models.AuditTemplate, query) - query = self._add_audit_templates_filters(query, filters) - if not context.show_deleted: - query = query.filter_by(deleted_at=None) - return _paginate_query(models.AuditTemplate, limit, marker, - sort_key, sort_dir, query) - - def create_audit_template(self, values): - # ensure defaults are present for new audit_templates - if not values.get('uuid'): - values['uuid'] = utils.generate_uuid() - - query = model_query(models.AuditTemplate) - 
query = query.filter_by(name=values.get('name'), - deleted_at=None) - - if len(query.all()) > 0: - raise exception.AuditTemplateAlreadyExists( - audit_template=values['name']) - - try: - audit_template = self._create(models.AuditTemplate, values) - except db_exc.DBDuplicateEntry: - raise exception.AuditTemplateAlreadyExists( - audit_template=values['name']) - return audit_template - - def _get_audit_template(self, context, fieldname, value, eager): - try: - return self._get(context, model=models.AuditTemplate, - fieldname=fieldname, value=value, eager=eager) - except exception.ResourceNotFound: - raise exception.AuditTemplateNotFound(audit_template=value) - - def get_audit_template_by_id(self, context, audit_template_id, - eager=False): - return self._get_audit_template( - context, fieldname="id", value=audit_template_id, eager=eager) - - def get_audit_template_by_uuid(self, context, audit_template_uuid, - eager=False): - return self._get_audit_template( - context, fieldname="uuid", value=audit_template_uuid, eager=eager) - - def get_audit_template_by_name(self, context, audit_template_name, - eager=False): - return self._get_audit_template( - context, fieldname="name", value=audit_template_name, eager=eager) - - def destroy_audit_template(self, audit_template_id): - try: - return self._destroy(models.AuditTemplate, audit_template_id) - except exception.ResourceNotFound: - raise exception.AuditTemplateNotFound( - audit_template=audit_template_id) - - def update_audit_template(self, audit_template_id, values): - if 'uuid' in values: - raise exception.Invalid( - message=_("Cannot overwrite UUID for an existing " - "Audit Template.")) - try: - return self._update( - models.AuditTemplate, audit_template_id, values) - except exception.ResourceNotFound: - raise exception.AuditTemplateNotFound( - audit_template=audit_template_id) - - def soft_delete_audit_template(self, audit_template_id): - try: - return self._soft_delete(models.AuditTemplate, audit_template_id) - except 
exception.ResourceNotFound: - raise exception.AuditTemplateNotFound( - audit_template=audit_template_id) - - # ### AUDITS ### # - - def get_audit_list(self, context, filters=None, limit=None, marker=None, - sort_key=None, sort_dir=None, eager=False): - query = model_query(models.Audit) - if eager: - query = self._set_eager_options(models.Audit, query) - query = self._add_audits_filters(query, filters) - if not context.show_deleted: - query = query.filter_by(deleted_at=None) - - return _paginate_query(models.Audit, limit, marker, - sort_key, sort_dir, query) - - def create_audit(self, values): - # ensure defaults are present for new audits - if not values.get('uuid'): - values['uuid'] = utils.generate_uuid() - - if values.get('state') is None: - values['state'] = objects.audit.State.PENDING - - if not values.get('auto_trigger'): - values['auto_trigger'] = False - - try: - audit = self._create(models.Audit, values) - except db_exc.DBDuplicateEntry: - raise exception.AuditAlreadyExists(uuid=values['uuid']) - return audit - - def _get_audit(self, context, fieldname, value, eager): - try: - return self._get(context, model=models.Audit, - fieldname=fieldname, value=value, eager=eager) - except exception.ResourceNotFound: - raise exception.AuditNotFound(audit=value) - - def get_audit_by_id(self, context, audit_id, eager=False): - return self._get_audit( - context, fieldname="id", value=audit_id, eager=eager) - - def get_audit_by_uuid(self, context, audit_uuid, eager=False): - return self._get_audit( - context, fieldname="uuid", value=audit_uuid, eager=eager) - - def destroy_audit(self, audit_id): - def is_audit_referenced(session, audit_id): - """Checks whether the audit is referenced by action_plan(s).""" - query = model_query(models.ActionPlan, session=session) - query = self._add_action_plans_filters( - query, {'audit_id': audit_id}) - return query.count() != 0 - - session = get_session() - with session.begin(): - query = model_query(models.Audit, session=session) - 
query = add_identity_filter(query, audit_id) - - try: - audit_ref = query.one() - except exc.NoResultFound: - raise exception.AuditNotFound(audit=audit_id) - - if is_audit_referenced(session, audit_ref['id']): - raise exception.AuditReferenced(audit=audit_id) - - query.delete() - - def update_audit(self, audit_id, values): - if 'uuid' in values: - raise exception.Invalid( - message=_("Cannot overwrite UUID for an existing " - "Audit.")) - - try: - return self._update(models.Audit, audit_id, values) - except exception.ResourceNotFound: - raise exception.AuditNotFound(audit=audit_id) - - def soft_delete_audit(self, audit_id): - try: - return self._soft_delete(models.Audit, audit_id) - except exception.ResourceNotFound: - raise exception.AuditNotFound(audit=audit_id) - - # ### ACTIONS ### # - - def get_action_list(self, context, filters=None, limit=None, marker=None, - sort_key=None, sort_dir=None, eager=False): - query = model_query(models.Action) - if eager: - query = self._set_eager_options(models.Action, query) - query = self._add_actions_filters(query, filters) - if not context.show_deleted: - query = query.filter_by(deleted_at=None) - return _paginate_query(models.Action, limit, marker, - sort_key, sort_dir, query) - - def create_action(self, values): - # ensure defaults are present for new actions - if not values.get('uuid'): - values['uuid'] = utils.generate_uuid() - - if values.get('state') is None: - values['state'] = objects.action.State.PENDING - - try: - action = self._create(models.Action, values) - except db_exc.DBDuplicateEntry: - raise exception.ActionAlreadyExists(uuid=values['uuid']) - return action - - def _get_action(self, context, fieldname, value, eager): - try: - return self._get(context, model=models.Action, - fieldname=fieldname, value=value, eager=eager) - except exception.ResourceNotFound: - raise exception.ActionNotFound(action=value) - - def get_action_by_id(self, context, action_id, eager=False): - return self._get_action( - context, 
fieldname="id", value=action_id, eager=eager) - - def get_action_by_uuid(self, context, action_uuid, eager=False): - return self._get_action( - context, fieldname="uuid", value=action_uuid, eager=eager) - - def destroy_action(self, action_id): - session = get_session() - with session.begin(): - query = model_query(models.Action, session=session) - query = add_identity_filter(query, action_id) - count = query.delete() - if count != 1: - raise exception.ActionNotFound(action_id) - - def update_action(self, action_id, values): - # NOTE(dtantsur): this can lead to very strange errors - if 'uuid' in values: - raise exception.Invalid( - message=_("Cannot overwrite UUID for an existing Action.")) - - return self._do_update_action(action_id, values) - - @staticmethod - def _do_update_action(action_id, values): - session = get_session() - with session.begin(): - query = model_query(models.Action, session=session) - query = add_identity_filter(query, action_id) - try: - ref = query.with_lockmode('update').one() - except exc.NoResultFound: - raise exception.ActionNotFound(action=action_id) - - ref.update(values) - return ref - - def soft_delete_action(self, action_id): - try: - return self._soft_delete(models.Action, action_id) - except exception.ResourceNotFound: - raise exception.ActionNotFound(action=action_id) - - # ### ACTION PLANS ### # - - def get_action_plan_list( - self, context, filters=None, limit=None, marker=None, - sort_key=None, sort_dir=None, eager=False): - query = model_query(models.ActionPlan) - if eager: - query = self._set_eager_options(models.ActionPlan, query) - query = self._add_action_plans_filters(query, filters) - if not context.show_deleted: - query = query.filter(models.ActionPlan.deleted_at.is_(None)) - - return _paginate_query(models.ActionPlan, limit, marker, - sort_key, sort_dir, query) - - def create_action_plan(self, values): - # ensure defaults are present for new audits - if not values.get('uuid'): - values['uuid'] = utils.generate_uuid() 
- - try: - action_plan = self._create(models.ActionPlan, values) - except db_exc.DBDuplicateEntry: - raise exception.ActionPlanAlreadyExists(uuid=values['uuid']) - return action_plan - - def _get_action_plan(self, context, fieldname, value, eager): - try: - return self._get(context, model=models.ActionPlan, - fieldname=fieldname, value=value, eager=eager) - except exception.ResourceNotFound: - raise exception.ActionPlanNotFound(action_plan=value) - - def get_action_plan_by_id(self, context, action_plan_id, eager=False): - return self._get_action_plan( - context, fieldname="id", value=action_plan_id, eager=eager) - - def get_action_plan_by_uuid(self, context, action_plan_uuid, eager=False): - return self._get_action_plan( - context, fieldname="uuid", value=action_plan_uuid, eager=eager) - - def destroy_action_plan(self, action_plan_id): - def is_action_plan_referenced(session, action_plan_id): - """Checks whether the action_plan is referenced by action(s).""" - query = model_query(models.Action, session=session) - query = self._add_actions_filters( - query, {'action_plan_id': action_plan_id}) - return query.count() != 0 - - session = get_session() - with session.begin(): - query = model_query(models.ActionPlan, session=session) - query = add_identity_filter(query, action_plan_id) - - try: - action_plan_ref = query.one() - except exc.NoResultFound: - raise exception.ActionPlanNotFound(action_plan=action_plan_id) - - if is_action_plan_referenced(session, action_plan_ref['id']): - raise exception.ActionPlanReferenced( - action_plan=action_plan_id) - - query.delete() - - def update_action_plan(self, action_plan_id, values): - if 'uuid' in values: - raise exception.Invalid( - message=_("Cannot overwrite UUID for an existing " - "Action Plan.")) - - return self._do_update_action_plan(action_plan_id, values) - - @staticmethod - def _do_update_action_plan(action_plan_id, values): - session = get_session() - with session.begin(): - query = model_query(models.ActionPlan, 
session=session) - query = add_identity_filter(query, action_plan_id) - try: - ref = query.with_lockmode('update').one() - except exc.NoResultFound: - raise exception.ActionPlanNotFound(action_plan=action_plan_id) - - ref.update(values) - return ref - - def soft_delete_action_plan(self, action_plan_id): - try: - return self._soft_delete(models.ActionPlan, action_plan_id) - except exception.ResourceNotFound: - raise exception.ActionPlanNotFound(action_plan=action_plan_id) - - # ### EFFICACY INDICATORS ### # - - def get_efficacy_indicator_list(self, context, filters=None, limit=None, - marker=None, sort_key=None, sort_dir=None, - eager=False): - - query = model_query(models.EfficacyIndicator) - if eager: - query = self._set_eager_options(models.EfficacyIndicator, query) - query = self._add_efficacy_indicators_filters(query, filters) - if not context.show_deleted: - query = query.filter_by(deleted_at=None) - return _paginate_query(models.EfficacyIndicator, limit, marker, - sort_key, sort_dir, query) - - def create_efficacy_indicator(self, values): - # ensure defaults are present for new efficacy indicators - if not values.get('uuid'): - values['uuid'] = utils.generate_uuid() - - try: - efficacy_indicator = self._create(models.EfficacyIndicator, values) - except db_exc.DBDuplicateEntry: - raise exception.EfficacyIndicatorAlreadyExists(uuid=values['uuid']) - return efficacy_indicator - - def _get_efficacy_indicator(self, context, fieldname, value, eager): - try: - return self._get(context, model=models.EfficacyIndicator, - fieldname=fieldname, value=value, eager=eager) - except exception.ResourceNotFound: - raise exception.EfficacyIndicatorNotFound(efficacy_indicator=value) - - def get_efficacy_indicator_by_id(self, context, efficacy_indicator_id, - eager=False): - return self._get_efficacy_indicator( - context, fieldname="id", - value=efficacy_indicator_id, eager=eager) - - def get_efficacy_indicator_by_uuid(self, context, efficacy_indicator_uuid, - eager=False): - 
return self._get_efficacy_indicator( - context, fieldname="uuid", - value=efficacy_indicator_uuid, eager=eager) - - def get_efficacy_indicator_by_name(self, context, efficacy_indicator_name, - eager=False): - return self._get_efficacy_indicator( - context, fieldname="name", - value=efficacy_indicator_name, eager=eager) - - def update_efficacy_indicator(self, efficacy_indicator_id, values): - if 'uuid' in values: - raise exception.Invalid( - message=_("Cannot overwrite UUID for an existing " - "efficacy indicator.")) - - try: - return self._update( - models.EfficacyIndicator, efficacy_indicator_id, values) - except exception.ResourceNotFound: - raise exception.EfficacyIndicatorNotFound( - efficacy_indicator=efficacy_indicator_id) - - def soft_delete_efficacy_indicator(self, efficacy_indicator_id): - try: - return self._soft_delete( - models.EfficacyIndicator, efficacy_indicator_id) - except exception.ResourceNotFound: - raise exception.EfficacyIndicatorNotFound( - efficacy_indicator=efficacy_indicator_id) - - def destroy_efficacy_indicator(self, efficacy_indicator_id): - try: - return self._destroy( - models.EfficacyIndicator, efficacy_indicator_id) - except exception.ResourceNotFound: - raise exception.EfficacyIndicatorNotFound( - efficacy_indicator=efficacy_indicator_id) - - # ### SCORING ENGINES ### # - - def _add_scoring_engine_filters(self, query, filters): - if filters is None: - filters = {} - - plain_fields = ['id', 'description'] - - return self._add_filters( - query=query, model=models.ScoringEngine, filters=filters, - plain_fields=plain_fields) - - def get_scoring_engine_list( - self, context, columns=None, filters=None, limit=None, - marker=None, sort_key=None, sort_dir=None, eager=False): - query = model_query(models.ScoringEngine) - if eager: - query = self._set_eager_options(models.ScoringEngine, query) - query = self._add_scoring_engine_filters(query, filters) - if not context.show_deleted: - query = query.filter_by(deleted_at=None) - - return 
_paginate_query(models.ScoringEngine, limit, marker, - sort_key, sort_dir, query) - - def create_scoring_engine(self, values): - # ensure defaults are present for new scoring engines - if not values.get('uuid'): - values['uuid'] = utils.generate_uuid() - - try: - scoring_engine = self._create(models.ScoringEngine, values) - except db_exc.DBDuplicateEntry: - raise exception.ScoringEngineAlreadyExists(uuid=values['uuid']) - return scoring_engine - - def _get_scoring_engine(self, context, fieldname, value, eager): - try: - return self._get(context, model=models.ScoringEngine, - fieldname=fieldname, value=value, eager=eager) - except exception.ResourceNotFound: - raise exception.ScoringEngineNotFound(scoring_engine=value) - - def get_scoring_engine_by_id(self, context, scoring_engine_id, - eager=False): - return self._get_scoring_engine( - context, fieldname="id", value=scoring_engine_id, eager=eager) - - def get_scoring_engine_by_uuid(self, context, scoring_engine_uuid, - eager=False): - return self._get_scoring_engine( - context, fieldname="uuid", value=scoring_engine_uuid, eager=eager) - - def get_scoring_engine_by_name(self, context, scoring_engine_name, - eager=False): - return self._get_scoring_engine( - context, fieldname="name", value=scoring_engine_name, eager=eager) - - def destroy_scoring_engine(self, scoring_engine_id): - try: - return self._destroy(models.ScoringEngine, scoring_engine_id) - except exception.ResourceNotFound: - raise exception.ScoringEngineNotFound( - scoring_engine=scoring_engine_id) - - def update_scoring_engine(self, scoring_engine_id, values): - if 'uuid' in values: - raise exception.Invalid( - message=_("Cannot overwrite UUID for an existing " - "Scoring Engine.")) - - try: - return self._update( - models.ScoringEngine, scoring_engine_id, values) - except exception.ResourceNotFound: - raise exception.ScoringEngineNotFound( - scoring_engine=scoring_engine_id) - - def soft_delete_scoring_engine(self, scoring_engine_id): - try: - return 
self._soft_delete( - models.ScoringEngine, scoring_engine_id) - except exception.ResourceNotFound: - raise exception.ScoringEngineNotFound( - scoring_engine=scoring_engine_id) - - # ### SERVICES ### # - - def _add_services_filters(self, query, filters): - if not filters: - filters = {} - - plain_fields = ['id', 'name', 'host'] - - return self._add_filters( - query=query, model=models.Service, filters=filters, - plain_fields=plain_fields) - - def get_service_list(self, context, filters=None, limit=None, marker=None, - sort_key=None, sort_dir=None, eager=False): - query = model_query(models.Service) - if eager: - query = self._set_eager_options(models.Service, query) - query = self._add_services_filters(query, filters) - if not context.show_deleted: - query = query.filter_by(deleted_at=None) - return _paginate_query(models.Service, limit, marker, - sort_key, sort_dir, query) - - def create_service(self, values): - try: - service = self._create(models.Service, values) - except db_exc.DBDuplicateEntry: - raise exception.ServiceAlreadyExists(name=values['name'], - host=values['host']) - return service - - def _get_service(self, context, fieldname, value, eager): - try: - return self._get(context, model=models.Service, - fieldname=fieldname, value=value, eager=eager) - except exception.ResourceNotFound: - raise exception.ServiceNotFound(service=value) - - def get_service_by_id(self, context, service_id, eager=False): - return self._get_service( - context, fieldname="id", value=service_id, eager=eager) - - def get_service_by_name(self, context, service_name, eager=False): - return self._get_service( - context, fieldname="name", value=service_name, eager=eager) - - def destroy_service(self, service_id): - try: - return self._destroy(models.Service, service_id) - except exception.ResourceNotFound: - raise exception.ServiceNotFound(service=service_id) - - def update_service(self, service_id, values): - try: - return self._update(models.Service, service_id, values) - except 
exception.ResourceNotFound: - raise exception.ServiceNotFound(service=service_id) - - def soft_delete_service(self, service_id): - try: - return self._soft_delete(models.Service, service_id) - except exception.ResourceNotFound: - raise exception.ServiceNotFound(service=service_id) diff --git a/watcher/db/sqlalchemy/job_store.py b/watcher/db/sqlalchemy/job_store.py deleted file mode 100644 index da5028f..0000000 --- a/watcher/db/sqlalchemy/job_store.py +++ /dev/null @@ -1,112 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2017 Servionica LTD -# -# Authors: Alexander Chadin -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from oslo_serialization import jsonutils - -from apscheduler.jobstores.base import ConflictingIdError -from apscheduler.jobstores import sqlalchemy -from apscheduler.util import datetime_to_utc_timestamp -from apscheduler.util import maybe_ref - -from watcher.common import context -from watcher.common import service -from watcher import objects - -try: - import cPickle as pickle -except ImportError: # pragma: nocover - import pickle - -from sqlalchemy import Table, MetaData, select, and_ -from sqlalchemy.exc import IntegrityError - - -class WatcherJobStore(sqlalchemy.SQLAlchemyJobStore): - """Stores jobs in a database table using SQLAlchemy. - - The table will be created if it doesn't exist in the database. 
- Plugin alias: ``sqlalchemy`` - :param str url: connection string - :param engine: an SQLAlchemy Engine to use instead of creating a new - one based on ``url`` - :param str tablename: name of the table to store jobs in - :param metadata: a :class:`~sqlalchemy.MetaData` instance to use instead of - creating a new one - :param int pickle_protocol: pickle protocol level to use - (for serialization), defaults to the highest available - :param dict tag: tag description - """ - - def __init__(self, url=None, engine=None, tablename='apscheduler_jobs', - metadata=None, pickle_protocol=pickle.HIGHEST_PROTOCOL, - tag=None): - super(WatcherJobStore, self).__init__(url, engine, tablename, - metadata, pickle_protocol) - metadata = maybe_ref(metadata) or MetaData() - self.jobs_t = Table(tablename, metadata, autoload=True, - autoload_with=engine) - service_ident = service.ServiceHeartbeat.get_service_name() - self.tag = tag or {'host': service_ident[0], 'name': service_ident[1]} - self.service_id = objects.Service.list(context=context.make_context(), - filters=self.tag)[0].id - - def start(self, scheduler, alias): - # There should be called 'start' method of parent of SQLAlchemyJobStore - super(self.__class__.__bases__[0], self).start(scheduler, alias) - - def add_job(self, job): - insert = self.jobs_t.insert().values(**{ - 'id': job.id, - 'next_run_time': datetime_to_utc_timestamp(job.next_run_time), - 'job_state': pickle.dumps(job.__getstate__(), - self.pickle_protocol), - 'service_id': self.service_id, - 'tag': jsonutils.dumps(self.tag) - }) - try: - self.engine.execute(insert) - except IntegrityError: - raise ConflictingIdError(job.id) - - def get_all_jobs(self): - jobs = self._get_jobs(self.jobs_t.c.tag == jsonutils.dumps(self.tag)) - self._fix_paused_jobs_sorting(jobs) - return jobs - - def _get_jobs(self, *conditions): - jobs = [] - conditions += (self.jobs_t.c.service_id == self.service_id,) - selectable = select( - [self.jobs_t.c.id, self.jobs_t.c.job_state, 
self.jobs_t.c.tag] - ).order_by(self.jobs_t.c.next_run_time).where(and_(*conditions)) - failed_job_ids = set() - for row in self.engine.execute(selectable): - try: - jobs.append(self._reconstitute_job(row.job_state)) - except Exception: - self._logger.exception( - 'Unable to restore job "%s" -- removing it', row.id) - failed_job_ids.add(row.id) - - # Remove all the jobs we failed to restore - if failed_job_ids: - delete = self.jobs_t.delete().where( - self.jobs_t.c.id.in_(failed_job_ids)) - self.engine.execute(delete) - - return jobs diff --git a/watcher/db/sqlalchemy/migration.py b/watcher/db/sqlalchemy/migration.py deleted file mode 100644 index b342945..0000000 --- a/watcher/db/sqlalchemy/migration.py +++ /dev/null @@ -1,115 +0,0 @@ -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os - -import alembic -from alembic import config as alembic_config -import alembic.migration as alembic_migration -from oslo_db import exception as db_exc - -from watcher._i18n import _ -from watcher.db.sqlalchemy import api as sqla_api -from watcher.db.sqlalchemy import models - - -def _alembic_config(): - path = os.path.join(os.path.dirname(__file__), 'alembic.ini') - config = alembic_config.Config(path) - return config - - -def version(engine=None): - """Current database version. 
- - :returns: Database version - :rtype: string - """ - if engine is None: - engine = sqla_api.get_engine() - with engine.connect() as conn: - context = alembic_migration.MigrationContext.configure(conn) - return context.get_current_revision() - - -def upgrade(revision, config=None): - """Used for upgrading database. - - :param version: Desired database version - :type version: string - """ - revision = revision or 'head' - config = config or _alembic_config() - - alembic.command.upgrade(config, revision) - - -def create_schema(config=None, engine=None): - """Create database schema from models description. - - Can be used for initial installation instead of upgrade('head'). - """ - if engine is None: - engine = sqla_api.get_engine() - - # NOTE(viktors): If we will use metadata.create_all() for non empty db - # schema, it will only add the new tables, but leave - # existing as is. So we should avoid of this situation. - if version(engine=engine) is not None: - raise db_exc.DbMigrationError( - _("Watcher database schema is already under version control; " - "use upgrade() instead")) - - models.Base.metadata.create_all(engine) - stamp('head', config=config) - - -def downgrade(revision, config=None): - """Used for downgrading database. - - :param version: Desired database version - :type version: string - """ - revision = revision or 'base' - config = config or _alembic_config() - return alembic.command.downgrade(config, revision) - - -def stamp(revision, config=None): - """Stamps database with provided revision. - - Don't run any migrations. - - :param revision: Should match one from repository or head - to stamp - database with most recent revision - :type revision: string - """ - config = config or _alembic_config() - return alembic.command.stamp(config, revision=revision) - - -def revision(message=None, autogenerate=False, config=None): - """Creates template for migration. 
- - :param message: Text that will be used for migration title - :type message: string - :param autogenerate: If True - generates diff based on current database - state - :type autogenerate: bool - """ - config = config or _alembic_config() - return alembic.command.revision(config, message=message, - autogenerate=autogenerate) diff --git a/watcher/db/sqlalchemy/models.py b/watcher/db/sqlalchemy/models.py deleted file mode 100644 index dbe972b..0000000 --- a/watcher/db/sqlalchemy/models.py +++ /dev/null @@ -1,280 +0,0 @@ -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -""" -SQLAlchemy models for watcher service -""" - -from oslo_db.sqlalchemy import models -from oslo_serialization import jsonutils -import six.moves.urllib.parse as urlparse -from sqlalchemy import Boolean -from sqlalchemy import Column -from sqlalchemy import DateTime -from sqlalchemy.ext.declarative import declarative_base -from sqlalchemy import ForeignKey -from sqlalchemy import Integer -from sqlalchemy import Numeric -from sqlalchemy import orm -from sqlalchemy import String -from sqlalchemy import Text -from sqlalchemy.types import TypeDecorator, TEXT -from sqlalchemy import UniqueConstraint - -from watcher import conf - -CONF = conf.CONF - - -def table_args(): - engine_name = urlparse.urlparse(CONF.database.connection).scheme - if engine_name == 'mysql': - return {'mysql_engine': CONF.database.mysql_engine, - 'mysql_charset': "utf8"} - return None - - -class JsonEncodedType(TypeDecorator): - """Abstract base type serialized as json-encoded string in db.""" - - type = None - impl = TEXT - - def process_bind_param(self, value, dialect): - if value is None: - # Save default value according to current type to keep the - # interface the consistent. 
- value = self.type() - elif not isinstance(value, self.type): - raise TypeError("%s supposes to store %s objects, but %s given" - % (self.__class__.__name__, - self.type.__name__, - type(value).__name__)) - serialized_value = jsonutils.dumps(value) - return serialized_value - - def process_result_value(self, value, dialect): - if value is not None: - value = jsonutils.loads(value) - return value - - -class JSONEncodedDict(JsonEncodedType): - """Represents dict serialized as json-encoded string in db.""" - - type = dict - - -class JSONEncodedList(JsonEncodedType): - """Represents list serialized as json-encoded string in db.""" - - type = list - - -class WatcherBase(models.SoftDeleteMixin, - models.TimestampMixin, models.ModelBase): - metadata = None - - def as_dict(self): - d = {} - for c in self.__table__.columns: - d[c.name] = self[c.name] - return d - - def save(self, session=None): - import watcher.db.sqlalchemy.api as db_api - - if session is None: - session = db_api.get_session() - - super(WatcherBase, self).save(session) - - -Base = declarative_base(cls=WatcherBase) - - -class Goal(Base): - """Represents a goal.""" - - __tablename__ = 'goals' - __table_args__ = ( - UniqueConstraint('uuid', name='uniq_goals0uuid'), - UniqueConstraint('name', 'deleted', name='uniq_goals0name'), - table_args(), - ) - id = Column(Integer, primary_key=True, autoincrement=True) - uuid = Column(String(36)) - name = Column(String(63), nullable=False) - display_name = Column(String(63), nullable=False) - efficacy_specification = Column(JSONEncodedList, nullable=False) - - -class Strategy(Base): - """Represents a strategy.""" - - __tablename__ = 'strategies' - __table_args__ = ( - UniqueConstraint('uuid', name='uniq_strategies0uuid'), - UniqueConstraint('name', 'deleted', name='uniq_strategies0name'), - table_args() - ) - id = Column(Integer, primary_key=True, autoincrement=True) - uuid = Column(String(36)) - name = Column(String(63), nullable=False) - display_name = 
Column(String(63), nullable=False) - goal_id = Column(Integer, ForeignKey('goals.id'), nullable=False) - parameters_spec = Column(JSONEncodedDict, nullable=True) - - goal = orm.relationship(Goal, foreign_keys=goal_id, lazy=None) - - -class AuditTemplate(Base): - """Represents an audit template.""" - - __tablename__ = 'audit_templates' - __table_args__ = ( - UniqueConstraint('uuid', name='uniq_audit_templates0uuid'), - UniqueConstraint('name', 'deleted', name='uniq_audit_templates0name'), - table_args() - ) - id = Column(Integer, primary_key=True) - uuid = Column(String(36)) - name = Column(String(63), nullable=True) - description = Column(String(255), nullable=True) - goal_id = Column(Integer, ForeignKey('goals.id'), nullable=False) - strategy_id = Column(Integer, ForeignKey('strategies.id'), nullable=True) - scope = Column(JSONEncodedList) - - goal = orm.relationship(Goal, foreign_keys=goal_id, lazy=None) - strategy = orm.relationship(Strategy, foreign_keys=strategy_id, lazy=None) - - -class Audit(Base): - """Represents an audit.""" - - __tablename__ = 'audits' - __table_args__ = ( - UniqueConstraint('uuid', name='uniq_audits0uuid'), - table_args() - ) - id = Column(Integer, primary_key=True, autoincrement=True) - uuid = Column(String(36)) - audit_type = Column(String(20)) - state = Column(String(20), nullable=True) - parameters = Column(JSONEncodedDict, nullable=True) - interval = Column(String(36), nullable=True) - goal_id = Column(Integer, ForeignKey('goals.id'), nullable=False) - strategy_id = Column(Integer, ForeignKey('strategies.id'), nullable=True) - scope = Column(JSONEncodedList, nullable=True) - auto_trigger = Column(Boolean, nullable=False) - next_run_time = Column(DateTime, nullable=True) - - goal = orm.relationship(Goal, foreign_keys=goal_id, lazy=None) - strategy = orm.relationship(Strategy, foreign_keys=strategy_id, lazy=None) - - -class ActionPlan(Base): - """Represents an action plan.""" - - __tablename__ = 'action_plans' - __table_args__ = ( - 
UniqueConstraint('uuid', name='uniq_action_plans0uuid'), - table_args() - ) - id = Column(Integer, primary_key=True, autoincrement=True) - uuid = Column(String(36)) - audit_id = Column(Integer, ForeignKey('audits.id'), nullable=False) - strategy_id = Column(Integer, ForeignKey('strategies.id'), nullable=False) - state = Column(String(20), nullable=True) - global_efficacy = Column(JSONEncodedDict, nullable=True) - - audit = orm.relationship(Audit, foreign_keys=audit_id, lazy=None) - strategy = orm.relationship(Strategy, foreign_keys=strategy_id, lazy=None) - - -class Action(Base): - """Represents an action.""" - - __tablename__ = 'actions' - __table_args__ = ( - UniqueConstraint('uuid', name='uniq_actions0uuid'), - table_args() - ) - id = Column(Integer, primary_key=True, autoincrement=True) - uuid = Column(String(36), nullable=False) - action_plan_id = Column(Integer, ForeignKey('action_plans.id'), - nullable=False) - # only for the first version - action_type = Column(String(255), nullable=False) - input_parameters = Column(JSONEncodedDict, nullable=True) - state = Column(String(20), nullable=True) - parents = Column(JSONEncodedList, nullable=True) - - action_plan = orm.relationship( - ActionPlan, foreign_keys=action_plan_id, lazy=None) - - -class EfficacyIndicator(Base): - """Represents an efficacy indicator.""" - - __tablename__ = 'efficacy_indicators' - __table_args__ = ( - UniqueConstraint('uuid', name='uniq_efficacy_indicators0uuid'), - table_args() - ) - id = Column(Integer, primary_key=True, autoincrement=True) - uuid = Column(String(36)) - name = Column(String(63)) - description = Column(String(255), nullable=True) - unit = Column(String(63), nullable=True) - value = Column(Numeric()) - action_plan_id = Column(Integer, ForeignKey('action_plans.id'), - nullable=False) - - action_plan = orm.relationship( - ActionPlan, foreign_keys=action_plan_id, lazy=None) - - -class ScoringEngine(Base): - """Represents a scoring engine.""" - - __tablename__ = 
'scoring_engines' - __table_args__ = ( - UniqueConstraint('uuid', name='uniq_scoring_engines0uuid'), - UniqueConstraint('name', 'deleted', name='uniq_scoring_engines0name'), - table_args() - ) - id = Column(Integer, primary_key=True, autoincrement=True) - uuid = Column(String(36), nullable=False) - name = Column(String(63), nullable=False) - description = Column(String(255), nullable=True) - # Metainfo might contain some additional information about the data model. - # The format might vary between different models (e.g. be JSON, XML or - # even some custom format), the blob type should cover all scenarios. - metainfo = Column(Text, nullable=True) - - -class Service(Base): - """Represents a service entity""" - - __tablename__ = 'services' - __table_args__ = ( - UniqueConstraint('host', 'name', 'deleted', - name="uniq_services0host0name0deleted"), - table_args() - ) - id = Column(Integer, primary_key=True) - name = Column(String(255), nullable=False) - host = Column(String(255), nullable=False) - last_seen_up = Column(DateTime, nullable=True) diff --git a/watcher/decision_engine/__init__.py b/watcher/decision_engine/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/decision_engine/audit/__init__.py b/watcher/decision_engine/audit/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/decision_engine/audit/base.py b/watcher/decision_engine/audit/base.py deleted file mode 100644 index cccb1aa..0000000 --- a/watcher/decision_engine/audit/base.py +++ /dev/null @@ -1,135 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Authors: Jean-Emile DARTOIS -# Alexander Chadin -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import abc -import six - -from oslo_log import log - -from watcher.applier import rpcapi -from watcher.common import exception -from watcher.common import service -from watcher.decision_engine.planner import manager as planner_manager -from watcher.decision_engine.strategy.context import default as default_context -from watcher import notifications -from watcher import objects -from watcher.objects import fields - -LOG = log.getLogger(__name__) - - -@six.add_metaclass(abc.ABCMeta) -@six.add_metaclass(service.Singleton) -class BaseAuditHandler(object): - - @abc.abstractmethod - def execute(self, audit_uuid, request_context): - raise NotImplementedError() - - @abc.abstractmethod - def pre_execute(self, audit_uuid, request_context): - raise NotImplementedError() - - @abc.abstractmethod - def do_execute(self, audit, request_context): - raise NotImplementedError() - - @abc.abstractmethod - def post_execute(self, audit, solution, request_context): - raise NotImplementedError() - - -@six.add_metaclass(abc.ABCMeta) -class AuditHandler(BaseAuditHandler): - - def __init__(self): - super(AuditHandler, self).__init__() - self._strategy_context = default_context.DefaultStrategyContext() - self._planner_manager = planner_manager.PlannerManager() - self._planner = None - - @property - def planner(self): - if self._planner is None: - self._planner = self._planner_manager.load() - return self._planner - - @property - def strategy_context(self): - return self._strategy_context - - def do_schedule(self, request_context, audit, solution): - try: - 
notifications.audit.send_action_notification( - request_context, audit, - action=fields.NotificationAction.PLANNER, - phase=fields.NotificationPhase.START) - action_plan = self.planner.schedule(request_context, audit.id, - solution) - notifications.audit.send_action_notification( - request_context, audit, - action=fields.NotificationAction.PLANNER, - phase=fields.NotificationPhase.END) - return action_plan - except Exception: - notifications.audit.send_action_notification( - request_context, audit, - action=fields.NotificationAction.PLANNER, - priority=fields.NotificationPriority.ERROR, - phase=fields.NotificationPhase.ERROR) - raise - - def update_audit_state(self, audit, state): - LOG.debug("Update audit state: %s", state) - audit.state = state - audit.save() - - def check_ongoing_action_plans(self, request_context): - a_plan_filters = {'state': objects.action_plan.State.ONGOING} - ongoing_action_plans = objects.ActionPlan.list( - request_context, filters=a_plan_filters) - if ongoing_action_plans: - raise exception.ActionPlanIsOngoing( - action_plan=ongoing_action_plans[0].uuid) - - def pre_execute(self, audit, request_context): - LOG.debug("Trigger audit %s", audit.uuid) - self.check_ongoing_action_plans(request_context) - # change state of the audit to ONGOING - self.update_audit_state(audit, objects.audit.State.ONGOING) - - def post_execute(self, audit, solution, request_context): - action_plan = self.do_schedule(request_context, audit, solution) - if audit.auto_trigger: - applier_client = rpcapi.ApplierAPI() - applier_client.launch_action_plan(request_context, - action_plan.uuid) - - def execute(self, audit, request_context): - try: - self.pre_execute(audit, request_context) - solution = self.do_execute(audit, request_context) - self.post_execute(audit, solution, request_context) - except exception.ActionPlanIsOngoing as e: - LOG.warning(e) - if audit.audit_type == objects.audit.AuditType.ONESHOT.value: - self.update_audit_state(audit, 
objects.audit.State.CANCELLED) - except Exception as e: - LOG.exception(e) - self.update_audit_state(audit, objects.audit.State.FAILED) diff --git a/watcher/decision_engine/audit/continuous.py b/watcher/decision_engine/audit/continuous.py deleted file mode 100644 index 2afcafe..0000000 --- a/watcher/decision_engine/audit/continuous.py +++ /dev/null @@ -1,170 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 Servionica LTD -# Copyright (c) 2016 Intel Corp -# -# Authors: Alexander Chadin -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- - -import datetime -from dateutil import tz - -from apscheduler.jobstores import memory -from croniter import croniter - -from watcher.common import context -from watcher.common import scheduling -from watcher.common import utils -from watcher import conf -from watcher.db.sqlalchemy import api as sq_api -from watcher.db.sqlalchemy import job_store -from watcher.decision_engine.audit import base -from watcher import objects - - -CONF = conf.CONF - - -class ContinuousAuditHandler(base.AuditHandler): - def __init__(self): - super(ContinuousAuditHandler, self).__init__() - self._scheduler = None - self.context_show_deleted = context.RequestContext(is_admin=True, - show_deleted=True) - - @property - def scheduler(self): - if self._scheduler is None: - self._scheduler = scheduling.BackgroundSchedulerService( - jobstores={ - 'default': job_store.WatcherJobStore( - engine=sq_api.get_engine()), - 'memory': memory.MemoryJobStore() - } - ) - return self._scheduler - - def _is_audit_inactive(self, audit): - audit = objects.Audit.get_by_uuid( - self.context_show_deleted, audit.uuid) - if objects.audit.AuditStateTransitionManager().is_inactive(audit): - # if audit isn't in active states, audit's job must be removed to - # prevent using of inactive audit in future. 
- [job for job in self.scheduler.get_jobs() - if job.name == 'execute_audit' and - job.args[0].uuid == audit.uuid][0].remove() - return True - - return False - - def do_execute(self, audit, request_context): - # execute the strategy - solution = self.strategy_context.execute_strategy( - audit, request_context) - - if audit.audit_type == objects.audit.AuditType.CONTINUOUS.value: - a_plan_filters = {'audit_uuid': audit.uuid, - 'state': objects.action_plan.State.RECOMMENDED} - action_plans = objects.ActionPlan.list( - request_context, filters=a_plan_filters, eager=True) - for plan in action_plans: - plan.state = objects.action_plan.State.CANCELLED - plan.save() - return solution - - def _next_cron_time(self, audit): - if utils.is_cron_like(audit.interval): - return croniter(audit.interval, datetime.datetime.utcnow() - ).get_next(datetime.datetime) - - @classmethod - def execute_audit(cls, audit, request_context): - self = cls() - if not self._is_audit_inactive(audit): - try: - self.execute(audit, request_context) - except Exception: - raise - finally: - if utils.is_int_like(audit.interval): - audit.next_run_time = ( - datetime.datetime.utcnow() + - datetime.timedelta(seconds=int(audit.interval))) - else: - audit.next_run_time = self._next_cron_time(audit) - audit.save() - - def _add_job(self, trigger, audit, audit_context, **trigger_args): - time_var = 'next_run_time' if trigger_args.get( - 'next_run_time') else 'run_date' - # We should convert UTC time to local time without tzinfo - trigger_args[time_var] = trigger_args[time_var].replace( - tzinfo=tz.tzutc()).astimezone(tz.tzlocal()).replace(tzinfo=None) - self.scheduler.add_job(self.execute_audit, trigger, - args=[audit, audit_context], - name='execute_audit', - **trigger_args) - - def launch_audits_periodically(self): - audit_context = context.RequestContext(is_admin=True) - audit_filters = { - 'audit_type': objects.audit.AuditType.CONTINUOUS.value, - 'state__in': (objects.audit.State.PENDING, - 
objects.audit.State.ONGOING, - objects.audit.State.SUCCEEDED) - } - audits = objects.Audit.list( - audit_context, filters=audit_filters, eager=True) - scheduler_job_args = [ - job.args for job in self.scheduler.get_jobs() - if job.name == 'execute_audit'] - for audit in audits: - # if audit is not presented in scheduled audits yet. - if audit.uuid not in [arg[0].uuid for arg in scheduler_job_args]: - # if interval is provided with seconds - if utils.is_int_like(audit.interval): - # if audit has already been provided and we need - # to restore it after shutdown - if audit.next_run_time is not None: - old_run_time = audit.next_run_time - current = datetime.datetime.utcnow() - if old_run_time < current: - delta = datetime.timedelta( - seconds=(int(audit.interval) - ( - current - old_run_time).seconds % - int(audit.interval))) - audit.next_run_time = current + delta - next_run_time = audit.next_run_time - # if audit is new one - else: - next_run_time = datetime.datetime.utcnow() - self._add_job('interval', audit, audit_context, - seconds=int(audit.interval), - next_run_time=next_run_time) - - else: - audit.next_run_time = self._next_cron_time(audit) - self._add_job('date', audit, audit_context, - run_date=audit.next_run_time) - audit.save() - - def start(self): - self.scheduler.add_job( - self.launch_audits_periodically, - 'interval', - seconds=CONF.watcher_decision_engine.continuous_audit_interval, - next_run_time=datetime.datetime.now(), - jobstore='memory') - self.scheduler.start() diff --git a/watcher/decision_engine/audit/oneshot.py b/watcher/decision_engine/audit/oneshot.py deleted file mode 100644 index fae2512..0000000 --- a/watcher/decision_engine/audit/oneshot.py +++ /dev/null @@ -1,34 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from watcher.decision_engine.audit import base -from watcher import objects - - -class OneShotAuditHandler(base.AuditHandler): - - def do_execute(self, audit, request_context): - # execute the strategy - solution = self.strategy_context.execute_strategy( - audit, request_context) - - return solution - - def post_execute(self, audit, solution, request_context): - super(OneShotAuditHandler, self).post_execute(audit, solution, - request_context) - # change state of the audit to SUCCEEDED - self.update_audit_state(audit, objects.audit.State.SUCCEEDED) diff --git a/watcher/decision_engine/gmr.py b/watcher/decision_engine/gmr.py deleted file mode 100644 index 8ddc561..0000000 --- a/watcher/decision_engine/gmr.py +++ /dev/null @@ -1,48 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Authors: Vincent FRANCOISE -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from oslo_reports import guru_meditation_report as gmr - -from watcher._i18n import _ -from watcher.decision_engine.model.collector import manager - - -def register_gmr_plugins(): - """Register GMR plugins that are specific to watcher-decision-engine.""" - gmr.TextGuruMeditation.register_section(_('CDMCs'), show_models) - - -def show_models(): - """Create a formatted output of all the CDMs - - Mainly used as a Guru Meditation Report (GMR) plugin - """ - mgr = manager.CollectorManager() - - output = [] - for name, cdmc in mgr.get_collectors().items(): - output.append("") - output.append("~" * len(name)) - output.append(name) - output.append("~" * len(name)) - output.append("") - - cdmc_struct = cdmc.cluster_data_model.to_string() - output.append(cdmc_struct) - - return "\n".join(output) diff --git a/watcher/decision_engine/goal/__init__.py b/watcher/decision_engine/goal/__init__.py deleted file mode 100644 index 1607884..0000000 --- a/watcher/decision_engine/goal/__init__.py +++ /dev/null @@ -1,28 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from watcher.decision_engine.goal import goals - -Dummy = goals.Dummy -ServerConsolidation = goals.ServerConsolidation -ThermalOptimization = goals.ThermalOptimization -Unclassified = goals.Unclassified -WorkloadBalancing = goals.WorkloadBalancing -NoisyNeighbor = goals.NoisyNeighborOptimization - -__all__ = ("Dummy", "ServerConsolidation", "ThermalOptimization", - "Unclassified", "WorkloadBalancing", - "NoisyNeighborOptimization",) diff --git a/watcher/decision_engine/goal/base.py b/watcher/decision_engine/goal/base.py deleted file mode 100644 index b272cfc..0000000 --- a/watcher/decision_engine/goal/base.py +++ /dev/null @@ -1,64 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import abc -import six - -from watcher.common.loader import loadable - - -@six.add_metaclass(abc.ABCMeta) -class Goal(loadable.Loadable): - - def __init__(self, config): - super(Goal, self).__init__(config) - self.name = self.get_name() - self.display_name = self.get_display_name() - self.efficacy_specification = self.get_efficacy_specification() - - @classmethod - @abc.abstractmethod - def get_name(cls): - """Name of the goal: should be identical to the related entry point""" - raise NotImplementedError() - - @classmethod - @abc.abstractmethod - def get_display_name(cls): - """The goal display name for the goal""" - raise NotImplementedError() - - @classmethod - @abc.abstractmethod - def get_translatable_display_name(cls): - """The translatable msgid of the goal""" - # Note(v-francoise): Defined here to be used as the translation key for - # other services - raise NotImplementedError() - - @classmethod - def get_config_opts(cls): - """Defines the configuration options to be associated to this loadable - - :return: A list of configuration options relative to this Loadable - :rtype: list of :class:`oslo_config.cfg.Opt` instances - """ - return [] - - @abc.abstractmethod - def get_efficacy_specification(cls): - """The efficacy spec for the current goal""" - raise NotImplementedError() diff --git a/watcher/decision_engine/goal/efficacy/__init__.py b/watcher/decision_engine/goal/efficacy/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/decision_engine/goal/efficacy/base.py b/watcher/decision_engine/goal/efficacy/base.py deleted file mode 100644 index b517700..0000000 --- a/watcher/decision_engine/goal/efficacy/base.py +++ /dev/null @@ -1,84 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -An efficacy specification is a contract that is associated to each :ref:`Goal -` that defines the various :ref:`efficacy indicators -` a strategy achieving the associated goal -should provide within its :ref:`solution `. Indeed, each -solution proposed by a strategy will be validated against this contract before -calculating its :ref:`global efficacy `. -""" - -import abc -from oslo_serialization import jsonutils - -import six -import voluptuous - - -@six.add_metaclass(abc.ABCMeta) -class EfficacySpecification(object): - - def __init__(self): - self._indicators_specs = self.get_indicators_specifications() - - @property - def indicators_specs(self): - return self._indicators_specs - - @abc.abstractmethod - def get_indicators_specifications(self): - """List the specifications of the indicator for this efficacy spec - - :return: Tuple of indicator specifications - :rtype: Tuple of :py:class:`~.IndicatorSpecification` instances - """ - raise NotImplementedError() - - @abc.abstractmethod - def get_global_efficacy_indicator(self, indicators_map): - """Compute the global efficacy for the goal it achieves - - :param indicators_map: dict-like object containing the - efficacy indicators related to this spec - :type indicators_map: :py:class:`~.IndicatorsMap` instance - :raises: NotImplementedError - :returns: :py:class:`~.Indicator` instance - """ - raise NotImplementedError() - - @property - def schema(self): - """Combined schema from the schema of the indicators""" - schema = voluptuous.Schema({}, required=True) - for indicator in self.indicators_specs: 
- key_constraint = (voluptuous.Required - if indicator.required else voluptuous.Optional) - schema = schema.extend( - {key_constraint(indicator.name): indicator.schema.schema}) - - return schema - - def validate_efficacy_indicators(self, indicators_map): - return self.schema(indicators_map) - - def get_indicators_specs_dicts(self): - return [indicator.to_dict() - for indicator in self.indicators_specs] - - def serialize_indicators_specs(self): - return jsonutils.dumps(self.get_indicators_specs_dicts()) diff --git a/watcher/decision_engine/goal/efficacy/indicators.py b/watcher/decision_engine/goal/efficacy/indicators.py deleted file mode 100644 index 1b24262..0000000 --- a/watcher/decision_engine/goal/efficacy/indicators.py +++ /dev/null @@ -1,146 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import abc -import six - -from oslo_log import log -import voluptuous - -from watcher._i18n import _ -from watcher.common import exception - -LOG = log.getLogger(__name__) - - -@six.add_metaclass(abc.ABCMeta) -class IndicatorSpecification(object): - - def __init__(self, name=None, description=None, unit=None, required=True): - self.name = name - self.description = description - self.unit = unit - self.required = required - - @abc.abstractproperty - def schema(self): - """Schema used to validate the indicator value - - :return: A Voplutuous Schema - :rtype: :py:class:`.voluptuous.Schema` instance - """ - raise NotImplementedError() - - @classmethod - def validate(cls, solution): - """Validate the given solution - - :raises: :py:class:`~.InvalidIndicatorValue` when the validation fails - """ - indicator = cls() - value = None - try: - value = getattr(solution, indicator.name) - indicator.schema(value) - except Exception as exc: - LOG.exception(exc) - raise exception.InvalidIndicatorValue( - name=indicator.name, value=value, spec_type=type(indicator)) - - def to_dict(self): - return { - "name": self.name, - "description": self.description, - "unit": self.unit, - "schema": str(self.schema.schema) if self.schema else None, - } - - def __str__(self): - return str(self.to_dict()) - - -class AverageCpuLoad(IndicatorSpecification): - - def __init__(self): - super(AverageCpuLoad, self).__init__( - name="avg_cpu_percent", - description=_("Average CPU load as a percentage of the CPU time."), - unit="%", - ) - - @property - def schema(self): - return voluptuous.Schema( - voluptuous.Range(min=0, max=100), required=True) - - -class MigrationEfficacy(IndicatorSpecification): - - def __init__(self): - super(MigrationEfficacy, self).__init__( - name="migration_efficacy", - description=_("Represents the percentage of released nodes out of " - "the total number of migrations."), - unit="%", - required=True - ) - - @property - def schema(self): - return voluptuous.Schema( - 
voluptuous.Range(min=0, max=100), required=True) - - -class ComputeNodesCount(IndicatorSpecification): - def __init__(self): - super(ComputeNodesCount, self).__init__( - name="compute_nodes_count", - description=_("The total number of enabled compute nodes."), - unit=None, - ) - - @property - def schema(self): - return voluptuous.Schema( - voluptuous.Range(min=0), required=True) - - -class ReleasedComputeNodesCount(IndicatorSpecification): - def __init__(self): - super(ReleasedComputeNodesCount, self).__init__( - name="released_compute_nodes_count", - description=_("The number of compute nodes to be released."), - unit=None, - ) - - @property - def schema(self): - return voluptuous.Schema( - voluptuous.Range(min=0), required=True) - - -class InstanceMigrationsCount(IndicatorSpecification): - def __init__(self): - super(InstanceMigrationsCount, self).__init__( - name="instance_migrations_count", - description=_("The number of VM migrations to be performed."), - unit=None, - ) - - @property - def schema(self): - return voluptuous.Schema( - voluptuous.Range(min=0), required=True) diff --git a/watcher/decision_engine/goal/efficacy/specs.py b/watcher/decision_engine/goal/efficacy/specs.py deleted file mode 100644 index 474459e..0000000 --- a/watcher/decision_engine/goal/efficacy/specs.py +++ /dev/null @@ -1,53 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from watcher._i18n import _ -from watcher.decision_engine.goal.efficacy import base -from watcher.decision_engine.goal.efficacy import indicators -from watcher.decision_engine.solution import efficacy - - -class Unclassified(base.EfficacySpecification): - - def get_indicators_specifications(self): - return () - - def get_global_efficacy_indicator(self, indicators_map): - return None - - -class ServerConsolidation(base.EfficacySpecification): - - def get_indicators_specifications(self): - return [ - indicators.ComputeNodesCount(), - indicators.ReleasedComputeNodesCount(), - indicators.InstanceMigrationsCount(), - ] - - def get_global_efficacy_indicator(self, indicators_map=None): - value = 0 - if indicators_map and indicators_map.compute_nodes_count > 0: - value = (float(indicators_map.released_compute_nodes_count) / - float(indicators_map.compute_nodes_count)) * 100 - - return efficacy.Indicator( - name="released_nodes_ratio", - description=_("Ratio of released compute nodes divided by the " - "total number of enabled compute nodes."), - unit='%', - value=value, - ) diff --git a/watcher/decision_engine/goal/goals.py b/watcher/decision_engine/goal/goals.py deleted file mode 100644 index e5be78f..0000000 --- a/watcher/decision_engine/goal/goals.py +++ /dev/null @@ -1,194 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from watcher._i18n import _ -from watcher.decision_engine.goal import base -from watcher.decision_engine.goal.efficacy import specs - - -class Dummy(base.Goal): - """Dummy - - Reserved goal that is used for testing purposes. - """ - - @classmethod - def get_name(cls): - return "dummy" - - @classmethod - def get_display_name(cls): - return _("Dummy goal") - - @classmethod - def get_translatable_display_name(cls): - return "Dummy goal" - - @classmethod - def get_efficacy_specification(cls): - """The efficacy spec for the current goal""" - return specs.Unclassified() - - -class Unclassified(base.Goal): - """Unclassified - - This goal is used to ease the development process of a strategy. Containing - no actual indicator specification, this goal can be used whenever a - strategy has yet to be formally associated with an existing goal. If the - goal achieve has been identified but there is no available implementation, - this Goal can also be used as a transitional stage. - """ - - @classmethod - def get_name(cls): - return "unclassified" - - @classmethod - def get_display_name(cls): - return _("Unclassified") - - @classmethod - def get_translatable_display_name(cls): - return "Unclassified" - - @classmethod - def get_efficacy_specification(cls): - """The efficacy spec for the current goal""" - return specs.Unclassified() - - -class ServerConsolidation(base.Goal): - """ServerConsolidation - - This goal is for efficient usage of compute server resources in order to - reduce the total number of servers. 
- """ - - @classmethod - def get_name(cls): - return "server_consolidation" - - @classmethod - def get_display_name(cls): - return _("Server Consolidation") - - @classmethod - def get_translatable_display_name(cls): - return "Server Consolidation" - - @classmethod - def get_efficacy_specification(cls): - """The efficacy spec for the current goal""" - return specs.ServerConsolidation() - - -class ThermalOptimization(base.Goal): - """ThermalOptimization - - This goal is used to balance the temperature across different servers. - """ - - @classmethod - def get_name(cls): - return "thermal_optimization" - - @classmethod - def get_display_name(cls): - return _("Thermal Optimization") - - @classmethod - def get_translatable_display_name(cls): - return "Thermal Optimization" - - @classmethod - def get_efficacy_specification(cls): - """The efficacy spec for the current goal""" - return specs.Unclassified() - - -class WorkloadBalancing(base.Goal): - """WorkloadBalancing - - This goal is used to evenly distribute workloads across different servers. - """ - - @classmethod - def get_name(cls): - return "workload_balancing" - - @classmethod - def get_display_name(cls): - return _("Workload Balancing") - - @classmethod - def get_translatable_display_name(cls): - return "Workload Balancing" - - @classmethod - def get_efficacy_specification(cls): - """The efficacy spec for the current goal""" - return specs.Unclassified() - - -class AirflowOptimization(base.Goal): - """AirflowOptimization - - This goal is used to optimize the airflow within a cloud infrastructure. 
- """ - - @classmethod - def get_name(cls): - return "airflow_optimization" - - @classmethod - def get_display_name(cls): - return _("Airflow Optimization") - - @classmethod - def get_translatable_display_name(cls): - return "Airflow Optimization" - - @classmethod - def get_efficacy_specification(cls): - """The efficacy spec for the current goal""" - return specs.Unclassified() - - -class NoisyNeighborOptimization(base.Goal): - """NoisyNeighborOptimization - - This goal is used to identify and migrate a Noisy Neighbor - - a low priority VM that negatively affects peformance of a high priority VM - in terms of IPC by over utilizing Last Level Cache. - """ - - @classmethod - def get_name(cls): - return "noisy_neighbor" - - @classmethod - def get_display_name(cls): - return _("Noisy Neighbor") - - @classmethod - def get_translatable_display_name(cls): - return "Noisy Neighbor" - - @classmethod - def get_efficacy_specification(cls): - """The efficacy spec for the current goal""" - return specs.Unclassified() diff --git a/watcher/decision_engine/loading/__init__.py b/watcher/decision_engine/loading/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/decision_engine/loading/default.py b/watcher/decision_engine/loading/default.py deleted file mode 100644 index 8fbd5b8..0000000 --- a/watcher/decision_engine/loading/default.py +++ /dev/null @@ -1,61 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Authors: Jean-Emile DARTOIS -# Vincent FRANCOISE -# Tomasz Kaczynski -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# - -from __future__ import unicode_literals - - -from watcher.common.loader import default - - -class DefaultStrategyLoader(default.DefaultLoader): - def __init__(self): - super(DefaultStrategyLoader, self).__init__( - namespace='watcher_strategies') - - -class DefaultGoalLoader(default.DefaultLoader): - def __init__(self): - super(DefaultGoalLoader, self).__init__( - namespace='watcher_goals') - - -class DefaultPlannerLoader(default.DefaultLoader): - def __init__(self): - super(DefaultPlannerLoader, self).__init__( - namespace='watcher_planners') - - -class ClusterDataModelCollectorLoader(default.DefaultLoader): - def __init__(self): - super(ClusterDataModelCollectorLoader, self).__init__( - namespace='watcher_cluster_data_model_collectors') - - -class DefaultScoringLoader(default.DefaultLoader): - def __init__(self): - super(DefaultScoringLoader, self).__init__( - namespace='watcher_scoring_engines') - - -class DefaultScoringContainerLoader(default.DefaultLoader): - def __init__(self): - super(DefaultScoringContainerLoader, self).__init__( - namespace='watcher_scoring_engine_containers') diff --git a/watcher/decision_engine/manager.py b/watcher/decision_engine/manager.py deleted file mode 100644 index 7655d32..0000000 --- a/watcher/decision_engine/manager.py +++ /dev/null @@ -1,81 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# Copyright (c) 2016 Intel Corp -# -# Authors: Jean-Emile DARTOIS -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -""" -This component is responsible for computing a set of potential optimization -:ref:`Actions ` in order to fulfill the -:ref:`Goal ` of an :ref:`Audit `. - -It first reads the parameters of the :ref:`Audit ` from the -associated :ref:`Audit Template ` and knows the -:ref:`Goal ` to achieve. - -It then selects the most appropriate :ref:`Strategy ` -depending on how Watcher was configured for this :ref:`Goal `. - -The :ref:`Strategy ` is then executed and generates a set -of :ref:`Actions ` which are scheduled in time by the -:ref:`Watcher Planner ` (i.e., it generates an -:ref:`Action Plan `). - -See :doc:`../architecture` for more details on this component. -""" - -from watcher.common import service_manager -from watcher.decision_engine.messaging import audit_endpoint -from watcher.decision_engine.model.collector import manager - -from watcher import conf - -CONF = conf.CONF - - -class DecisionEngineManager(service_manager.ServiceManager): - - @property - def service_name(self): - return 'watcher-decision-engine' - - @property - def api_version(self): - return '1.0' - - @property - def publisher_id(self): - return CONF.watcher_decision_engine.publisher_id - - @property - def conductor_topic(self): - return CONF.watcher_decision_engine.conductor_topic - - @property - def notification_topics(self): - return CONF.watcher_decision_engine.notification_topics - - @property - def conductor_endpoints(self): - return [audit_endpoint.AuditEndpoint] - - @property - def notification_endpoints(self): - return self.collector_manager.get_notification_endpoints() - - @property - def collector_manager(self): - return manager.CollectorManager() diff --git a/watcher/decision_engine/messaging/__init__.py b/watcher/decision_engine/messaging/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/decision_engine/messaging/audit_endpoint.py 
b/watcher/decision_engine/messaging/audit_endpoint.py deleted file mode 100644 index 54d47ae..0000000 --- a/watcher/decision_engine/messaging/audit_endpoint.py +++ /dev/null @@ -1,55 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Authors: Jean-Emile DARTOIS -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from concurrent import futures - -from oslo_config import cfg -from oslo_log import log - -from watcher.decision_engine.audit import continuous as c_handler -from watcher.decision_engine.audit import oneshot as o_handler - -from watcher import objects - -CONF = cfg.CONF -LOG = log.getLogger(__name__) - - -class AuditEndpoint(object): - - def __init__(self, messaging): - self._messaging = messaging - self._executor = futures.ThreadPoolExecutor( - max_workers=CONF.watcher_decision_engine.max_workers) - self._oneshot_handler = o_handler.OneShotAuditHandler() - self._continuous_handler = c_handler.ContinuousAuditHandler().start() - - @property - def executor(self): - return self._executor - - def do_trigger_audit(self, context, audit_uuid): - audit = objects.Audit.get_by_uuid(context, audit_uuid, eager=True) - self._oneshot_handler.execute(audit, context) - - def trigger_audit(self, context, audit_uuid): - LOG.debug("Trigger audit %s" % audit_uuid) - self.executor.submit(self.do_trigger_audit, - context, - audit_uuid) - return audit_uuid diff --git a/watcher/decision_engine/model/__init__.py b/watcher/decision_engine/model/__init__.py deleted 
file mode 100644 index e69de29..0000000 diff --git a/watcher/decision_engine/model/base.py b/watcher/decision_engine/model/base.py deleted file mode 100644 index 8629d05..0000000 --- a/watcher/decision_engine/model/base.py +++ /dev/null @@ -1,40 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Authors: Vincent FRANCOISE -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -This component is in charge of executing the -:ref:`Action Plan ` built by the -:ref:`Watcher Decision Engine `. - -See: :doc:`../architecture` for more details on this component. -""" - -import abc -import six - - -@six.add_metaclass(abc.ABCMeta) -class Model(object): - - @abc.abstractmethod - def to_string(self): - raise NotImplementedError() - - @abc.abstractmethod - def to_xml(self): - raise NotImplementedError() diff --git a/watcher/decision_engine/model/collector/__init__.py b/watcher/decision_engine/model/collector/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/decision_engine/model/collector/base.py b/watcher/decision_engine/model/collector/base.py deleted file mode 100644 index b251a0c..0000000 --- a/watcher/decision_engine/model/collector/base.py +++ /dev/null @@ -1,185 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Authors: Jean-Emile DARTOIS -# Vincent FRANCOISE -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -""" -A :ref:`Cluster Data Model ` (or CDM) is a -logical representation of the current state and topology of the :ref:`Cluster -` :ref:`Managed resources `. - -It is represented as a set of :ref:`Managed resources -` (which may be a simple tree or a flat list of -key-value pairs) which enables Watcher :ref:`Strategies ` -to know the current relationships between the different :ref:`resources -`) of the :ref:`Cluster ` -during an :ref:`Audit ` and enables the :ref:`Strategy -` to request information such as: - -- What compute nodes are in a given :ref:`Audit Scope - `? -- What :ref:`Instances ` are hosted on a given compute - node? -- What is the current load of a compute node? -- What is the current free memory of a compute node? -- What is the network link between two compute nodes? -- What is the available bandwidth on a given network link? -- What is the current space available on a given virtual disk of a given - :ref:`Instance ` ? -- What is the current state of a given :ref:`Instance `? -- ... 
- -In a word, this data model enables the :ref:`Strategy ` -to know: - -- the current topology of the :ref:`Cluster ` -- the current capacity for each :ref:`Managed resource - ` -- the current amount of used/free space for each :ref:`Managed resource - ` -- the current state of each :ref:`Managed resources - ` - -In the Watcher project, we aim at providing a some generic and basic -:ref:`Cluster Data Model ` for each :ref:`Goal -`, usable in the associated :ref:`Strategies -` through a plugin-based mechanism which are called -cluster data model collectors (or CDMCs). These CDMCs are responsible for -loading and keeping up-to-date their associated CDM by listening to events and -also periodically rebuilding themselves from the ground up. They are also -directly accessible from the strategies classes. These CDMs are used to: - -- simplify the development of a new :ref:`Strategy ` for a - given :ref:`Goal ` when there already are some existing - :ref:`Strategies ` associated to the same :ref:`Goal - ` -- avoid duplicating the same code in several :ref:`Strategies - ` associated to the same :ref:`Goal ` -- have a better consistency between the different :ref:`Strategies - ` for a given :ref:`Goal ` -- avoid any strong coupling with any external :ref:`Cluster Data Model - ` (the proposed data model acts as a pivot - data model) - -There may be various :ref:`generic and basic Cluster Data Models -` proposed in Watcher helpers, each of them -being adapted to achieving a given :ref:`Goal `: - -- For example, for a :ref:`Goal ` which aims at optimizing - the network :ref:`resources ` the :ref:`Strategy - ` may need to know which :ref:`resources - ` are communicating together. -- Whereas for a :ref:`Goal ` which aims at optimizing thermal - and power conditions, the :ref:`Strategy ` may need to - know the location of each compute node in the racks and the location of each - rack in the room. 
- -Note however that a developer can use his/her own :ref:`Cluster Data Model -` if the proposed data model does not fit -his/her needs as long as the :ref:`Strategy ` is able to -produce a :ref:`Solution ` for the requested :ref:`Goal -`. For example, a developer could rely on the Nova Data Model -to optimize some compute resources. - -The :ref:`Cluster Data Model ` may be persisted -in any appropriate storage system (SQL database, NoSQL database, JSON file, -XML File, In Memory Database, ...). As of now, an in-memory model is built and -maintained in the background in order to accelerate the execution of -strategies. -""" - -import abc -import copy -import threading - -from oslo_config import cfg -from oslo_log import log -import six - -from watcher.common import clients -from watcher.common.loader import loadable -from watcher.decision_engine.model import model_root - -LOG = log.getLogger(__name__) - - -@six.add_metaclass(abc.ABCMeta) -class BaseClusterDataModelCollector(loadable.LoadableSingleton): - - STALE_MODEL = model_root.ModelRoot(stale=True) - - def __init__(self, config, osc=None): - super(BaseClusterDataModelCollector, self).__init__(config) - self.osc = osc if osc else clients.OpenStackClients() - self._cluster_data_model = None - self.lock = threading.RLock() - - @property - def cluster_data_model(self): - if self._cluster_data_model is None: - self.lock.acquire() - self._cluster_data_model = self.execute() - self.lock.release() - - return self._cluster_data_model - - @cluster_data_model.setter - def cluster_data_model(self, model): - self.lock.acquire() - self._cluster_data_model = model - self.lock.release() - - @abc.abstractproperty - def notification_endpoints(self): - """Associated notification endpoints - - :return: Associated notification endpoints - :rtype: List of :py:class:`~.EventsNotificationEndpoint` instances - """ - raise NotImplementedError() - - def set_cluster_data_model_as_stale(self): - self.cluster_data_model = self.STALE_MODEL - 
- @abc.abstractmethod - def execute(self): - """Build a cluster data model""" - raise NotImplementedError() - - @classmethod - def get_config_opts(cls): - return [ - cfg.IntOpt( - 'period', - default=3600, - help='The time interval (in seconds) between each ' - 'synchronization of the model'), - ] - - def get_latest_cluster_data_model(self): - LOG.debug("Creating copy") - LOG.debug(self.cluster_data_model.to_xml()) - return copy.deepcopy(self.cluster_data_model) - - def synchronize(self): - """Synchronize the cluster data model - - Whenever called this synchronization will perform a drop-in replacement - with the existing cluster data model - """ - self.cluster_data_model = self.execute() diff --git a/watcher/decision_engine/model/collector/cinder.py b/watcher/decision_engine/model/collector/cinder.py deleted file mode 100644 index 72aa644..0000000 --- a/watcher/decision_engine/model/collector/cinder.py +++ /dev/null @@ -1,209 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright 2017 NEC Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import six - -from oslo_log import log - -from watcher.common import cinder_helper -from watcher.common import exception -from watcher.decision_engine.model.collector import base -from watcher.decision_engine.model import element -from watcher.decision_engine.model import model_root -from watcher.decision_engine.model.notification import cinder - -LOG = log.getLogger(__name__) - - -class CinderClusterDataModelCollector(base.BaseClusterDataModelCollector): - """Cinder cluster data model collector - - The Cinder cluster data model collector creates an in-memory - representation of the resources exposed by the storage service. - """ - - def __init__(self, config, osc=None): - super(CinderClusterDataModelCollector, self).__init__(config, osc) - - @property - def notification_endpoints(self): - """Associated notification endpoints - - :return: Associated notification endpoints - :rtype: List of :py:class:`~.EventsNotificationEndpoint` instances - """ - return [ - cinder.CapacityNotificationEndpoint(self), - cinder.VolumeCreateEnd(self), - cinder.VolumeDeleteEnd(self), - cinder.VolumeUpdateEnd(self), - cinder.VolumeAttachEnd(self), - cinder.VolumeDetachEnd(self), - cinder.VolumeResizeEnd(self) - ] - - def execute(self): - """Build the storage cluster data model""" - LOG.debug("Building latest Cinder cluster data model") - - builder = ModelBuilder(self.osc) - return builder.execute() - - -class ModelBuilder(object): - """Build the graph-based model - - This model builder adds the following data" - - Storage-related knowledge (Cinder) - - """ - def __init__(self, osc): - self.osc = osc - self.model = model_root.StorageModelRoot() - self.cinder = osc.cinder() - self.cinder_helper = cinder_helper.CinderHelper(osc=self.osc) - - def _add_physical_layer(self): - """Add the physical layer of the graph. - - This includes components which represent actual infrastructure - hardware. 
- """ - for snode in self.cinder_helper.get_storage_node_list(): - self.add_storage_node(snode) - for pool in self.cinder_helper.get_storage_pool_list(): - pool = self._build_storage_pool(pool) - self.model.add_pool(pool) - storage_name = getattr(pool, 'name') - try: - storage_node = self.model.get_node_by_name( - storage_name) - # Connect the instance to its compute node - self.model.map_pool(pool, storage_node) - except exception.StorageNodeNotFound: - continue - - def add_storage_node(self, node): - # Build and add base node. - storage_node = self.build_storage_node(node) - self.model.add_node(storage_node) - - def add_storage_pool(self, pool): - storage_pool = self._build_storage_pool(pool) - self.model.add_pool(storage_pool) - - def build_storage_node(self, node): - """Build a storage node from a Cinder storage node - - :param node: A storage node - :type node: :py:class:`~cinderclient.v2.services.Service` - """ - # node.host is formatted as host@backendname since ocata, - # or may be only host as of ocata - backend = "" - try: - backend = node.host.split('@')[1] - except IndexError: - pass - - volume_type = self.cinder_helper.get_volume_type_by_backendname( - backend) - - # build up the storage node. - node_attributes = { - "host": node.host, - "zone": node.zone, - "state": node.state, - "status": node.status, - "volume_type": volume_type} - - storage_node = element.StorageNode(**node_attributes) - return storage_node - - def _build_storage_pool(self, pool): - """Build a storage pool from a Cinder storage pool - - :param pool: A storage pool - :type pool: :py:class:`~cinderlient.v2.capabilities.Capabilities` - """ - # build up the storage pool. 
- node_attributes = { - "name": pool.name, - "total_volumes": pool.total_volumes, - "total_capacity_gb": pool.total_capacity_gb, - "free_capacity_gb": pool.free_capacity_gb, - "provisioned_capacity_gb": pool.provisioned_capacity_gb, - "allocated_capacity_gb": pool.allocated_capacity_gb} - - storage_pool = element.Pool(**node_attributes) - return storage_pool - - def _add_virtual_layer(self): - """Add the virtual layer to the graph. - - This layer is the virtual components of the infrastructure. - """ - self._add_virtual_storage() - - def _add_virtual_storage(self): - volumes = self.cinder_helper.get_volume_list() - for vol in volumes: - volume = self._build_volume_node(vol) - self.model.add_volume(volume) - pool_name = getattr(vol, 'os-vol-host-attr:host') - if pool_name is None: - # The volume is not attached to any pool - continue - try: - pool = self.model.get_pool_by_pool_name( - pool_name) - self.model.map_volume(volume, pool) - except exception.PoolNotFound: - continue - - def _build_volume_node(self, volume): - """Build an volume node - - Create an volume node for the graph using cinder and the - `volume` cinder object. - :param instance: Cinder Volume object. - :return: A volume node for the graph. - """ - attachments = [{k: v for k, v in six.iteritems(d) if k in ( - 'server_id', 'attachment_id')} for d in volume.attachments] - - volume_attributes = { - "uuid": volume.id, - "size": volume.size, - "status": volume.status, - "attachments": attachments, - "name": volume.name or "", - "multiattach": volume.multiattach, - "snapshot_id": volume.snapshot_id or "", - "project_id": getattr(volume, 'os-vol-tenant-attr:tenant_id'), - "metadata": volume.metadata, - "bootable": volume.bootable} - - return element.Volume(**volume_attributes) - - def execute(self): - """Instantiates the graph with the openstack cluster data. - - The graph is populated along 2 layers: virtual and physical. As each - new layer is built connections are made back to previous layers. 
- """ - self._add_physical_layer() - self._add_virtual_layer() - return self.model diff --git a/watcher/decision_engine/model/collector/manager.py b/watcher/decision_engine/model/collector/manager.py deleted file mode 100644 index 1191036..0000000 --- a/watcher/decision_engine/model/collector/manager.py +++ /dev/null @@ -1,61 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Authors: Jean-Emile DARTOIS -# Vincent FRANCOISE -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from watcher.common import utils -from watcher.decision_engine.loading import default - - -class CollectorManager(object): - - def __init__(self): - self.collector_loader = default.ClusterDataModelCollectorLoader() - self._collectors = None - self._notification_endpoints = None - - def get_collectors(self): - if self._collectors is None: - collectors = utils.Struct() - available_collectors = self.collector_loader.list_available() - for collector_name in available_collectors: - collector = self.collector_loader.load(collector_name) - collectors[collector_name] = collector - self._collectors = collectors - - return self._collectors - - def get_notification_endpoints(self): - if self._notification_endpoints is None: - endpoints = [] - for collector in self.get_collectors().values(): - endpoints.extend(collector.notification_endpoints) - self._notification_endpoints = endpoints - - return self._notification_endpoints - - def get_cluster_model_collector(self, name, osc=None): - """Retrieve cluster data model collector - - :param name: name of the cluster data model collector plugin - :type name: str - :param osc: an OpenStackClients instance - :type osc: :py:class:`~.OpenStackClients` instance - :returns: cluster data model collector plugin - :rtype: :py:class:`~.BaseClusterDataModelCollector` - """ - return self.collector_loader.load(name, osc=osc) diff --git a/watcher/decision_engine/model/collector/nova.py b/watcher/decision_engine/model/collector/nova.py deleted file mode 100644 index a5fe3bd..0000000 --- a/watcher/decision_engine/model/collector/nova.py +++ /dev/null @@ -1,370 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2017 Intel Innovation and Research Ireland Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from oslo_log import log - -from watcher.common import exception -from watcher.common import nova_helper -from watcher.decision_engine.model.collector import base -from watcher.decision_engine.model import element -from watcher.decision_engine.model import model_root -from watcher.decision_engine.model.notification import nova - -LOG = log.getLogger(__name__) - - -class NovaClusterDataModelCollector(base.BaseClusterDataModelCollector): - """Nova cluster data model collector - - The Nova cluster data model collector creates an in-memory - representation of the resources exposed by the compute service. 
- """ - - def __init__(self, config, osc=None): - super(NovaClusterDataModelCollector, self).__init__(config, osc) - - @property - def notification_endpoints(self): - """Associated notification endpoints - - :return: Associated notification endpoints - :rtype: List of :py:class:`~.EventsNotificationEndpoint` instances - """ - return [ - nova.ServiceUpdated(self), - - nova.InstanceCreated(self), - nova.InstanceUpdated(self), - nova.InstanceDeletedEnd(self), - - nova.LegacyInstanceCreatedEnd(self), - nova.LegacyInstanceUpdated(self), - nova.LegacyInstanceDeletedEnd(self), - nova.LegacyLiveMigratedEnd(self), - ] - - def execute(self): - """Build the compute cluster data model""" - LOG.debug("Building latest Nova cluster data model") - - builder = ModelBuilder(self.osc) - return builder.execute() - - -class ModelBuilder(object): - """Build the graph-based model - - This model builder adds the following data" - - - Compute-related knowledge (Nova) - - TODO(v-francoise): Storage-related knowledge (Cinder) - - TODO(v-francoise): Network-related knowledge (Neutron) - - NOTE(v-francoise): This model builder is meant to be extended in the future - to also include both storage and network information respectively coming - from Cinder and Neutron. Some prelimary work has been done in this - direction in https://review.openstack.org/#/c/362730 but since we cannot - guarantee a sufficient level of consistency for neither the storage nor the - network part before the end of the Ocata cycle, this work has been - re-scheduled for Pike. In the meantime, all the associated code has been - commented out. - """ - def __init__(self, osc): - self.osc = osc - self.model = model_root.ModelRoot() - self.nova = osc.nova() - self.nova_helper = nova_helper.NovaHelper(osc=self.osc) - # self.neutron = osc.neutron() - # self.cinder = osc.cinder() - - def _add_physical_layer(self): - """Add the physical layer of the graph. 
- - This includes components which represent actual infrastructure - hardware. - """ - for cnode in self.nova_helper.get_compute_node_list(): - self.add_compute_node(cnode) - - def add_compute_node(self, node): - # Build and add base node. - compute_node = self.build_compute_node(node) - self.model.add_node(compute_node) - - # NOTE(v-francoise): we can encapsulate capabilities of the node - # (special instruction sets of CPUs) in the attributes; as well as - # sub-nodes can be added re-presenting e.g. GPUs/Accelerators etc. - - # # Build & add disk, memory, network and cpu nodes. - # disk_id, disk_node = self.build_disk_compute_node(base_id, node) - # self.add_node(disk_id, disk_node) - # mem_id, mem_node = self.build_memory_compute_node(base_id, node) - # self.add_node(mem_id, mem_node) - # net_id, net_node = self._build_network_compute_node(base_id) - # self.add_node(net_id, net_node) - # cpu_id, cpu_node = self.build_cpu_compute_node(base_id, node) - # self.add_node(cpu_id, cpu_node) - - # # Connect the base compute node to the dependant nodes. - # self.add_edges_from([(base_id, disk_id), (base_id, mem_id), - # (base_id, cpu_id), (base_id, net_id)], - # label="contains") - - def build_compute_node(self, node): - """Build a compute node from a Nova compute node - - :param node: A node hypervisor instance - :type node: :py:class:`~novaclient.v2.hypervisors.Hypervisor` - """ - # build up the compute node. 
- compute_service = self.nova_helper.get_service(node.service["id"]) - node_attributes = { - "id": node.id, - "uuid": compute_service.host, - "hostname": node.hypervisor_hostname, - "memory": node.memory_mb, - "disk": node.free_disk_gb, - "disk_capacity": node.local_gb, - "vcpus": node.vcpus, - "state": node.state, - "status": node.status} - - compute_node = element.ComputeNode(**node_attributes) - # compute_node = self._build_node("physical", "compute", "hypervisor", - # node_attributes) - return compute_node - - # def _build_network_compute_node(self, base_node): - # attributes = {} - # net_node = self._build_node("physical", "network", "NIC", attributes) - # net_id = "{}_network".format(base_node) - # return net_id, net_node - - # def build_disk_compute_node(self, base_node, compute): - # # Build disk node attributes. - # disk_attributes = { - # "size_gb": compute.local_gb, - # "used_gb": compute.local_gb_used, - # "available_gb": compute.free_disk_gb} - # disk_node = self._build_node("physical", "storage", "disk", - # disk_attributes) - # disk_id = "{}_disk".format(base_node) - # return disk_id, disk_node - - # def build_memory_compute_node(self, base_node, compute): - # # Build memory node attributes. - # memory_attrs = {"size_mb": compute.memory_mb, - # "used_mb": compute.memory_mb_used, - # "available_mb": compute.free_ram_mb} - # memory_node = self._build_node("physical", "memory", "memory", - # memory_attrs) - # memory_id = "{}_memory".format(base_node) - # return memory_id, memory_node - - # def build_cpu_compute_node(self, base_node, compute): - # # Build memory node attributes. 
- # cpu_attributes = {"vcpus": compute.vcpus, - # "vcpus_used": compute.vcpus_used, - # "info": jsonutils.loads(compute.cpu_info)} - # cpu_node = self._build_node("physical", "cpu", "cpu", cpu_attributes) - # cpu_id = "{}_cpu".format(base_node) - # return cpu_id, cpu_node - - # @staticmethod - # def _build_node(layer, category, node_type, attributes): - # return {"layer": layer, "category": category, "type": node_type, - # "attributes": attributes} - - def _add_virtual_layer(self): - """Add the virtual layer to the graph. - - This layer is the virtual components of the infrastructure, - such as vms. - """ - self._add_virtual_servers() - # self._add_virtual_network() - # self._add_virtual_storage() - - def _add_virtual_servers(self): - all_instances = self.nova_helper.get_instance_list() - for inst in all_instances: - # Add Node - instance = self._build_instance_node(inst) - self.model.add_instance(instance) - # Get the cnode_name uuid. - cnode_uuid = getattr(inst, "OS-EXT-SRV-ATTR:host") - if cnode_uuid is None: - # The instance is not attached to any Compute node - continue - try: - # Nova compute node - # cnode = self.nova_helper.get_compute_node_by_hostname( - # cnode_uuid) - compute_node = self.model.get_node_by_uuid( - cnode_uuid) - # Connect the instance to its compute node - self.model.map_instance(instance, compute_node) - except exception.ComputeNodeNotFound: - continue - - def _build_instance_node(self, instance): - """Build an instance node - - Create an instance node for the graph using nova and the - `server` nova object. - :param instance: Nova VM object. - :return: A instance node for the graph. 
- """ - flavor = self.nova_helper.get_flavor(instance.flavor["id"]) - instance_attributes = { - "uuid": instance.id, - "human_id": instance.human_id, - "memory": flavor.ram, - "disk": flavor.disk, - "disk_capacity": flavor.disk, - "vcpus": flavor.vcpus, - "state": getattr(instance, "OS-EXT-STS:vm_state"), - "metadata": instance.metadata} - - # node_attributes = dict() - # node_attributes["layer"] = "virtual" - # node_attributes["category"] = "compute" - # node_attributes["type"] = "compute" - # node_attributes["attributes"] = instance_attributes - return element.Instance(**instance_attributes) - - # def _add_virtual_storage(self): - # try: - # volumes = self.cinder.volumes.list() - # except Exception: - # return - # for volume in volumes: - # volume_id, volume_node = self._build_storage_node(volume) - # self.add_node(volume_id, volume_node) - # host = self._get_volume_host_id(volume_node) - # self.add_edge(volume_id, host) - # # Add connections to an instance. - # if volume_node['attributes']['attachments']: - # for attachment in volume_node['attributes']['attachments']: - # self.add_edge(volume_id, attachment['server_id'], - # label='ATTACHED_TO') - # volume_node['attributes'].pop('attachments') - - # def _add_virtual_network(self): - # try: - # routers = self.neutron.list_routers() - # except Exception: - # return - - # for network in self.neutron.list_networks()['networks']: - # self.add_node(*self._build_network(network)) - - # for router in routers['routers']: - # self.add_node(*self._build_router(router)) - - # router_interfaces, _, compute_ports = self._group_ports() - # for router_interface in router_interfaces: - # interface = self._build_router_interface(router_interface) - # router_interface_id = interface[0] - # router_interface_node = interface[1] - # router_id = interface[2] - # self.add_node(router_interface_id, router_interface_node) - # self.add_edge(router_id, router_interface_id) - # network_id = router_interface_node['attributes']['network_id'] 
- # self.add_edge(router_interface_id, network_id) - - # for compute_port in compute_ports: - # cp_id, cp_node, instance_id = self._build_compute_port_node( - # compute_port) - # self.add_node(cp_id, cp_node) - # self.add_edge(cp_id, vm_id) - # net_id = cp_node['attributes']['network_id'] - # self.add_edge(net_id, cp_id) - # # Connect port to physical node - # phys_net_node = "{}_network".format(cp_node['attributes'] - # ['binding:host_id']) - # self.add_edge(cp_id, phys_net_node) - - # def _get_volume_host_id(self, volume_node): - # host = volume_node['attributes']['os-vol-host-attr:host'] - # if host.find('@') != -1: - # host = host.split('@')[0] - # elif host.find('#') != -1: - # host = host.split('#')[0] - # return "{}_disk".format(host) - - # def _build_storage_node(self, volume_obj): - # volume = volume_obj.__dict__ - # volume["name"] = volume["id"] - # volume.pop("id") - # volume.pop("manager") - # node = self._build_node("virtual", "storage", 'volume', volume) - # return volume["name"], node - - # def _build_compute_port_node(self, compute_port): - # compute_port["name"] = compute_port["id"] - # compute_port.pop("id") - # nde_type = "{}_port".format( - # compute_port["device_owner"].split(":")[0]) - # compute_port.pop("device_owner") - # device_id = compute_port["device_id"] - # compute_port.pop("device_id") - # node = self._build_node("virtual", "network", nde_type, compute_port) - # return compute_port["name"], node, device_id - - # def _group_ports(self): - # router_interfaces = [] - # floating_ips = [] - # compute_ports = [] - # interface_types = ["network:router_interface", - # 'network:router_gateway'] - - # for port in self.neutron.list_ports()['ports']: - # if port['device_owner'] in interface_types: - # router_interfaces.append(port) - # elif port['device_owner'].startswith('compute:'): - # compute_ports.append(port) - # elif port['device_owner'] == 'network:floatingip': - # floating_ips.append(port) - - # return router_interfaces, floating_ips, 
compute_ports - - # def _build_router_interface(self, interface): - # interface["name"] = interface["id"] - # interface.pop("id") - # node_type = interface["device_owner"].split(":")[1] - # node = self._build_node("virtual", "network", node_type, interface) - # return interface["name"], node, interface["device_id"] - - # def _build_router(self, router): - # router_attrs = {"uuid": router['id'], - # "name": router['name'], - # "state": router['status']} - # node = self._build_node('virtual', 'network', 'router', router_attrs) - # return str(router['id']), node - - # def _build_network(self, network): - # node = self._build_node('virtual', 'network', 'network', network) - # return network['id'], node - - def execute(self): - """Instantiates the graph with the openstack cluster data. - - The graph is populated along 2 layers: virtual and physical. As each - new layer is built connections are made back to previous layers. - """ - self._add_physical_layer() - self._add_virtual_layer() - return self.model diff --git a/watcher/decision_engine/model/element/__init__.py b/watcher/decision_engine/model/element/__init__.py deleted file mode 100644 index dce2528..0000000 --- a/watcher/decision_engine/model/element/__init__.py +++ /dev/null @@ -1,40 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Authors: Vincent FRANCOISE -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from watcher.decision_engine.model.element import instance -from watcher.decision_engine.model.element import node -from watcher.decision_engine.model.element import volume - -ServiceState = node.ServiceState -ComputeNode = node.ComputeNode -StorageNode = node.StorageNode -Pool = node.Pool - -InstanceState = instance.InstanceState -Instance = instance.Instance -VolumeState = volume.VolumeState -Volume = volume.Volume - -__all__ = ['ServiceState', - 'ComputeNode', - 'InstanceState', - 'Instance', - 'StorageNode', - 'Pool', - 'VolumeState', - 'Volume'] diff --git a/watcher/decision_engine/model/element/base.py b/watcher/decision_engine/model/element/base.py deleted file mode 100644 index 6ff04da..0000000 --- a/watcher/decision_engine/model/element/base.py +++ /dev/null @@ -1,68 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Authors: Vincent FRANCOISE -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import abc -import collections - -from lxml import etree -from oslo_log import log -import six - -from watcher.objects import base -from watcher.objects import fields as wfields - -LOG = log.getLogger(__name__) - - -@six.add_metaclass(abc.ABCMeta) -class Element(base.WatcherObject, base.WatcherObjectDictCompat, - base.WatcherComparableObject): - - # Initial version - VERSION = '1.0' - - fields = {} - - def __init__(self, context=None, **kwargs): - for name, field in self.fields.items(): - # The idea here is to force the initialization of unspecified - # fields that have a default value - if (name not in kwargs and not field.nullable and - field.default != wfields.UnspecifiedDefault): - kwargs[name] = field.default - super(Element, self).__init__(context, **kwargs) - - @abc.abstractmethod - def accept(self, visitor): - raise NotImplementedError() - - def as_xml_element(self): - sorted_fieldmap = [] - for field in self.fields: - try: - value = str(self[field]) - sorted_fieldmap.append((field, value)) - except Exception as exc: - LOG.exception(exc) - - attrib = collections.OrderedDict(sorted_fieldmap) - - element_name = self.__class__.__name__ - instance_el = etree.Element(element_name, attrib=attrib) - - return instance_el diff --git a/watcher/decision_engine/model/element/compute_resource.py b/watcher/decision_engine/model/element/compute_resource.py deleted file mode 100644 index 4b0348a..0000000 --- a/watcher/decision_engine/model/element/compute_resource.py +++ /dev/null @@ -1,33 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import abc - -import six - -from watcher.decision_engine.model.element import base -from watcher.objects import fields as wfields - - -@six.add_metaclass(abc.ABCMeta) -class ComputeResource(base.Element): - - VERSION = '1.0' - - fields = { - "uuid": wfields.StringField(), - "human_id": wfields.StringField(default=""), - } diff --git a/watcher/decision_engine/model/element/instance.py b/watcher/decision_engine/model/element/instance.py deleted file mode 100644 index ebdb16d..0000000 --- a/watcher/decision_engine/model/element/instance.py +++ /dev/null @@ -1,55 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import enum - -from watcher.decision_engine.model.element import compute_resource -from watcher.objects import base -from watcher.objects import fields as wfields - - -class InstanceState(enum.Enum): - ACTIVE = 'active' # Instance is running - BUILDING = 'building' # Instance only exists in DB - PAUSED = 'paused' - SUSPENDED = 'suspended' # Instance is suspended to disk. 
- STOPPED = 'stopped' # Instance is shut off, the disk image is still there. - RESCUED = 'rescued' # A rescue image is running with the original image - # attached. - RESIZED = 'resized' # a Instance with the new size is active. - - SOFT_DELETED = 'soft-delete' - # still available to restore. - DELETED = 'deleted' # Instance is permanently deleted. - - ERROR = 'error' - - -@base.WatcherObjectRegistry.register_if(False) -class Instance(compute_resource.ComputeResource): - - fields = { - "state": wfields.StringField(default=InstanceState.ACTIVE.value), - - "memory": wfields.NonNegativeIntegerField(), - "disk": wfields.IntegerField(), - "disk_capacity": wfields.NonNegativeIntegerField(), - "vcpus": wfields.NonNegativeIntegerField(), - "metadata": wfields.JsonField(), - } - - def accept(self, visitor): - raise NotImplementedError() diff --git a/watcher/decision_engine/model/element/node.py b/watcher/decision_engine/model/element/node.py deleted file mode 100644 index 3807a6f..0000000 --- a/watcher/decision_engine/model/element/node.py +++ /dev/null @@ -1,80 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import enum - -from watcher.decision_engine.model.element import compute_resource -from watcher.decision_engine.model.element import storage_resource -from watcher.objects import base -from watcher.objects import fields as wfields - - -class ServiceState(enum.Enum): - ONLINE = 'up' - OFFLINE = 'down' - ENABLED = 'enabled' - DISABLED = 'disabled' - - -@base.WatcherObjectRegistry.register_if(False) -class ComputeNode(compute_resource.ComputeResource): - - fields = { - "id": wfields.NonNegativeIntegerField(), - "hostname": wfields.StringField(), - "status": wfields.StringField(default=ServiceState.ENABLED.value), - "state": wfields.StringField(default=ServiceState.ONLINE.value), - - "memory": wfields.NonNegativeIntegerField(), - "disk": wfields.IntegerField(), - "disk_capacity": wfields.NonNegativeIntegerField(), - "vcpus": wfields.NonNegativeIntegerField(), - } - - def accept(self, visitor): - raise NotImplementedError() - - -@base.WatcherObjectRegistry.register_if(False) -class StorageNode(storage_resource.StorageResource): - - fields = { - "host": wfields.StringField(), - "zone": wfields.StringField(), - "status": wfields.StringField(default=ServiceState.ENABLED.value), - "state": wfields.StringField(default=ServiceState.ONLINE.value), - "volume_type": wfields.StringField() - } - - def accept(self, visitor): - raise NotImplementedError() - - -@base.WatcherObjectRegistry.register_if(False) -class Pool(storage_resource.StorageResource): - - fields = { - "name": wfields.StringField(), - "total_volumes": wfields.NonNegativeIntegerField(), - "total_capacity_gb": wfields.NonNegativeIntegerField(), - "free_capacity_gb": wfields.NonNegativeIntegerField(), - "provisioned_capacity_gb": wfields.NonNegativeIntegerField(), - "allocated_capacity_gb": wfields.NonNegativeIntegerField(), - "virtual_free": wfields.NonNegativeIntegerField(), - } - - def accept(self, visitor): - raise NotImplementedError() diff --git a/watcher/decision_engine/model/element/storage_resource.py 
b/watcher/decision_engine/model/element/storage_resource.py deleted file mode 100644 index e65fb01..0000000 --- a/watcher/decision_engine/model/element/storage_resource.py +++ /dev/null @@ -1,33 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright 2017 NEC Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import abc - -import six - -from watcher.decision_engine.model.element import base -from watcher.objects import fields as wfields - - -@six.add_metaclass(abc.ABCMeta) -class StorageResource(base.Element): - - VERSION = '1.0' - - fields = { - "uuid": wfields.StringField(), - "human_id": wfields.StringField(default=""), - } diff --git a/watcher/decision_engine/model/element/volume.py b/watcher/decision_engine/model/element/volume.py deleted file mode 100644 index f96cd7c..0000000 --- a/watcher/decision_engine/model/element/volume.py +++ /dev/null @@ -1,56 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright 2017 NEC Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -import enum - -from watcher.decision_engine.model.element import storage_resource -from watcher.objects import base -from watcher.objects import fields as wfields - - -class VolumeState(enum.Enum): - # https://developer.openstack.org/api-ref/block-storage/v3/#volumes-volumes - - CREATING = 'creating' - AVAILABLE = 'available' - ATTACHING = 'attaching' - IN_USE = 'in-use' - DELETING = 'deleting' - ERROR = 'error' - ERROR_DELETING = 'error_deleting' - BACKING_UP = 'backing-up' - RESTORING_BACKUP = 'restoring-backup' - ERROR_RESTORING = 'error_restoring' - ERROR_EXTENDING = 'error_extending' - - -@base.WatcherObjectRegistry.register_if(False) -class Volume(storage_resource.StorageResource): - - fields = { - "size": wfields.NonNegativeIntegerField(), - "status": wfields.StringField(default=VolumeState.AVAILABLE.value), - "attachments": wfields.FlexibleListOfDictField(), - "name": wfields.StringField(), - "multiattach": wfields.BooleanField(), - "snapshot_id": wfields.UUIDField(), - "project_id": wfields.UUIDField(), - "metadata": wfields.JsonField(), - "bootable": wfields.BooleanField() - } - - def accept(self, visitor): - raise NotImplementedError() diff --git a/watcher/decision_engine/model/model_root.py b/watcher/decision_engine/model/model_root.py deleted file mode 100644 index 3b47085..0000000 --- a/watcher/decision_engine/model/model_root.py +++ /dev/null @@ -1,541 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 Intel Innovation and Research Ireland Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" -Openstack implementation of the cluster graph. -""" - -from lxml import etree -import networkx as nx -from oslo_concurrency import lockutils -from oslo_log import log -import six - -from watcher._i18n import _ -from watcher.common import exception -from watcher.decision_engine.model import base -from watcher.decision_engine.model import element - -LOG = log.getLogger(__name__) - - -class ModelRoot(nx.DiGraph, base.Model): - """Cluster graph for an Openstack cluster.""" - - def __init__(self, stale=False): - super(ModelRoot, self).__init__() - self.stale = stale - - def __nonzero__(self): - return not self.stale - - __bool__ = __nonzero__ - - @staticmethod - def assert_node(obj): - if not isinstance(obj, element.ComputeNode): - raise exception.IllegalArgumentException( - message=_("'obj' argument type is not valid: %s") % type(obj)) - - @staticmethod - def assert_instance(obj): - if not isinstance(obj, element.Instance): - raise exception.IllegalArgumentException( - message=_("'obj' argument type is not valid")) - - @lockutils.synchronized("model_root") - def add_node(self, node): - self.assert_node(node) - super(ModelRoot, self).add_node(node.uuid, node) - - @lockutils.synchronized("model_root") - def remove_node(self, node): - self.assert_node(node) - try: - super(ModelRoot, self).remove_node(node.uuid) - except nx.NetworkXError as exc: - LOG.exception(exc) - raise exception.ComputeNodeNotFound(name=node.uuid) - - @lockutils.synchronized("model_root") - def add_instance(self, instance): - self.assert_instance(instance) - try: - super(ModelRoot, 
self).add_node(instance.uuid, instance) - except nx.NetworkXError as exc: - LOG.exception(exc) - raise exception.InstanceNotFound(name=instance.uuid) - - @lockutils.synchronized("model_root") - def remove_instance(self, instance): - self.assert_instance(instance) - super(ModelRoot, self).remove_node(instance.uuid) - - @lockutils.synchronized("model_root") - def map_instance(self, instance, node): - """Map a newly created instance to a node - - :param instance: :py:class:`~.Instance` object or instance UUID - :type instance: str or :py:class:`~.Instance` - :param node: :py:class:`~.ComputeNode` object or node UUID - :type node: str or :py:class:`~.Instance` - """ - if isinstance(instance, six.string_types): - instance = self.get_instance_by_uuid(instance) - if isinstance(node, six.string_types): - node = self.get_node_by_uuid(node) - self.assert_node(node) - self.assert_instance(instance) - - self.add_edge(instance.uuid, node.uuid) - - @lockutils.synchronized("model_root") - def unmap_instance(self, instance, node): - if isinstance(instance, six.string_types): - instance = self.get_instance_by_uuid(instance) - if isinstance(node, six.string_types): - node = self.get_node_by_uuid(node) - - self.remove_edge(instance.uuid, node.uuid) - - def delete_instance(self, instance, node=None): - self.assert_instance(instance) - self.remove_instance(instance) - - @lockutils.synchronized("model_root") - def migrate_instance(self, instance, source_node, destination_node): - """Migrate single instance from source_node to destination_node - - :param instance: - :param source_node: - :param destination_node: - :return: - """ - self.assert_instance(instance) - self.assert_node(source_node) - self.assert_node(destination_node) - - if source_node == destination_node: - return False - - # unmap - self.remove_edge(instance.uuid, source_node.uuid) - # map - self.add_edge(instance.uuid, destination_node.uuid) - return True - - @lockutils.synchronized("model_root") - def 
get_all_compute_nodes(self): - return {uuid: cn for uuid, cn in self.nodes(data=True) - if isinstance(cn, element.ComputeNode)} - - @lockutils.synchronized("model_root") - def get_node_by_uuid(self, uuid): - try: - return self._get_by_uuid(uuid) - except exception.ComputeResourceNotFound: - raise exception.ComputeNodeNotFound(name=uuid) - - @lockutils.synchronized("model_root") - def get_instance_by_uuid(self, uuid): - try: - return self._get_by_uuid(uuid) - except exception.ComputeResourceNotFound: - raise exception.InstanceNotFound(name=uuid) - - def _get_by_uuid(self, uuid): - try: - return self.node[uuid] - except Exception as exc: - LOG.exception(exc) - raise exception.ComputeResourceNotFound(name=uuid) - - @lockutils.synchronized("model_root") - def get_node_by_instance_uuid(self, instance_uuid): - instance = self._get_by_uuid(instance_uuid) - for node_uuid in self.neighbors(instance.uuid): - node = self._get_by_uuid(node_uuid) - if isinstance(node, element.ComputeNode): - return node - raise exception.ComputeNodeNotFound(name=instance_uuid) - - @lockutils.synchronized("model_root") - def get_all_instances(self): - return {uuid: inst for uuid, inst in self.nodes(data=True) - if isinstance(inst, element.Instance)} - - @lockutils.synchronized("model_root") - def get_node_instances(self, node): - self.assert_node(node) - node_instances = [] - for instance_uuid in self.predecessors(node.uuid): - instance = self._get_by_uuid(instance_uuid) - if isinstance(instance, element.Instance): - node_instances.append(instance) - - return node_instances - - def to_string(self): - return self.to_xml() - - def to_xml(self): - root = etree.Element("ModelRoot") - # Build compute node tree - for cn in sorted(self.get_all_compute_nodes().values(), - key=lambda cn: cn.uuid): - compute_node_el = cn.as_xml_element() - - # Build mapped instance tree - node_instances = self.get_node_instances(cn) - for instance in sorted(node_instances, key=lambda x: x.uuid): - instance_el = 
instance.as_xml_element() - compute_node_el.append(instance_el) - - root.append(compute_node_el) - - # Build unmapped instance tree (i.e. not assigned to any compute node) - for instance in sorted(self.get_all_instances().values(), - key=lambda inst: inst.uuid): - try: - self.get_node_by_instance_uuid(instance.uuid) - except (exception.InstanceNotFound, exception.ComputeNodeNotFound): - root.append(instance.as_xml_element()) - - return etree.tostring(root, pretty_print=True).decode('utf-8') - - @classmethod - def from_xml(cls, data): - model = cls() - - root = etree.fromstring(data) - for cn in root.findall('.//ComputeNode'): - node = element.ComputeNode(**cn.attrib) - model.add_node(node) - - for inst in root.findall('.//Instance'): - instance = element.Instance(**inst.attrib) - model.add_instance(instance) - - parent = inst.getparent() - if parent.tag == 'ComputeNode': - node = model.get_node_by_uuid(parent.get('uuid')) - model.map_instance(instance, node) - else: - model.add_instance(instance) - - return model - - @classmethod - def is_isomorphic(cls, G1, G2): - def node_match(node1, node2): - return node1.as_dict() == node2.as_dict() - return nx.algorithms.isomorphism.isomorph.is_isomorphic( - G1, G2, node_match=node_match) - - -class StorageModelRoot(nx.DiGraph, base.Model): - """Cluster graph for an Openstack cluster.""" - - def __init__(self, stale=False): - super(StorageModelRoot, self).__init__() - self.stale = stale - - def __nonzero__(self): - return not self.stale - - __bool__ = __nonzero__ - - @staticmethod - def assert_node(obj): - if not isinstance(obj, element.StorageNode): - raise exception.IllegalArgumentException( - message=_("'obj' argument type is not valid: %s") % type(obj)) - - @staticmethod - def assert_pool(obj): - if not isinstance(obj, element.Pool): - raise exception.IllegalArgumentException( - message=_("'obj' argument type is not valid: %s") % type(obj)) - - @staticmethod - def assert_volume(obj): - if not isinstance(obj, 
element.Volume): - raise exception.IllegalArgumentException( - message=_("'obj' argument type is not valid: %s") % type(obj)) - - @lockutils.synchronized("storage_model") - def add_node(self, node): - self.assert_node(node) - super(StorageModelRoot, self).add_node(node.host, node) - - @lockutils.synchronized("storage_model") - def add_pool(self, pool): - self.assert_pool(pool) - super(StorageModelRoot, self).add_node(pool.name, pool) - - @lockutils.synchronized("storage_model") - def remove_node(self, node): - self.assert_node(node) - try: - super(StorageModelRoot, self).remove_node(node.host) - except nx.NetworkXError as exc: - LOG.exception(exc) - raise exception.StorageNodeNotFound(name=node.host) - - @lockutils.synchronized("storage_model") - def remove_pool(self, pool): - self.assert_pool(pool) - try: - super(StorageModelRoot, self).remove_node(pool.name) - except nx.NetworkXError as exc: - LOG.exception(exc) - raise exception.PoolNotFound(name=pool.name) - - @lockutils.synchronized("storage_model") - def map_pool(self, pool, node): - """Map a newly created pool to a node - - :param pool: :py:class:`~.Pool` object or pool name - :param node: :py:class:`~.StorageNode` object or node host - """ - if isinstance(pool, six.string_types): - pool = self.get_pool_by_pool_name(pool) - if isinstance(node, six.string_types): - node = self.get_node_by_name(node) - self.assert_node(node) - self.assert_pool(pool) - - self.add_edge(pool.name, node.host) - - @lockutils.synchronized("storage_model") - def unmap_pool(self, pool, node): - """Unmap a pool from a node - - :param pool: :py:class:`~.Pool` object or pool name - :param node: :py:class:`~.StorageNode` object or node name - """ - if isinstance(pool, six.string_types): - pool = self.get_pool_by_pool_name(pool) - if isinstance(node, six.string_types): - node = self.get_node_by_name(node) - - self.remove_edge(pool.name, node.host) - - @lockutils.synchronized("storage_model") - def add_volume(self, volume): - 
self.assert_volume(volume) - super(StorageModelRoot, self).add_node(volume.uuid, volume) - - @lockutils.synchronized("storage_model") - def remove_volume(self, volume): - self.assert_volume(volume) - try: - super(StorageModelRoot, self).remove_node(volume.uuid) - except nx.NetworkXError as exc: - LOG.exception(exc) - raise exception.VolumeNotFound(name=volume.uuid) - - @lockutils.synchronized("storage_model") - def map_volume(self, volume, pool): - """Map a newly created volume to a pool - - :param volume: :py:class:`~.Volume` object or volume UUID - :param pool: :py:class:`~.Pool` object or pool name - """ - if isinstance(volume, six.string_types): - volume = self.get_volume_by_uuid(volume) - if isinstance(pool, six.string_types): - pool = self.get_pool_by_pool_name(pool) - self.assert_pool(pool) - self.assert_volume(volume) - - self.add_edge(volume.uuid, pool.name) - - @lockutils.synchronized("storage_model") - def unmap_volume(self, volume, pool): - """Unmap a volume from a pool - - :param volume: :py:class:`~.Volume` object or volume UUID - :param pool: :py:class:`~.Pool` object or pool name - """ - if isinstance(volume, six.string_types): - volume = self.get_volume_by_uuid(volume) - if isinstance(pool, six.string_types): - pool = self.get_pool_by_pool_name(pool) - - self.remove_edge(volume.uuid, pool.name) - - def delete_volume(self, volume): - self.assert_volume(volume) - self.remove_volume(volume) - - @lockutils.synchronized("storage_model") - def get_all_storage_nodes(self): - return {host: cn for host, cn in self.nodes(data=True) - if isinstance(cn, element.StorageNode)} - - @lockutils.synchronized("storage_model") - def get_node_by_name(self, name): - """Get a node by node name - - :param node: :py:class:`~.StorageNode` object or node name - """ - try: - return self._get_by_name(name.split("#")[0]) - except exception.StorageResourceNotFound: - raise exception.StorageNodeNotFound(name=name) - - @lockutils.synchronized("storage_model") - def 
get_pool_by_pool_name(self, name): - try: - return self._get_by_name(name) - except exception.StorageResourceNotFound: - raise exception.PoolNotFound(name=name) - - @lockutils.synchronized("storage_model") - def get_volume_by_uuid(self, uuid): - try: - return self._get_by_uuid(uuid) - except exception.StorageResourceNotFound: - raise exception.VolumeNotFound(name=uuid) - - def _get_by_uuid(self, uuid): - try: - return self.node[uuid] - except Exception as exc: - LOG.exception(exc) - raise exception.StorageResourceNotFound(name=uuid) - - def _get_by_name(self, name): - try: - return self.node[name] - except Exception as exc: - LOG.exception(exc) - raise exception.StorageResourceNotFound(name=name) - - @lockutils.synchronized("storage_model") - def get_node_by_pool_name(self, pool_name): - pool = self._get_by_name(pool_name) - for node_name in self.neighbors(pool.name): - node = self._get_by_name(node_name) - if isinstance(node, element.StorageNode): - return node - raise exception.StorageNodeNotFound(name=pool_name) - - @lockutils.synchronized("storage_model") - def get_node_pools(self, node): - self.assert_node(node) - node_pools = [] - for pool_name in self.predecessors(node.host): - pool = self._get_by_name(pool_name) - if isinstance(pool, element.Pool): - node_pools.append(pool) - - return node_pools - - @lockutils.synchronized("storage_model") - def get_pool_by_volume(self, volume): - self.assert_volume(volume) - volume = self._get_by_uuid(volume.uuid) - for p in self.neighbors(volume.uuid): - pool = self._get_by_name(p) - if isinstance(pool, element.Pool): - return pool - raise exception.PoolNotFound(name=volume.uuid) - - @lockutils.synchronized("storage_model") - def get_all_volumes(self): - return {name: vol for name, vol in self.nodes(data=True) - if isinstance(vol, element.Volume)} - - @lockutils.synchronized("storage_model") - def get_pool_volumes(self, pool): - self.assert_pool(pool) - volumes = [] - for vol in self.predecessors(pool.name): - volume = 
self._get_by_uuid(vol) - if isinstance(volume, element.Volume): - volumes.append(volume) - - return volumes - - def to_string(self): - return self.to_xml() - - def to_xml(self): - root = etree.Element("ModelRoot") - # Build storage node tree - for cn in sorted(self.get_all_storage_nodes().values(), - key=lambda cn: cn.host): - storage_node_el = cn.as_xml_element() - # Build mapped pool tree - node_pools = self.get_node_pools(cn) - for pool in sorted(node_pools, key=lambda x: x.name): - pool_el = pool.as_xml_element() - storage_node_el.append(pool_el) - # Build mapped volume tree - pool_volumes = self.get_pool_volumes(pool) - for volume in sorted(pool_volumes, key=lambda x: x.uuid): - volume_el = volume.as_xml_element() - pool_el.append(volume_el) - - root.append(storage_node_el) - - # Build unmapped volume tree (i.e. not assigned to any pool) - for volume in sorted(self.get_all_volumes().values(), - key=lambda vol: vol.uuid): - try: - self.get_pool_by_volume(volume) - except (exception.VolumeNotFound, exception.PoolNotFound): - root.append(volume.as_xml_element()) - - return etree.tostring(root, pretty_print=True).decode('utf-8') - - @classmethod - def from_xml(cls, data): - model = cls() - - root = etree.fromstring(data) - for cn in root.findall('.//StorageNode'): - node = element.StorageNode(**cn.attrib) - model.add_node(node) - - for p in root.findall('.//Pool'): - pool = element.Pool(**p.attrib) - model.add_pool(pool) - - parent = p.getparent() - if parent.tag == 'StorageNode': - node = model.get_node_by_name(parent.get('host')) - model.map_pool(pool, node) - else: - model.add_pool(pool) - - for vol in root.findall('.//Volume'): - volume = element.Volume(**vol.attrib) - model.add_volume(volume) - - parent = vol.getparent() - if parent.tag == 'Pool': - pool = model.get_pool_by_pool_name(parent.get('name')) - model.map_volume(volume, pool) - else: - model.add_volume(volume) - - return model - - @classmethod - def is_isomorphic(cls, G1, G2): - return 
nx.algorithms.isomorphism.isomorph.is_isomorphic( - G1, G2) diff --git a/watcher/decision_engine/model/notification/__init__.py b/watcher/decision_engine/model/notification/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/decision_engine/model/notification/base.py b/watcher/decision_engine/model/notification/base.py deleted file mode 100644 index 9090ab3..0000000 --- a/watcher/decision_engine/model/notification/base.py +++ /dev/null @@ -1,38 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Authors: Vincent FRANCOISE -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import abc -import six - - -@six.add_metaclass(abc.ABCMeta) -class NotificationEndpoint(object): - - def __init__(self, collector): - super(NotificationEndpoint, self).__init__() - self.collector = collector - self._notifier = None - - @abc.abstractproperty - def filter_rule(self): - """Notification Filter""" - raise NotImplementedError() - - @property - def cluster_data_model(self): - return self.collector.cluster_data_model diff --git a/watcher/decision_engine/model/notification/cinder.py b/watcher/decision_engine/model/notification/cinder.py deleted file mode 100644 index 7d305dc..0000000 --- a/watcher/decision_engine/model/notification/cinder.py +++ /dev/null @@ -1,387 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright 2017 NEC Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import six - -from oslo_log import log -from watcher.common import cinder_helper -from watcher.common import exception -from watcher.decision_engine.model import element -from watcher.decision_engine.model.notification import base -from watcher.decision_engine.model.notification import filtering - -LOG = log.getLogger(__name__) - - -class CinderNotification(base.NotificationEndpoint): - - def __init__(self, collector): - super(CinderNotification, self).__init__(collector) - self._cinder = None - - @property - def cinder(self): - if self._cinder is None: - self._cinder = cinder_helper.CinderHelper() - return self._cinder - - def update_pool(self, pool, data): - """Update the storage pool using the notification data.""" - pool.update({ - "total_capacity_gb": data['total'], - "free_capacity_gb": data['free'], - "provisioned_capacity_gb": data['provisioned'], - "allocated_capacity_gb": data['allocated'], - "virtual_free": data['virtual_free'] - }) - - node_name = pool.name.split("#")[0] - node = self.get_or_create_node(node_name) - self.cluster_data_model.map_pool(pool, node) - LOG.debug("Mapped pool %s to %s", pool.name, node.host) - - def update_pool_by_api(self, pool): - """Update the storage pool using the API data.""" - if not pool: - return - _pool = self.cinder.get_storage_pool_by_name(pool.name) - pool.update({ - "total_volumes": _pool.total_volumes, - "total_capacity_gb": _pool.total_capacity_gb, - "free_capacity_gb": _pool.free_capacity_gb, - "provisioned_capacity_gb": _pool.provisioned_capacity_gb, - "allocated_capacity_gb": _pool.allocated_capacity_gb - }) - node_name = pool.name.split("#")[0] - node = self.get_or_create_node(node_name) - self.cluster_data_model.map_pool(pool, node) - LOG.debug("Mapped pool %s to %s", pool.name, node.host) - - def create_storage_node(self, name): - """Create the storage node by querying the Cinder API.""" - try: - _node = self.cinder.get_storage_node_by_name(name) - _volume_type = 
self.cinder.get_volume_type_by_backendname( - # name is formatted as host@backendname - name.split('@')[1]) - storage_node = element.StorageNode( - host=_node.host, - zone=_node.zone, - state=_node.state, - status=_node.status, - volume_type=_volume_type) - return storage_node - except Exception as exc: - LOG.exception(exc) - LOG.debug("Could not create storage node %s.", name) - raise exception.StorageNodeNotFound(name=name) - - def get_or_create_node(self, name): - """Get storage node by name, otherwise create storage node""" - if name is None: - LOG.debug("Storage node name not provided: skipping") - return - try: - return self.cluster_data_model.get_node_by_name(name) - except exception.StorageNodeNotFound: - # The node didn't exist yet so we create a new node object - node = self.create_storage_node(name) - LOG.debug("New storage node created: %s", name) - self.cluster_data_model.add_node(node) - LOG.debug("New storage node added: %s", name) - return node - - def create_pool(self, pool_name): - """Create the storage pool by querying the Cinder API.""" - try: - _pool = self.cinder.get_storage_pool_by_name(pool_name) - pool = element.Pool( - name=_pool.name, - total_volumes=_pool.total_volumes, - total_capacity_gb=_pool.total_capacity_gb, - free_capacity_gb=_pool.free_capacity_gb, - provisioned_capacity_gb=_pool.provisioned_capacity_gb, - allocated_capacity_gb=_pool.allocated_capacity_gb) - return pool - except Exception as exc: - LOG.exception(exc) - LOG.debug("Could not refresh the pool %s.", pool_name) - raise exception.PoolNotFound(name=pool_name) - - def get_or_create_pool(self, name): - if not name: - LOG.debug("Pool name not provided: skipping") - return - try: - return self.cluster_data_model.get_pool_by_pool_name(name) - except exception.PoolNotFound: - # The pool didn't exist yet so we create a new pool object - pool = self.create_pool(name) - LOG.debug("New storage pool created: %s", name) - self.cluster_data_model.add_pool(pool) - LOG.debug("New 
storage pool added: %s", name) - return pool - - def get_or_create_volume(self, volume_id, pool_name=None): - try: - if pool_name: - self.get_or_create_pool(pool_name) - except exception.PoolNotFound: - LOG.warning("Could not find storage pool %(pool)s for " - "volume %(volume)s", - dict(pool=pool_name, volume=volume_id)) - try: - return self.cluster_data_model.get_volume_by_uuid(volume_id) - except exception.VolumeNotFound: - # The volume didn't exist yet so we create a new volume object - volume = element.Volume(uuid=volume_id) - self.cluster_data_model.add_volume(volume) - return volume - - def update_volume(self, volume, data): - """Update the volume using the notification data.""" - - def _keyReplace(key): - if key == 'instance_uuid': - return 'server_id' - if key == 'id': - return 'attachment_id' - - attachments = [ - {_keyReplace(k): v for k, v in six.iteritems(d) - if k in ('instance_uuid', 'id')} - for d in data['volume_attachment'] - ] - - # glance_metadata is provided if volume is bootable - bootable = False - if 'glance_metadata' in data: - bootable = True - - volume.update({ - "name": data['display_name'] or "", - "size": data['size'], - "status": data['status'], - "attachments": attachments, - "snapshot_id": data['snapshot_id'] or "", - "project_id": data['tenant_id'], - "metadata": data['metadata'], - "bootable": bootable - }) - - try: - # if volume is under pool, let's update pool element. 
- # get existing pool or create pool by cinder api - pool = self.get_or_create_pool(data['host']) - self.update_pool_by_api(pool) - - except exception.PoolNotFound as exc: - LOG.exception(exc) - pool = None - - self.update_volume_mapping(volume, pool) - - def update_volume_mapping(self, volume, pool): - if pool is None: - self.cluster_data_model.add_volume(volume) - LOG.debug("Volume %s not yet attached to any pool: skipping", - volume.uuid) - return - try: - try: - current_pool = ( - self.cluster_data_model.get_pool_by_volume( - volume) or self.get_or_create_pool(pool.name)) - except exception.PoolNotFound as exc: - LOG.exception(exc) - # If we can't create the pool, - # we consider the volume as unmapped - current_pool = None - - LOG.debug("Mapped pool %s found", pool.name) - if current_pool and pool != current_pool: - LOG.debug("Unmapping volume %s from %s", - volume.uuid, pool.name) - self.cluster_data_model.unmap_volume(volume, current_pool) - except exception.VolumeNotFound: - # The instance didn't exist yet so we map it for the first time - LOG.debug("New volume: mapping it to %s", pool.name) - finally: - if pool: - self.cluster_data_model.map_volume(volume, pool) - LOG.debug("Mapped volume %s to %s", volume.uuid, pool.name) - - def delete_volume(self, volume, pool): - try: - self.cluster_data_model.delete_volume(volume) - except Exception: - LOG.info("Volume %s already deleted", volume.uuid) - - try: - if pool: - # if volume is under pool, let's update pool element. 
- # get existing pool or create pool by cinder api - pool = self.get_or_create_pool(pool.name) - self.update_pool_by_api(pool) - except exception.PoolNotFound as exc: - LOG.exception(exc) - pool = None - - -class CapacityNotificationEndpoint(CinderNotification): - - @property - def filter_rule(self): - """Cinder capacity notification filter""" - return filtering.NotificationFilter( - publisher_id=r'capacity.*', - event_type='capacity.pool', - ) - - def info(self, ctxt, publisher_id, event_type, payload, metadata): - ctxt.request_id = metadata['message_id'] - ctxt.project_domain = event_type - LOG.info("Event '%(event)s' received from %(publisher)s " - "with metadata %(metadata)s" % - dict(event=event_type, - publisher=publisher_id, - metadata=metadata)) - LOG.debug(payload) - name = payload['name_to_id'] - try: - pool = self.get_or_create_pool(name) - self.update_pool(pool, payload) - except exception.PoolNotFound as exc: - LOG.exception(exc) - - -class VolumeNotificationEndpoint(CinderNotification): - publisher_id_regex = r'^volume.*' - - -class VolumeCreateEnd(VolumeNotificationEndpoint): - - @property - def filter_rule(self): - """Cinder volume notification filter""" - return filtering.NotificationFilter( - publisher_id=self.publisher_id_regex, - event_type='volume.create.end', - ) - - def info(self, ctxt, publisher_id, event_type, payload, metadata): - ctxt.request_id = metadata['message_id'] - ctxt.project_domain = event_type - LOG.info("Event '%(event)s' received from %(publisher)s " - "with metadata %(metadata)s" % - dict(event=event_type, - publisher=publisher_id, - metadata=metadata)) - LOG.debug(payload) - volume_id = payload['volume_id'] - poolname = payload['host'] - volume = self.get_or_create_volume(volume_id, poolname) - self.update_volume(volume, payload) - - -class VolumeUpdateEnd(VolumeNotificationEndpoint): - - @property - def filter_rule(self): - """Cinder volume notification filter""" - return filtering.NotificationFilter( - 
publisher_id=self.publisher_id_regex, - event_type='volume.update.end', - ) - - def info(self, ctxt, publisher_id, event_type, payload, metadata): - ctxt.request_id = metadata['message_id'] - ctxt.project_domain = event_type - LOG.info("Event '%(event)s' received from %(publisher)s " - "with metadata %(metadata)s" % - dict(event=event_type, - publisher=publisher_id, - metadata=metadata)) - LOG.debug(payload) - volume_id = payload['volume_id'] - poolname = payload['host'] - volume = self.get_or_create_volume(volume_id, poolname) - self.update_volume(volume, payload) - - -class VolumeAttachEnd(VolumeUpdateEnd): - - @property - def filter_rule(self): - """Cinder volume notification filter""" - return filtering.NotificationFilter( - publisher_id=self.publisher_id_regex, - event_type='volume.attach.end', - ) - - -class VolumeDetachEnd(VolumeUpdateEnd): - - @property - def filter_rule(self): - """Cinder volume notification filter""" - return filtering.NotificationFilter( - publisher_id=self.publisher_id_regex, - event_type='volume.detach.end', - ) - - -class VolumeResizeEnd(VolumeUpdateEnd): - - @property - def filter_rule(self): - """Cinder volume notification filter""" - return filtering.NotificationFilter( - publisher_id=self.publisher_id_regex, - event_type='volume.resize.end', - ) - - -class VolumeDeleteEnd(VolumeNotificationEndpoint): - - @property - def filter_rule(self): - """Cinder volume notification filter""" - return filtering.NotificationFilter( - publisher_id=self.publisher_id_regex, - event_type='volume.delete.end', - ) - - def info(self, ctxt, publisher_id, event_type, payload, metadata): - ctxt.request_id = metadata['message_id'] - ctxt.project_domain = event_type - LOG.info("Event '%(event)s' received from %(publisher)s " - "with metadata %(metadata)s" % - dict(event=event_type, - publisher=publisher_id, - metadata=metadata)) - LOG.debug(payload) - volume_id = payload['volume_id'] - poolname = payload['host'] - volume = 
self.get_or_create_volume(volume_id, poolname) - - try: - pool = self.get_or_create_pool(poolname) - except exception.PoolNotFound as exc: - LOG.exception(exc) - pool = None - - self.delete_volume(volume, pool) diff --git a/watcher/decision_engine/model/notification/filtering.py b/watcher/decision_engine/model/notification/filtering.py deleted file mode 100644 index 737e317..0000000 --- a/watcher/decision_engine/model/notification/filtering.py +++ /dev/null @@ -1,88 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Authors: Vincent FRANCOISE -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import re - -import oslo_messaging as om -import six - - -class NotificationFilter(om.NotificationFilter): - """Notification Endpoint base class - - This class is responsible for handling incoming notifications. Depending - on the priority level of the incoming, you may need to implement one or - more of the following methods: - - .. 
code: py - def audit(self, ctxt, publisher_id, event_type, payload, metadata): - do_something(payload) - - def info(self, ctxt, publisher_id, event_type, payload, metadata): - do_something(payload) - - def warn(self, ctxt, publisher_id, event_type, payload, metadata): - do_something(payload) - - def error(self, ctxt, publisher_id, event_type, payload, metadata): - do_something(payload) - - def critical(self, ctxt, publisher_id, event_type, payload, metadata): - do_something(payload) - """ - - def _build_regex_dict(self, regex_list): - if regex_list is None: - return {} - - regex_mapping = {} - for key, value in regex_list.items(): - if isinstance(value, dict): - regex_mapping[key] = self._build_regex_dict(value) - else: - if callable(value): - regex_mapping[key] = value - elif value is not None: - regex_mapping[key] = re.compile(value) - else: - regex_mapping[key] = None - - return regex_mapping - - def _check_for_mismatch(self, data, regex): - if isinstance(regex, dict): - mismatch_results = [ - k not in data or not self._check_for_mismatch(data[k], v) - for k, v in regex.items() - ] - if not mismatch_results: - return False - - return all(mismatch_results) - elif callable(regex): - # The filter is a callable that should return True - # if there is a mismatch - return regex(data) - elif regex is not None and data is None: - return True - elif (regex is not None and - isinstance(data, six.string_types) and - not regex.match(data)): - return True - - return False diff --git a/watcher/decision_engine/model/notification/nova.py b/watcher/decision_engine/model/notification/nova.py deleted file mode 100644 index 42df5cd..0000000 --- a/watcher/decision_engine/model/notification/nova.py +++ /dev/null @@ -1,466 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Authors: Vincent FRANCOISE -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from oslo_log import log -from watcher.common import exception -from watcher.common import nova_helper -from watcher.decision_engine.model import element -from watcher.decision_engine.model.notification import base -from watcher.decision_engine.model.notification import filtering - -LOG = log.getLogger(__name__) - - -class NovaNotification(base.NotificationEndpoint): - - def __init__(self, collector): - super(NovaNotification, self).__init__(collector) - self._nova = None - - @property - def nova(self): - if self._nova is None: - self._nova = nova_helper.NovaHelper() - return self._nova - - def get_or_create_instance(self, instance_uuid, node_uuid=None): - try: - if node_uuid: - self.get_or_create_node(node_uuid) - except exception.ComputeNodeNotFound: - LOG.warning("Could not find compute node %(node)s for " - "instance %(instance)s", - dict(node=node_uuid, instance=instance_uuid)) - try: - instance = self.cluster_data_model.get_instance_by_uuid( - instance_uuid) - except exception.InstanceNotFound: - # The instance didn't exist yet so we create a new instance object - LOG.debug("New instance created: %s", instance_uuid) - instance = element.Instance(uuid=instance_uuid) - - self.cluster_data_model.add_instance(instance) - - return instance - - def update_instance(self, instance, data): - instance_data = data['nova_object.data'] - instance_flavor_data = instance_data['flavor']['nova_object.data'] - - memory_mb = instance_flavor_data['memory_mb'] - num_cores = instance_flavor_data['vcpus'] - disk_gb = instance_flavor_data['root_gb'] - instance_metadata 
= data['nova_object.data']['metadata'] - - instance.update({ - 'state': instance_data['state'], - 'hostname': instance_data['host_name'], - 'human_id': instance_data['display_name'], - 'memory': memory_mb, - 'vcpus': num_cores, - 'disk': disk_gb, - 'disk_capacity': disk_gb, - 'metadata': instance_metadata, - }) - - try: - node = self.get_or_create_node(instance_data['host']) - except exception.ComputeNodeNotFound as exc: - LOG.exception(exc) - # If we can't create the node, we consider the instance as unmapped - node = None - - self.update_instance_mapping(instance, node) - - def legacy_update_instance(self, instance, data): - memory_mb = data['memory_mb'] - num_cores = data['vcpus'] - disk_gb = data['root_gb'] - instance_metadata = data['metadata'] - - instance.update({ - 'state': data['state'], - 'hostname': data['hostname'], - 'human_id': data['display_name'], - 'memory': memory_mb, - 'vcpus': num_cores, - 'disk': disk_gb, - 'disk_capacity': disk_gb, - 'metadata': instance_metadata, - }) - - try: - node = self.get_or_create_node(data['host']) - except exception.ComputeNodeNotFound as exc: - LOG.exception(exc) - # If we can't create the node, we consider the instance as unmapped - node = None - - self.update_instance_mapping(instance, node) - - def update_compute_node(self, node, data): - """Update the compute node using the notification data.""" - node_data = data['nova_object.data'] - node_state = ( - element.ServiceState.OFFLINE.value - if node_data['forced_down'] else element.ServiceState.ONLINE.value) - node_status = ( - element.ServiceState.DISABLED.value - if node_data['disabled'] else element.ServiceState.ENABLED.value) - - node.update({ - 'hostname': node_data['host'], - 'state': node_state, - 'status': node_status, - }) - - def create_compute_node(self, node_hostname): - """Update the compute node by querying the Nova API.""" - try: - _node = self.nova.get_compute_node_by_hostname(node_hostname) - node = element.ComputeNode( - id=_node.id, - 
uuid=node_hostname, - hostname=_node.hypervisor_hostname, - state=_node.state, - status=_node.status, - memory=_node.memory_mb, - vcpus=_node.vcpus, - disk=_node.free_disk_gb, - disk_capacity=_node.local_gb, - ) - return node - except Exception as exc: - LOG.exception(exc) - LOG.debug("Could not refresh the node %s.", node_hostname) - raise exception.ComputeNodeNotFound(name=node_hostname) - - return False - - def get_or_create_node(self, uuid): - if uuid is None: - LOG.debug("Compute node UUID not provided: skipping") - return - try: - return self.cluster_data_model.get_node_by_uuid(uuid) - except exception.ComputeNodeNotFound: - # The node didn't exist yet so we create a new node object - node = self.create_compute_node(uuid) - LOG.debug("New compute node created: %s", uuid) - self.cluster_data_model.add_node(node) - LOG.debug("New compute node mapped: %s", uuid) - return node - - def update_instance_mapping(self, instance, node): - if node is None: - self.cluster_data_model.add_instance(instance) - LOG.debug("Instance %s not yet attached to any node: skipping", - instance.uuid) - return - try: - try: - current_node = ( - self.cluster_data_model.get_node_by_instance_uuid( - instance.uuid) or self.get_or_create_node(node.uuid)) - except exception.ComputeNodeNotFound as exc: - LOG.exception(exc) - # If we can't create the node, - # we consider the instance as unmapped - current_node = None - - LOG.debug("Mapped node %s found", node.uuid) - if current_node and node != current_node: - LOG.debug("Unmapping instance %s from %s", - instance.uuid, node.uuid) - self.cluster_data_model.unmap_instance(instance, current_node) - except exception.InstanceNotFound: - # The instance didn't exist yet so we map it for the first time - LOG.debug("New instance: mapping it to %s", node.uuid) - finally: - if node: - self.cluster_data_model.map_instance(instance, node) - LOG.debug("Mapped instance %s to %s", instance.uuid, node.uuid) - - def delete_instance(self, instance, node): - 
try: - self.cluster_data_model.delete_instance(instance, node) - except Exception: - LOG.info("Instance %s already deleted", instance.uuid) - - -class VersionedNotificationEndpoint(NovaNotification): - publisher_id_regex = r'^nova-compute.*' - - -class UnversionedNotificationEndpoint(NovaNotification): - publisher_id_regex = r'^compute.*' - - -class ServiceUpdated(VersionedNotificationEndpoint): - - @property - def filter_rule(self): - """Nova service.update notification filter""" - return filtering.NotificationFilter( - publisher_id=self.publisher_id_regex, - event_type='service.update', - ) - - def info(self, ctxt, publisher_id, event_type, payload, metadata): - ctxt.request_id = metadata['message_id'] - ctxt.project_domain = event_type - LOG.info("Event '%(event)s' received from %(publisher)s " - "with metadata %(metadata)s" % - dict(event=event_type, - publisher=publisher_id, - metadata=metadata)) - LOG.debug(payload) - node_data = payload['nova_object.data'] - node_uuid = node_data['host'] - try: - node = self.get_or_create_node(node_uuid) - self.update_compute_node(node, payload) - except exception.ComputeNodeNotFound as exc: - LOG.exception(exc) - - -class InstanceCreated(VersionedNotificationEndpoint): - - @property - def filter_rule(self): - """Nova instance.update notification filter""" - return filtering.NotificationFilter( - publisher_id=self.publisher_id_regex, - event_type='instance.update', - # To be "fully" created, an instance transitions - # from the 'building' state to the 'active' one. 
- # See http://docs.openstack.org/developer/nova/vmstates.html - payload={ - 'nova_object.data': { - 'state': element.InstanceState.ACTIVE.value, - 'state_update': { - 'nova_object.data': { - 'old_state': element.InstanceState.BUILDING.value, - 'state': element.InstanceState.ACTIVE.value, - }, - 'nova_object.name': 'InstanceStateUpdatePayload', - 'nova_object.namespace': 'nova', - }, - } - } - ) - - def info(self, ctxt, publisher_id, event_type, payload, metadata): - ctxt.request_id = metadata['message_id'] - ctxt.project_domain = event_type - LOG.info("Event '%(event)s' received from %(publisher)s " - "with metadata %(metadata)s" % - dict(event=event_type, - publisher=publisher_id, - metadata=metadata)) - LOG.debug(payload) - instance_data = payload['nova_object.data'] - instance_uuid = instance_data['uuid'] - node_uuid = instance_data.get('host') - instance = self.get_or_create_instance(instance_uuid, node_uuid) - - self.update_instance(instance, payload) - - -class InstanceUpdated(VersionedNotificationEndpoint): - - @staticmethod - def _match_not_new_instance_state(data): - is_new_instance = ( - data['old_state'] == element.InstanceState.BUILDING.value and - data['state'] == element.InstanceState.ACTIVE.value) - - return not is_new_instance - - @property - def filter_rule(self): - """Nova instance.update notification filter""" - return filtering.NotificationFilter( - publisher_id=self.publisher_id_regex, - event_type='instance.update', - ) - - def info(self, ctxt, publisher_id, event_type, payload, metadata): - ctxt.request_id = metadata['message_id'] - ctxt.project_domain = event_type - LOG.info("Event '%(event)s' received from %(publisher)s " - "with metadata %(metadata)s" % - dict(event=event_type, - publisher=publisher_id, - metadata=metadata)) - LOG.debug(payload) - instance_data = payload['nova_object.data'] - instance_uuid = instance_data['uuid'] - node_uuid = instance_data.get('host') - instance = self.get_or_create_instance(instance_uuid, node_uuid) - - 
self.update_instance(instance, payload) - - -class InstanceDeletedEnd(VersionedNotificationEndpoint): - - @property - def filter_rule(self): - """Nova service.update notification filter""" - return filtering.NotificationFilter( - publisher_id=self.publisher_id_regex, - event_type='instance.delete.end', - ) - - def info(self, ctxt, publisher_id, event_type, payload, metadata): - ctxt.request_id = metadata['message_id'] - ctxt.project_domain = event_type - LOG.info("Event '%(event)s' received from %(publisher)s " - "with metadata %(metadata)s" % - dict(event=event_type, - publisher=publisher_id, - metadata=metadata)) - LOG.debug(payload) - - instance_data = payload['nova_object.data'] - instance_uuid = instance_data['uuid'] - node_uuid = instance_data.get('host') - instance = self.get_or_create_instance(instance_uuid, node_uuid) - - try: - node = self.get_or_create_node(instance_data['host']) - except exception.ComputeNodeNotFound as exc: - LOG.exception(exc) - # If we can't create the node, we consider the instance as unmapped - node = None - - self.delete_instance(instance, node) - - -class LegacyInstanceUpdated(UnversionedNotificationEndpoint): - - @property - def filter_rule(self): - """Nova compute.instance.update notification filter""" - return filtering.NotificationFilter( - publisher_id=self.publisher_id_regex, - event_type='compute.instance.update', - ) - - def info(self, ctxt, publisher_id, event_type, payload, metadata): - ctxt.request_id = metadata['message_id'] - ctxt.project_domain = event_type - LOG.info("Event '%(event)s' received from %(publisher)s " - "with metadata %(metadata)s" % - dict(event=event_type, - publisher=publisher_id, - metadata=metadata)) - LOG.debug(payload) - - instance_uuid = payload['instance_id'] - node_uuid = payload.get('node') - instance = self.get_or_create_instance(instance_uuid, node_uuid) - - self.legacy_update_instance(instance, payload) - - -class LegacyInstanceCreatedEnd(UnversionedNotificationEndpoint): - - @property - 
def filter_rule(self): - """Nova compute.instance.create.end notification filter""" - return filtering.NotificationFilter( - publisher_id=self.publisher_id_regex, - event_type='compute.instance.create.end', - ) - - def info(self, ctxt, publisher_id, event_type, payload, metadata): - ctxt.request_id = metadata['message_id'] - ctxt.project_domain = event_type - LOG.info("Event '%(event)s' received from %(publisher)s " - "with metadata %(metadata)s" % - dict(event=event_type, - publisher=publisher_id, - metadata=metadata)) - LOG.debug(payload) - - instance_uuid = payload['instance_id'] - node_uuid = payload.get('node') - instance = self.get_or_create_instance(instance_uuid, node_uuid) - - self.legacy_update_instance(instance, payload) - - -class LegacyInstanceDeletedEnd(UnversionedNotificationEndpoint): - - @property - def filter_rule(self): - """Nova compute.instance.delete.end notification filter""" - return filtering.NotificationFilter( - publisher_id=self.publisher_id_regex, - event_type='compute.instance.delete.end', - ) - - def info(self, ctxt, publisher_id, event_type, payload, metadata): - ctxt.request_id = metadata['message_id'] - ctxt.project_domain = event_type - LOG.info("Event '%(event)s' received from %(publisher)s " - "with metadata %(metadata)s" % - dict(event=event_type, - publisher=publisher_id, - metadata=metadata)) - LOG.debug(payload) - instance_uuid = payload['instance_id'] - node_uuid = payload.get('node') - instance = self.get_or_create_instance(instance_uuid, node_uuid) - - try: - node = self.get_or_create_node(payload['host']) - except exception.ComputeNodeNotFound as exc: - LOG.exception(exc) - # If we can't create the node, we consider the instance as unmapped - node = None - - self.delete_instance(instance, node) - - -class LegacyLiveMigratedEnd(UnversionedNotificationEndpoint): - - @property - def filter_rule(self): - """Nova *.live_migration.post.dest.end notification filter""" - return filtering.NotificationFilter( - 
publisher_id=self.publisher_id_regex, - event_type='compute.instance.live_migration.post.dest.end', - ) - - def info(self, ctxt, publisher_id, event_type, payload, metadata): - ctxt.request_id = metadata['message_id'] - ctxt.project_domain = event_type - LOG.info("Event '%(event)s' received from %(publisher)s " - "with metadata %(metadata)s" % - dict(event=event_type, - publisher=publisher_id, - metadata=metadata)) - LOG.debug(payload) - - instance_uuid = payload['instance_id'] - node_uuid = payload.get('node') - instance = self.get_or_create_instance(instance_uuid, node_uuid) - - self.legacy_update_instance(instance, payload) diff --git a/watcher/decision_engine/planner/__init__.py b/watcher/decision_engine/planner/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/decision_engine/planner/base.py b/watcher/decision_engine/planner/base.py deleted file mode 100644 index 9c255b4..0000000 --- a/watcher/decision_engine/planner/base.py +++ /dev/null @@ -1,78 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Authors: Jean-Emile DARTOIS -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -""" -The :ref:`Watcher Planner ` is part of the -:ref:`Watcher Decision Engine `. - -This module takes the set of :ref:`Actions ` generated by a -:ref:`Strategy ` and builds the design of a workflow which -defines how-to schedule in time those different -:ref:`Actions ` and for each -:ref:`Action ` what are the prerequisite conditions. 
- -It is important to schedule :ref:`Actions ` in time in order -to prevent overload of the :ref:`Cluster ` while applying -the :ref:`Action Plan `. For example, it is important -not to migrate too many instances at the same time in order to avoid a network -congestion which may decrease the :ref:`SLA ` for -:ref:`Customers `. - -It is also important to schedule :ref:`Actions ` in order to -avoid security issues such as denial of service on core OpenStack services. - -:ref:`Some default implementations are provided `, but it is -possible to :ref:`develop new implementations ` -which are dynamically loaded by Watcher at launch time. - -See :doc:`../architecture` for more details on this component. -""" - -import abc -import six - -from watcher.common.loader import loadable - - -@six.add_metaclass(abc.ABCMeta) -class BasePlanner(loadable.Loadable): - - @classmethod - def get_config_opts(cls): - """Defines the configuration options to be associated to this loadable - - :return: A list of configuration options relative to this Loadable - :rtype: list of :class:`oslo_config.cfg.Opt` instances - """ - return [] - - @abc.abstractmethod - def schedule(self, context, audit_uuid, solution): - """The planner receives a solution to schedule - - :param solution: A solution provided by a strategy for scheduling - :type solution: :py:class:`~.BaseSolution` subclass instance - :param audit_uuid: the audit uuid - :type audit_uuid: str - :return: Action plan with an ordered sequence of actions such that all - security, dependency, and performance requirements are met. 
- :rtype: :py:class:`watcher.objects.ActionPlan` instance - """ - # example: directed acyclic graph - raise NotImplementedError() diff --git a/watcher/decision_engine/planner/manager.py b/watcher/decision_engine/planner/manager.py deleted file mode 100644 index 7169470..0000000 --- a/watcher/decision_engine/planner/manager.py +++ /dev/null @@ -1,39 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -from oslo_log import log - -from watcher.decision_engine.loading import default as loader - -from watcher import conf - -LOG = log.getLogger(__name__) -CONF = conf.CONF - - -class PlannerManager(object): - def __init__(self): - self._loader = loader.DefaultPlannerLoader() - - @property - def loader(self): - return self._loader - - def load(self): - selected_planner = CONF.watcher_planner.planner - LOG.debug("Loading %s", selected_planner) - return self.loader.load(name=selected_planner) diff --git a/watcher/decision_engine/planner/weight.py b/watcher/decision_engine/planner/weight.py deleted file mode 100644 index 24c707a..0000000 --- a/watcher/decision_engine/planner/weight.py +++ /dev/null @@ -1,222 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Authors: Vincent Francoise -# Alexander Chadin -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import collections - -import networkx as nx -from oslo_config import cfg -from oslo_config import types -from oslo_log import log - -from watcher.common import utils -from watcher.decision_engine.planner import base -from watcher import objects - -LOG = log.getLogger(__name__) - - -class WeightPlanner(base.BasePlanner): - """Weight planner implementation - - This implementation builds actions with parents in accordance with weights. - Set of actions having a higher weight will be scheduled before - the other ones. There are two config options to configure: - action_weights and parallelization. - - *Limitations* - - - This planner requires to have action_weights and parallelization configs - tuned well. - """ - - def __init__(self, config): - super(WeightPlanner, self).__init__(config) - - action_weights = { - 'nop': 60, - 'change_nova_service_state': 50, - 'sleep': 40, - 'migrate': 30, - 'resize': 20, - 'turn_host_to_acpi_s3_state': 10, - 'change_node_power_state': 9, - } - - parallelization = { - 'turn_host_to_acpi_s3_state': 2, - 'resize': 2, - 'migrate': 2, - 'sleep': 1, - 'change_nova_service_state': 1, - 'nop': 1, - 'change_node_power_state': 2, - } - - @classmethod - def get_config_opts(cls): - return [ - cfg.Opt( - 'weights', - type=types.Dict(value_type=types.Integer()), - help="These weights are used to schedule the actions. " - "Action Plan will be build in accordance with sets of " - "actions ordered by descending weights." - "Two action types cannot have the same weight. 
", - default=cls.action_weights), - cfg.Opt( - 'parallelization', - type=types.Dict(value_type=types.Integer()), - help="Number of actions to be run in parallel on a per " - "action type basis.", - default=cls.parallelization), - ] - - @staticmethod - def chunkify(lst, n): - """Yield successive n-sized chunks from lst.""" - if n < 1: - # Just to make sure the number is valid - n = 1 - - # Split a flat list in a list of chunks of size n. - # e.g. chunkify([0, 1, 2, 3, 4], 2) -> [[0, 1], [2, 3], [4]] - for i in range(0, len(lst), n): - yield lst[i:i + n] - - def compute_action_graph(self, sorted_weighted_actions): - reverse_weights = {v: k for k, v in self.config.weights.items()} - # leaf_groups contains a list of list of nodes called groups - # each group is a set of nodes from which a future node will - # branch off (parent nodes). - - # START --> migrate-1 --> migrate-3 - # \ \--> resize-1 --> FINISH - # \--> migrate-2 -------------/ - # In the above case migrate-1 will be the only member of the leaf - # group that migrate-3 will use as parent group, whereas - # resize-1 will have both migrate-2 and migrate-3 in its - # parent/leaf group - leaf_groups = [] - action_graph = nx.DiGraph() - # We iterate through each action type category (sorted by weight) to - # insert them in a Directed Acyclic Graph - for idx, (weight, actions) in enumerate(sorted_weighted_actions): - action_chunks = self.chunkify( - actions, self.config.parallelization[reverse_weights[weight]]) - - # We split the actions into chunks/layers that will have to be - # spread across all the available branches of the graph - for chunk_idx, actions_chunk in enumerate(action_chunks): - for action in actions_chunk: - action_graph.add_node(action) - - # all other actions - parent_nodes = [] - if not idx and not chunk_idx: - parent_nodes = [] - elif leaf_groups: - parent_nodes = leaf_groups - - for parent_node in parent_nodes: - action_graph.add_edge(parent_node, action) - 
action.parents.append(parent_node.uuid) - - if leaf_groups: - leaf_groups = [] - leaf_groups.extend([a for a in actions_chunk]) - - return action_graph - - def schedule(self, context, audit_id, solution): - LOG.debug('Creating an action plan for the audit uuid: %s', audit_id) - action_plan = self.create_action_plan(context, audit_id, solution) - - sorted_weighted_actions = self.get_sorted_actions_by_weight( - context, action_plan, solution) - action_graph = self.compute_action_graph(sorted_weighted_actions) - - self._create_efficacy_indicators( - context, action_plan.id, solution.efficacy_indicators) - - if len(action_graph.nodes()) == 0: - LOG.warning("The action plan is empty") - action_plan.state = objects.action_plan.State.SUCCEEDED - action_plan.save() - - self.create_scheduled_actions(action_graph) - return action_plan - - def get_sorted_actions_by_weight(self, context, action_plan, solution): - # We need to make them immutable to add them to the graph - action_objects = list([ - objects.Action( - context, uuid=utils.generate_uuid(), parents=[], - action_plan_id=action_plan.id, **a) - for a in solution.actions]) - # This is a dict of list with each being a weight and the list being - # all the actions associated to this weight - weighted_actions = collections.defaultdict(list) - for action in action_objects: - action_weight = self.config.weights[action.action_type] - weighted_actions[action_weight].append(action) - - return reversed(sorted(weighted_actions.items(), key=lambda x: x[0])) - - def create_scheduled_actions(self, graph): - for action in graph.nodes(): - LOG.debug("Creating the %s in the Watcher database", - action.action_type) - try: - action.create() - except Exception as exc: - LOG.exception(exc) - raise - - def create_action_plan(self, context, audit_id, solution): - strategy = objects.Strategy.get_by_name( - context, solution.strategy.name) - - action_plan_dict = { - 'uuid': utils.generate_uuid(), - 'audit_id': audit_id, - 'strategy_id': 
strategy.id, - 'state': objects.action_plan.State.RECOMMENDED, - 'global_efficacy': solution.global_efficacy, - } - - new_action_plan = objects.ActionPlan(context, **action_plan_dict) - new_action_plan.create() - - return new_action_plan - - def _create_efficacy_indicators(self, context, action_plan_id, indicators): - efficacy_indicators = [] - for indicator in indicators: - efficacy_indicator_dict = { - 'uuid': utils.generate_uuid(), - 'name': indicator.name, - 'description': indicator.description, - 'unit': indicator.unit, - 'value': indicator.value, - 'action_plan_id': action_plan_id, - } - new_efficacy_indicator = objects.EfficacyIndicator( - context, **efficacy_indicator_dict) - new_efficacy_indicator.create() - - efficacy_indicators.append(new_efficacy_indicator) - return efficacy_indicators diff --git a/watcher/decision_engine/planner/workload_stabilization.py b/watcher/decision_engine/planner/workload_stabilization.py deleted file mode 100644 index f7cd96a..0000000 --- a/watcher/decision_engine/planner/workload_stabilization.py +++ /dev/null @@ -1,300 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - -import abc - -from oslo_config import cfg -from oslo_config import types -from oslo_log import log - -from watcher.common import clients -from watcher.common import exception -from watcher.common import nova_helper -from watcher.common import utils -from watcher.decision_engine.planner import base -from watcher import objects - -LOG = log.getLogger(__name__) - - -class WorkloadStabilizationPlanner(base.BasePlanner): - """Workload Stabilization planner implementation - - This implementation comes with basic rules with a set of action types that - are weighted. An action having a lower weight will be scheduled before the - other ones. The set of action types can be specified by 'weights' in the - ``watcher.conf``. You need to associate a different weight to all available - actions into the configuration file, otherwise you will get an error when - the new action will be referenced in the solution produced by a strategy. - - *Limitations* - - - This is a proof of concept that is not meant to be used in production - """ - - def __init__(self, config): - super(WorkloadStabilizationPlanner, self).__init__(config) - self._osc = clients.OpenStackClients() - - @property - def osc(self): - return self._osc - - weights_dict = { - 'turn_host_to_acpi_s3_state': 0, - 'resize': 1, - 'migrate': 2, - 'sleep': 3, - 'change_nova_service_state': 4, - 'nop': 5, - } - - @classmethod - def get_config_opts(cls): - return [ - cfg.Opt( - 'weights', - type=types.Dict(value_type=types.Integer()), - help="These weights are used to schedule the actions", - default=cls.weights_dict), - ] - - def create_action(self, - action_plan_id, - action_type, - input_parameters=None): - uuid = utils.generate_uuid() - action = { - 'uuid': uuid, - 'action_plan_id': int(action_plan_id), - 'action_type': action_type, - 'input_parameters': input_parameters, - 'state': objects.action.State.PENDING, - 'parents': None - } - - return action - - def load_child_class(self, child_name): - for c in 
BaseActionValidator.__subclasses__(): - if child_name == c.action_name: - return c() - return None - - def schedule(self, context, audit_id, solution): - LOG.debug('Creating an action plan for the audit uuid: %s', audit_id) - weights = self.config.weights - action_plan = self._create_action_plan(context, audit_id, solution) - - actions = list(solution.actions) - to_schedule = [] - for action in actions: - json_action = self.create_action( - action_plan_id=action_plan.id, - action_type=action.get('action_type'), - input_parameters=action.get('input_parameters')) - to_schedule.append((weights[action.get('action_type')], - json_action)) - - self._create_efficacy_indicators( - context, action_plan.id, solution.efficacy_indicators) - - # scheduling - scheduled = sorted(to_schedule, key=lambda weight: (weight[0]), - reverse=True) - if len(scheduled) == 0: - LOG.warning("The action plan is empty") - action_plan.state = objects.action_plan.State.SUCCEEDED - action_plan.save() - else: - resource_action_map = {} - scheduled_actions = [x[1] for x in scheduled] - for action in scheduled_actions: - a_type = action['action_type'] - if a_type != 'turn_host_to_acpi_s3_state': - plugin_action = self.load_child_class( - action.get("action_type")) - if not plugin_action: - raise exception.UnsupportedActionType( - action_type=action.get("action_type")) - db_action = self._create_action(context, action) - parents = plugin_action.validate_parents( - resource_action_map, action) - if parents: - db_action.parents = parents - db_action.save() - # if we have an action that will make host unreachable, we need - # to complete all actions (resize and migration type) - # related to the host. - # Note(alexchadin): turn_host_to_acpi_s3_state doesn't - # actually exist. Placed code shows relations between - # action types. - # TODO(alexchadin): add turn_host_to_acpi_s3_state action type. 
- else: - host_to_acpi_s3 = action['input_parameters']['resource_id'] - host_actions = resource_action_map.get(host_to_acpi_s3) - action_parents = [] - if host_actions: - resize_actions = [x[0] for x in host_actions - if x[1] == 'resize'] - migrate_actions = [x[0] for x in host_actions - if x[1] == 'migrate'] - resize_migration_parents = [ - x.parents for x in - [objects.Action.get_by_uuid(context, resize_action) - for resize_action in resize_actions]] - # resize_migration_parents should be one level list - resize_migration_parents = [ - parent for sublist in resize_migration_parents - for parent in sublist] - action_parents.extend([uuid for uuid in - resize_actions]) - action_parents.extend([uuid for uuid in - migrate_actions if uuid not in - resize_migration_parents]) - db_action = self._create_action(context, action) - db_action.parents = action_parents - db_action.save() - - return action_plan - - def _create_action_plan(self, context, audit_id, solution): - strategy = objects.Strategy.get_by_name( - context, solution.strategy.name) - - action_plan_dict = { - 'uuid': utils.generate_uuid(), - 'audit_id': audit_id, - 'strategy_id': strategy.id, - 'state': objects.action_plan.State.RECOMMENDED, - 'global_efficacy': solution.global_efficacy, - } - - new_action_plan = objects.ActionPlan(context, **action_plan_dict) - new_action_plan.create() - - return new_action_plan - - def _create_efficacy_indicators(self, context, action_plan_id, indicators): - efficacy_indicators = [] - for indicator in indicators: - efficacy_indicator_dict = { - 'uuid': utils.generate_uuid(), - 'name': indicator.name, - 'description': indicator.description, - 'unit': indicator.unit, - 'value': indicator.value, - 'action_plan_id': action_plan_id, - } - new_efficacy_indicator = objects.EfficacyIndicator( - context, **efficacy_indicator_dict) - new_efficacy_indicator.create() - - efficacy_indicators.append(new_efficacy_indicator) - return efficacy_indicators - - def _create_action(self, context, 
_action): - try: - LOG.debug("Creating the %s in the Watcher database", - _action.get("action_type")) - - new_action = objects.Action(context, **_action) - new_action.create() - - return new_action - except Exception as exc: - LOG.exception(exc) - raise - - -class BaseActionValidator(object): - action_name = None - - def __init__(self): - super(BaseActionValidator, self).__init__() - self._osc = None - - @property - def osc(self): - if not self._osc: - self._osc = clients.OpenStackClients() - return self._osc - - @abc.abstractmethod - def validate_parents(self, resource_action_map, action): - raise NotImplementedError() - - def _mapping(self, resource_action_map, resource_id, action_uuid, - action_type): - if resource_id not in resource_action_map: - resource_action_map[resource_id] = [(action_uuid, - action_type,)] - else: - resource_action_map[resource_id].append((action_uuid, - action_type,)) - - -class MigrationActionValidator(BaseActionValidator): - action_name = "migrate" - - def validate_parents(self, resource_action_map, action): - instance_uuid = action['input_parameters']['resource_id'] - host_name = action['input_parameters']['source_node'] - self._mapping(resource_action_map, instance_uuid, action['uuid'], - 'migrate') - self._mapping(resource_action_map, host_name, action['uuid'], - 'migrate') - - -class ResizeActionValidator(BaseActionValidator): - action_name = "resize" - - def validate_parents(self, resource_action_map, action): - nova = nova_helper.NovaHelper(osc=self.osc) - instance_uuid = action['input_parameters']['resource_id'] - parent_actions = resource_action_map.get(instance_uuid) - host_of_instance = nova.get_hostname( - nova.get_instance_by_uuid(instance_uuid)[0]) - self._mapping(resource_action_map, host_of_instance, action['uuid'], - 'resize') - if parent_actions: - return [x[0] for x in parent_actions] - else: - return [] - - -class ChangeNovaServiceStateActionValidator(BaseActionValidator): - action_name = "change_nova_service_state" 
- - def validate_parents(self, resource_action_map, action): - host_name = action['input_parameters']['resource_id'] - self._mapping(resource_action_map, host_name, action.uuid, - 'change_nova_service_state') - return [] - - -class SleepActionValidator(BaseActionValidator): - action_name = "sleep" - - def validate_parents(self, resource_action_map, action): - return [] - - -class NOPActionValidator(BaseActionValidator): - action_name = "nop" - - def validate_parents(self, resource_action_map, action): - return [] diff --git a/watcher/decision_engine/rpcapi.py b/watcher/decision_engine/rpcapi.py deleted file mode 100644 index f0e0e2a..0000000 --- a/watcher/decision_engine/rpcapi.py +++ /dev/null @@ -1,72 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# Copyright (c) 2016 Intel Corp -# -# Authors: Jean-Emile DARTOIS -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - -from watcher.common import exception -from watcher.common import service -from watcher.common import service_manager -from watcher.common import utils - -from watcher import conf - -CONF = conf.CONF - - -class DecisionEngineAPI(service.Service): - - def __init__(self): - super(DecisionEngineAPI, self).__init__(DecisionEngineAPIManager) - - def trigger_audit(self, context, audit_uuid=None): - if not utils.is_uuid_like(audit_uuid): - raise exception.InvalidUuidOrName(name=audit_uuid) - - self.conductor_client.cast( - context, 'trigger_audit', audit_uuid=audit_uuid) - - -class DecisionEngineAPIManager(service_manager.ServiceManager): - - @property - def service_name(self): - return None - - @property - def api_version(self): - return '1.0' - - @property - def publisher_id(self): - return CONF.watcher_decision_engine.publisher_id - - @property - def conductor_topic(self): - return CONF.watcher_decision_engine.conductor_topic - - @property - def notification_topics(self): - return [] - - @property - def conductor_endpoints(self): - return [] - - @property - def notification_endpoints(self): - return [] diff --git a/watcher/decision_engine/scheduling.py b/watcher/decision_engine/scheduling.py deleted file mode 100644 index 4ef0481..0000000 --- a/watcher/decision_engine/scheduling.py +++ /dev/null @@ -1,108 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import datetime - -import eventlet -from oslo_log import log - -from watcher.common import context -from watcher.common import exception -from watcher.common import scheduling - -from watcher.decision_engine.model.collector import manager -from watcher import objects - -from watcher import conf - -LOG = log.getLogger(__name__) -CONF = conf.CONF - - -class DecisionEngineSchedulingService(scheduling.BackgroundSchedulerService): - - def __init__(self, gconfig=None, **options): - gconfig = None or {} - super(DecisionEngineSchedulingService, self).__init__( - gconfig, **options) - self.collector_manager = manager.CollectorManager() - - @property - def collectors(self): - return self.collector_manager.get_collectors() - - def add_sync_jobs(self): - for name, collector in self.collectors.items(): - timed_task = self._wrap_collector_sync_with_timeout( - collector, name) - self.add_job(timed_task, - trigger='interval', - seconds=collector.config.period, - next_run_time=datetime.datetime.now()) - - def _as_timed_sync_func(self, sync_func, name, timeout): - def _timed_sync(): - with eventlet.Timeout( - timeout, - exception=exception.ClusterDataModelCollectionError(cdm=name) - ): - sync_func() - - return _timed_sync - - def _wrap_collector_sync_with_timeout(self, collector, name): - """Add an execution timeout constraint on a function""" - timeout = collector.config.period - - def _sync(): - try: - timed_sync = self._as_timed_sync_func( - collector.synchronize, name, timeout) - timed_sync() - except Exception as exc: - LOG.exception(exc) - collector.set_cluster_data_model_as_stale() - - return _sync - - def add_checkstate_job(self): - # 30 minutes interval - interval = CONF.watcher_decision_engine.check_periodic_interval - ap_manager = objects.action_plan.StateManager() - if CONF.watcher_decision_engine.action_plan_expiry != 0: - self.add_job(ap_manager.check_expired, 'interval', - args=[context.make_context()], - seconds=interval, - next_run_time=datetime.datetime.now()) - 
- def start(self): - """Start service.""" - self.add_sync_jobs() - self.add_checkstate_job() - super(DecisionEngineSchedulingService, self).start() - - def stop(self): - """Stop service.""" - self.shutdown() - - def wait(self): - """Wait for service to complete.""" - - def reset(self): - """Reset service. - - Called in case service running in daemon mode receives SIGHUP. - """ diff --git a/watcher/decision_engine/scope/__init__.py b/watcher/decision_engine/scope/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/decision_engine/scope/base.py b/watcher/decision_engine/scope/base.py deleted file mode 100644 index 76f1746..0000000 --- a/watcher/decision_engine/scope/base.py +++ /dev/null @@ -1,39 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 Servionica -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -import abc -import six - -from watcher.common import context - - -@six.add_metaclass(abc.ABCMeta) -class BaseScope(object): - """A base class for Scope mechanism - - Child of this class is called when audit launches strategy. This strategy - requires Cluster Data Model which can be segregated to achieve audit scope. 
- """ - - def __init__(self, scope, config): - self.ctx = context.make_context() - self.scope = scope - self.config = config - - @abc.abstractmethod - def get_scoped_model(self, cluster_model): - """Leave only nodes and instances proposed in the audit scope""" diff --git a/watcher/decision_engine/scope/default.py b/watcher/decision_engine/scope/default.py deleted file mode 100644 index 4e74f6e..0000000 --- a/watcher/decision_engine/scope/default.py +++ /dev/null @@ -1,263 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 Servionica -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from oslo_log import log - -from watcher.common import exception -from watcher.common import nova_helper -from watcher.decision_engine.scope import base - - -LOG = log.getLogger(__name__) - - -class DefaultScope(base.BaseScope): - """Default Audit Scope Handler""" - - DEFAULT_SCHEMA = { - "$schema": "http://json-schema.org/draft-04/schema#", - "type": "array", - "items": { - "type": "object", - "properties": { - "host_aggregates": { - "type": "array", - "items": { - "type": "object", - "properties": { - "anyOf": [ - {"type": ["string", "number"]} - ] - }, - } - }, - "availability_zones": { - "type": "array", - "items": { - "type": "object", - "properties": { - "name": { - "type": "string" - } - }, - "additionalProperties": False - } - }, - "exclude": { - "type": "array", - "items": { - "type": "object", - "properties": { - "instances": { - "type": "array", - "items": { - "type": "object", - "properties": { - "uuid": { - "type": "string" - } - } - } - }, - "compute_nodes": { - "type": "array", - "items": { - "type": "object", - "properties": { - "name": { - "type": "string" - } - } - } - }, - "host_aggregates": { - "type": "array", - "items": { - "type": "object", - "properties": { - "anyOf": [ - {"type": ["string", "number"]} - ] - }, - } - }, - "instance_metadata": { - "type": "array", - "items": { - "type": "object" - } - } - }, - "additionalProperties": False - } - } - }, - "additionalProperties": False - } - } - - def __init__(self, scope, config, osc=None): - super(DefaultScope, self).__init__(scope, config) - self._osc = osc - self.wrapper = nova_helper.NovaHelper(osc=self._osc) - - def remove_instance(self, cluster_model, instance, node_name): - node = cluster_model.get_node_by_uuid(node_name) - cluster_model.delete_instance(instance, node) - - def _check_wildcard(self, aggregate_list): - if '*' in aggregate_list: - if len(aggregate_list) == 1: - return True - else: - raise exception.WildcardCharacterIsUsed( - resource="host aggregates") - return False - 
- def _collect_aggregates(self, host_aggregates, compute_nodes): - aggregate_list = self.wrapper.get_aggregate_list() - aggregate_ids = [aggregate['id'] for aggregate - in host_aggregates if 'id' in aggregate] - aggregate_names = [aggregate['name'] for aggregate - in host_aggregates if 'name' in aggregate] - include_all_nodes = any(self._check_wildcard(field) - for field in (aggregate_ids, aggregate_names)) - - for aggregate in aggregate_list: - detailed_aggregate = self.wrapper.get_aggregate_detail( - aggregate.id) - if (detailed_aggregate.id in aggregate_ids or - detailed_aggregate.name in aggregate_names or - include_all_nodes): - compute_nodes.extend(detailed_aggregate.hosts) - - def _collect_zones(self, availability_zones, allowed_nodes): - zone_list = self.wrapper.get_availability_zone_list() - zone_names = [zone['name'] for zone - in availability_zones] - include_all_nodes = False - if '*' in zone_names: - if len(zone_names) == 1: - include_all_nodes = True - else: - raise exception.WildcardCharacterIsUsed( - resource="availability zones") - for zone in zone_list: - if zone.zoneName in zone_names or include_all_nodes: - allowed_nodes.extend(zone.hosts.keys()) - - def exclude_resources(self, resources, **kwargs): - instances_to_exclude = kwargs.get('instances') - nodes_to_exclude = kwargs.get('nodes') - instance_metadata = kwargs.get('instance_metadata') - - for resource in resources: - if 'instances' in resource: - instances_to_exclude.extend( - [instance['uuid'] for instance - in resource['instances']]) - elif 'compute_nodes' in resource: - nodes_to_exclude.extend( - [host['name'] for host - in resource['compute_nodes']]) - elif 'host_aggregates' in resource: - prohibited_nodes = [] - self._collect_aggregates(resource['host_aggregates'], - prohibited_nodes) - nodes_to_exclude.extend(prohibited_nodes) - elif 'instance_metadata' in resource: - instance_metadata.extend( - [metadata for metadata in resource['instance_metadata']]) - - def 
remove_nodes_from_model(self, nodes_to_remove, cluster_model): - for node_uuid in nodes_to_remove: - node = cluster_model.get_node_by_uuid(node_uuid) - instances = cluster_model.get_node_instances(node) - for instance in instances: - self.remove_instance(cluster_model, instance, node_uuid) - cluster_model.remove_node(node) - - def remove_instances_from_model(self, instances_to_remove, cluster_model): - for instance_uuid in instances_to_remove: - try: - node_name = cluster_model.get_node_by_instance_uuid( - instance_uuid).uuid - except exception.ComputeResourceNotFound: - LOG.warning("The following instance %s cannot be found. " - "It might be deleted from CDM along with node" - " instance was hosted on.", - instance_uuid) - continue - self.remove_instance( - cluster_model, - cluster_model.get_instance_by_uuid(instance_uuid), - node_name) - - def exclude_instances_with_given_metadata( - self, instance_metadata, cluster_model, instances_to_remove): - metadata_dict = { - key: val for d in instance_metadata for key, val in d.items()} - instances = cluster_model.get_all_instances() - for uuid, instance in instances.items(): - metadata = instance.metadata - common_metadata = set(metadata_dict) & set(metadata) - if common_metadata and len(common_metadata) == len(metadata_dict): - for key, value in metadata_dict.items(): - if str(value).lower() == str(metadata.get(key)).lower(): - instances_to_remove.add(uuid) - - def get_scoped_model(self, cluster_model): - """Leave only nodes and instances proposed in the audit scope""" - if not cluster_model: - return None - - allowed_nodes = [] - nodes_to_exclude = [] - nodes_to_remove = set() - instances_to_exclude = [] - instance_metadata = [] - model_hosts = list(cluster_model.get_all_compute_nodes().keys()) - - if not self.scope: - return cluster_model - - for rule in self.scope: - if 'host_aggregates' in rule: - self._collect_aggregates(rule['host_aggregates'], - allowed_nodes) - elif 'availability_zones' in rule: - 
self._collect_zones(rule['availability_zones'], - allowed_nodes) - elif 'exclude' in rule: - self.exclude_resources( - rule['exclude'], instances=instances_to_exclude, - nodes=nodes_to_exclude, - instance_metadata=instance_metadata) - - instances_to_remove = set(instances_to_exclude) - if allowed_nodes: - nodes_to_remove = set(model_hosts) - set(allowed_nodes) - nodes_to_remove.update(nodes_to_exclude) - - self.remove_nodes_from_model(nodes_to_remove, cluster_model) - - if instance_metadata and self.config.check_optimize_metadata: - self.exclude_instances_with_given_metadata( - instance_metadata, cluster_model, instances_to_remove) - - self.remove_instances_from_model(instances_to_remove, cluster_model) - - return cluster_model diff --git a/watcher/decision_engine/scoring/__init__.py b/watcher/decision_engine/scoring/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/decision_engine/scoring/base.py b/watcher/decision_engine/scoring/base.py deleted file mode 100644 index 3fcc68b..0000000 --- a/watcher/decision_engine/scoring/base.py +++ /dev/null @@ -1,126 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 Intel -# -# Authors: Tomasz Kaczynski -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import abc -import six - -from watcher.common.loader import loadable - - -@six.add_metaclass(abc.ABCMeta) -class ScoringEngine(loadable.Loadable): - """A base class for all the Scoring Engines. 
- - A Scoring Engine is an instance of a data model, to which the learning - data was applied. - - Please note that this class contains non-static and non-class methods by - design, so that it's easy to create multiple Scoring Engine instances - using a single class (possibly configured differently). - """ - - @abc.abstractmethod - def get_name(self): - """Returns the name of the Scoring Engine. - - The name should be unique across all Scoring Engines. - - :return: A Scoring Engine name - :rtype: str - """ - - @abc.abstractmethod - def get_description(self): - """Returns the description of the Scoring Engine. - - The description might contain any human readable information, which - might be useful for Strategy developers planning to use this Scoring - Engine. It will be also visible in the Watcher API and CLI. - - :return: A Scoring Engine description - :rtype: str - """ - - @abc.abstractmethod - def get_metainfo(self): - """Returns the metadata information about Scoring Engine. - - The metadata might contain a machine-friendly (e.g. in JSON format) - information needed to use this Scoring Engine. For example, some - Scoring Engines require to pass the array of features in particular - order to be able to calculate the score value. This order can be - defined in metadata and used in Strategy. - - :return: A Scoring Engine metadata - :rtype: str - """ - - @abc.abstractmethod - def calculate_score(self, features): - """Calculates a score value based on arguments passed. - - Scoring Engines might be very different to each other. They might - solve different problems or use different algorithms or frameworks - internally. To enable this kind of flexibility, the method takes only - one argument (string) and produces the results in the same format - (string). The consumer of the Scoring Engine is ultimately responsible - for providing the right arguments and parsing the result. 
- - :param features: Input data for Scoring Engine - :type features: str - :return: A score result - :rtype: str - """ - - @classmethod - def get_config_opts(cls): - """Defines the configuration options to be associated to this loadable - - :return: A list of configuration options relative to this Loadable - :rtype: list of :class:`oslo_config.cfg.Opt` instances - """ - return [] - - -@six.add_metaclass(abc.ABCMeta) -class ScoringEngineContainer(loadable.Loadable): - """A base class for all the Scoring Engines Containers. - - A Scoring Engine Container is an abstraction which allows to plugin - multiple Scoring Engines as a single Stevedore plugin. This enables some - more advanced scenarios like dynamic reloading of Scoring Engine - implementations without having to restart any Watcher services. - """ - - @classmethod - @abc.abstractmethod - def get_scoring_engine_list(self): - """Returns a list of Scoring Engine instances. - - :return: A list of Scoring Engine instances - :rtype: :class: `~.scoring_engine.ScoringEngine` - """ - - @classmethod - def get_config_opts(cls): - """Defines the configuration options to be associated to this loadable - - :return: A list of configuration options relative to this Loadable - :rtype: list of :class:`oslo_config.cfg.Opt` instances - """ - return [] diff --git a/watcher/decision_engine/scoring/dummy_scorer.py b/watcher/decision_engine/scoring/dummy_scorer.py deleted file mode 100644 index 735dbac..0000000 --- a/watcher/decision_engine/scoring/dummy_scorer.py +++ /dev/null @@ -1,169 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 Intel -# -# Authors: Tomasz Kaczynski -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from oslo_log import log -from oslo_serialization import jsonutils -from oslo_utils import units - -from watcher._i18n import _ -from watcher.decision_engine.scoring import base - -LOG = log.getLogger(__name__) - - -class DummyScorer(base.ScoringEngine): - """Sample Scoring Engine implementing simplified workload classification. - - Typically a scoring engine would be implemented using machine learning - techniques. For example, for workload classification problem the solution - could consist of the following steps: - - 1. Define a problem to solve: we want to detect the workload on the - machine based on the collected metrics like power consumption, - temperature, CPU load, memory usage, disk usage, network usage, etc. - 2. The workloads could be predefined, e.g. IDLE, CPU-INTENSIVE, - MEMORY-INTENSIVE, IO-BOUND, ... - Or we could let the ML algorithm to find the workloads based on the - learning data provided. The decision here leads to learning algorithm - used (supervised vs. non-supervised learning). - 3. Collect metrics from sample servers (learning data). - 4. Define the analytical model, pick ML framework and algorithm. - 5. Apply learning data to the data model. Once taught, the data model - becomes a scoring engine and can start doing predictions or - classifications. - 6. Wrap up the scoring engine with the class like this one, so it has a - standard interface and can be used inside Watcher. - - This class is a greatly very simplified version of the above model. 
The - goal is to provide an example how such class could be implemented and used - in Watcher, without adding additional dependencies like machine learning - frameworks (which can be quite heavy) or over-complicating it's internal - implementation, which can distract from looking at the overall picture. - - That said, this class implements a workload classification "manually" - (in plain python code) and is not intended to be used in production. - """ - - # Constants defining column indices for the input data - PROCESSOR_TIME_PERC = 0 - MEM_TOTAL_BYTES = 1 - MEM_AVAIL_BYTES = 2 - MEM_PAGE_READS_PER_SEC = 3 - MEM_PAGE_WRITES_PER_SEC = 4 - DISK_READ_BYTES_PER_SEC = 5 - DISK_WRITE_BYTES_PER_SEC = 6 - NET_BYTES_RECEIVED_PER_SEC = 7 - NET_BYTES_SENT_PER_SEC = 8 - - # Types of workload - WORKLOAD_IDLE = 0 - WORKLOAD_CPU = 1 - WORKLOAD_MEM = 2 - WORKLOAD_DISK = 3 - - def get_name(self): - return 'dummy_scorer' - - def get_description(self): - return 'Dummy workload classifier' - - def get_metainfo(self): - """Metadata about input/output format of this scoring engine. - - This information is used in strategy using this scoring engine to - prepare the input information and to understand the results. - """ - - return """{ - "feature_columns": [ - "proc-processor-time-%", - "mem-total-bytes", - "mem-avail-bytes", - "mem-page-reads/sec", - "mem-page-writes/sec", - "disk-read-bytes/sec", - "disk-write-bytes/sec", - "net-bytes-received/sec", - "net-bytes-sent/sec"], - "result_columns": [ - "workload", - "idle-probability", - "cpu-probability", - "memory-probability", - "disk-probability"], - "workloads": [ - "idle", - "cpu-intensive", - "memory-intensive", - "disk-intensive"] - }""" - - def calculate_score(self, features): - """Arbitrary algorithm calculating the score. - - It demonstrates how to parse the input data (features) and serialize - the results. 
It detects the workload type based on the metrics and - also returns the probabilities of each workload detection (again, - the arbitrary values are returned, just for demonstration how the - "real" machine learning algorithm could work. For example, the - Gradient Boosting Machine from H2O framework is using exactly the - same format: - http://www.h2o.ai/verticals/algos/gbm/ - """ - - LOG.debug('Calculating score, features: %s', features) - - # By default IDLE workload will be returned - workload = self.WORKLOAD_IDLE - idle_prob = 0.0 - cpu_prob = 0.0 - mem_prob = 0.0 - disk_prob = 0.0 - - # Basic input validation - try: - flist = jsonutils.loads(features) - except Exception as e: - raise ValueError(_('Unable to parse features: ') % e) - if type(flist) is not list: - raise ValueError(_('JSON list expected in feature argument')) - if len(flist) != 9: - raise ValueError(_('Invalid number of features, expected 9')) - - # Simple logic for workload classification - if flist[self.PROCESSOR_TIME_PERC] >= 80: - workload = self.WORKLOAD_CPU - cpu_prob = 100.0 - elif flist[self.MEM_PAGE_READS_PER_SEC] >= 1000 \ - and flist[self.MEM_PAGE_WRITES_PER_SEC] >= 1000: - workload = self.WORKLOAD_MEM - mem_prob = 100.0 - elif flist[self.DISK_READ_BYTES_PER_SEC] >= 50*units.Mi \ - and flist[self.DISK_WRITE_BYTES_PER_SEC] >= 50*units.Mi: - workload = self.WORKLOAD_DISK - disk_prob = 100.0 - else: - idle_prob = 100.0 - if flist[self.PROCESSOR_TIME_PERC] >= 40: - cpu_prob = 50.0 - if flist[self.MEM_PAGE_READS_PER_SEC] >= 500 \ - or flist[self.MEM_PAGE_WRITES_PER_SEC] >= 500: - mem_prob = 50.0 - - return jsonutils.dumps( - [workload, idle_prob, cpu_prob, mem_prob, disk_prob]) diff --git a/watcher/decision_engine/scoring/dummy_scoring_container.py b/watcher/decision_engine/scoring/dummy_scoring_container.py deleted file mode 100644 index 5b89bdf..0000000 --- a/watcher/decision_engine/scoring/dummy_scoring_container.py +++ /dev/null @@ -1,98 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright 
(c) 2016 Intel -# -# Authors: Tomasz Kaczynski -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -from oslo_log import log -from oslo_serialization import jsonutils - -from watcher._i18n import _ -from watcher.decision_engine.scoring import base - -LOG = log.getLogger(__name__) - - -class DummyScoringContainer(base.ScoringEngineContainer): - """Sample Scoring Engine container returning a list of scoring engines. - - Please note that it can be used in dynamic scenarios and the returned list - might return instances based on some external configuration (e.g. in - database). In order for these scoring engines to become discoverable in - Watcher API and Watcher CLI, a database re-sync is required. It can be - executed using watcher-sync tool for example. - """ - - @classmethod - def get_scoring_engine_list(self): - return [ - SimpleFunctionScorer( - 'dummy_min_scorer', - 'Dummy Scorer calculating the minimum value', - min), - SimpleFunctionScorer( - 'dummy_max_scorer', - 'Dummy Scorer calculating the maximum value', - max), - SimpleFunctionScorer( - 'dummy_avg_scorer', - 'Dummy Scorer calculating the average value', - lambda x: float(sum(x)) / len(x)), - ] - - -class SimpleFunctionScorer(base.ScoringEngine): - """A simple generic scoring engine for demonstration purposes only. - - A generic scoring engine implementation, which is expecting a JSON - formatted array of numbers to be passed as an input for score calculation. 
- It then executes the aggregate function on this array and returns an - array with a single aggregated number (also JSON formatted). - """ - - def __init__(self, name, description, aggregate_function): - super(SimpleFunctionScorer, self).__init__(config=None) - self._name = name - self._description = description - self._aggregate_function = aggregate_function - - def get_name(self): - return self._name - - def get_description(self): - return self._description - - def get_metainfo(self): - return '' - - def calculate_score(self, features): - LOG.debug('Calculating score, features: %s', features) - - # Basic input validation - try: - flist = jsonutils.loads(features) - except Exception as e: - raise ValueError(_('Unable to parse features: %s') % e) - if type(flist) is not list: - raise ValueError(_('JSON list expected in feature argument')) - if len(flist) < 1: - raise ValueError(_('At least one feature is required')) - - # Calculate the result - result = self._aggregate_function(flist) - - # Return the aggregated result - return jsonutils.dumps([result]) diff --git a/watcher/decision_engine/scoring/scoring_factory.py b/watcher/decision_engine/scoring/scoring_factory.py deleted file mode 100644 index c716cff..0000000 --- a/watcher/decision_engine/scoring/scoring_factory.py +++ /dev/null @@ -1,106 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 Intel -# -# Authors: Tomasz Kaczynski -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -""" -A module providing helper methods to work with Scoring Engines. -""" - -from oslo_log import log - -from watcher._i18n import _ -from watcher.decision_engine.loading import default - - -LOG = log.getLogger(__name__) - -_scoring_engine_map = None - - -def get_scoring_engine(scoring_engine_name): - """Returns a Scoring Engine by its name. - - Method retrieves a Scoring Engine instance by its name. Scoring Engine - instances are being cached in memory to avoid enumerating the Stevedore - plugins on each call. - - When called for the first time, it reloads the cache. - - :return: A Scoring Engine instance with a given name - :rtype: :class: - `watcher.decision_engine.scoring.scoring_engine.ScoringEngine` - """ - global _scoring_engine_map - - _reload_scoring_engines() - scoring_engine = _scoring_engine_map.get(scoring_engine_name) - if scoring_engine is None: - raise KeyError(_('Scoring Engine with name=%s not found') - % scoring_engine_name) - - return scoring_engine - - -def get_scoring_engine_list(): - """Returns a list of Scoring Engine instances. - - The main use case for this method is discoverability, so the Scoring - Engine list is always reloaded before returning any results. - - Frequent calling of this method might have a negative performance impact. - - :return: A list of all available Scoring Engine instances - :rtype: List of :class: - `watcher.decision_engine.scoring.scoring_engine.ScoringEngine` - """ - global _scoring_engine_map - - _reload_scoring_engines(True) - return _scoring_engine_map.values() - - -def _reload_scoring_engines(refresh=False): - """Reloads Scoring Engines from Stevedore plugins to memory. 
- - Please note that two Stevedore entry points are used: - - watcher_scoring_engines: for simple plugin implementations - - watcher_scoring_engine_containers: for container plugins, which enable - the dynamic scenarios (its get_scoring_engine_list method might return - different values on each call) - """ - global _scoring_engine_map - - if _scoring_engine_map is None or refresh: - LOG.debug("Reloading Scoring Engine plugins") - engines = default.DefaultScoringLoader().list_available() - _scoring_engine_map = dict() - - for name in engines.keys(): - se_impl = default.DefaultScoringLoader().load(name) - LOG.debug("Found Scoring Engine plugin: %s" % se_impl.get_name()) - _scoring_engine_map[se_impl.get_name()] = se_impl - - engine_containers = \ - default.DefaultScoringContainerLoader().list_available() - - for container_id, container_cls in engine_containers.items(): - LOG.debug("Found Scoring Engine container plugin: %s" % - container_id) - for se in container_cls.get_scoring_engine_list(): - LOG.debug("Found Scoring Engine plugin: %s" % - se.get_name()) - _scoring_engine_map[se.get_name()] = se diff --git a/watcher/decision_engine/solution/__init__.py b/watcher/decision_engine/solution/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/decision_engine/solution/base.py b/watcher/decision_engine/solution/base.py deleted file mode 100644 index 3aa895c..0000000 --- a/watcher/decision_engine/solution/base.py +++ /dev/null @@ -1,117 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Authors: Jean-Emile DARTOIS -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -""" -A :ref:`Solution ` is the result of execution of a -:ref:`strategy ` (i.e., an algorithm). -Each solution is composed of many pieces of information: - -- A set of :ref:`actions ` generated by the strategy in - order to achieve the :ref:`goal ` of an associated - :ref:`audit `. -- A set of :ref:`efficacy indicators ` as - defined by the associated goal -- A :ref:`global efficacy ` which is computed by the - associated goal using the aforementioned efficacy indicators. - -A :ref:`Solution ` is different from an -:ref:`Action Plan ` because it contains the -non-scheduled list of :ref:`Actions ` which is produced by a -:ref:`Strategy `. In other words, the list of Actions in -a :ref:`Solution ` has not yet been re-ordered by the -:ref:`Watcher Planner `. - -Note that some algorithms (i.e. :ref:`Strategies `) may -generate several :ref:`Solutions `. This gives rise to the -problem of determining which :ref:`Solution ` should be -applied. - -Two approaches to dealing with this can be envisaged: - -- **fully automated mode**: only the :ref:`Solution ` - with the highest ranking (i.e., the highest - :ref:`Optimization Efficacy `) will be sent to the - :ref:`Watcher Planner ` and translated into - concrete :ref:`Actions `. -- **manual mode**: several :ref:`Solutions ` are proposed - to the :ref:`Administrator ` with a detailed - measurement of the estimated :ref:`Optimization Efficacy - ` and he/she decides which one will be launched. 
-""" - -import abc -import six - -from watcher.decision_engine.solution import efficacy - - -@six.add_metaclass(abc.ABCMeta) -class BaseSolution(object): - def __init__(self, goal, strategy): - """Base Solution constructor - - :param goal: Goal associated to this solution - :type goal: :py:class:`~.base.Goal` instance - :param strategy: Strategy associated to this solution - :type strategy: :py:class:`~.BaseStrategy` instance - """ - self.goal = goal - self.strategy = strategy - self.origin = None - self.model = None - self.efficacy = efficacy.Efficacy(self.goal, self.strategy) - - @property - def global_efficacy(self): - return self.efficacy.global_efficacy - - @property - def efficacy_indicators(self): - return self.efficacy.indicators - - def compute_global_efficacy(self): - """Compute the global efficacy given a map of efficacy indicators""" - self.efficacy.compute_global_efficacy() - - def set_efficacy_indicators(self, **indicators_map): - """Set the efficacy indicators mapping (no validation) - - :param indicators_map: mapping between the indicator name and its value - :type indicators_map: dict {`str`: `object`} - """ - self.efficacy.set_efficacy_indicators(**indicators_map) - - @abc.abstractmethod - def add_action(self, action_type, resource_id, input_parameters=None): - """Add a new Action in the Solution - - :param action_type: the unique id of an action type defined in - entry point 'watcher_actions' - :param resource_id: the unique id of the resource to which the - `Action` applies. - :param input_parameters: An array of input parameters provided as - key-value pairs of strings. Each key-pair contains names and - values that match what was previously defined in the `Action` - type schema. 
- """ - raise NotImplementedError() - - @abc.abstractproperty - def actions(self): - raise NotImplementedError() diff --git a/watcher/decision_engine/solution/default.py b/watcher/decision_engine/solution/default.py deleted file mode 100644 index be895df..0000000 --- a/watcher/decision_engine/solution/default.py +++ /dev/null @@ -1,69 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Authors: Jean-Emile DARTOIS -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from oslo_log import log - -from watcher.applier.actions import base as baction -from watcher.common import exception -from watcher.decision_engine.solution import base - -LOG = log.getLogger(__name__) - - -class DefaultSolution(base.BaseSolution): - def __init__(self, goal, strategy): - """Stores a set of actions generated by a strategy - - The DefaultSolution class store a set of actions generated by a - strategy in order to achieve the goal. - - :param goal: Goal associated to this solution - :type goal: :py:class:`~.base.Goal` instance - :param strategy: Strategy associated to this solution - :type strategy: :py:class:`~.BaseStrategy` instance - """ - super(DefaultSolution, self).__init__(goal, strategy) - self._actions = [] - - def add_action(self, action_type, input_parameters=None, resource_id=None): - if input_parameters is not None: - if baction.BaseAction.RESOURCE_ID in input_parameters.keys(): - raise exception.ReservedWord(name=baction.BaseAction. 
- RESOURCE_ID) - else: - input_parameters = {} - - if resource_id is not None: - input_parameters[baction.BaseAction.RESOURCE_ID] = resource_id - action = { - 'action_type': action_type, - 'input_parameters': input_parameters - } - if action not in self._actions: - self._actions.append(action) - else: - LOG.warning('Action %s has been added into the solution, ' - 'duplicate action will be dropped.', str(action)) - - def __str__(self): - return "\n".join(self._actions) - - @property - def actions(self): - """Get the current actions of the solution""" - return self._actions diff --git a/watcher/decision_engine/solution/efficacy.py b/watcher/decision_engine/solution/efficacy.py deleted file mode 100644 index 108e78f..0000000 --- a/watcher/decision_engine/solution/efficacy.py +++ /dev/null @@ -1,105 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import numbers - -from oslo_log import log as logging - -from watcher._i18n import _ -from watcher.common import exception -from watcher.common import utils - -LOG = logging.getLogger(__name__) - - -class IndicatorsMap(utils.Struct): - pass - - -class Indicator(utils.Struct): - - def __init__(self, name, description, unit, value): - super(Indicator, self).__init__() - self.name = name - self.description = description - self.unit = unit - if not isinstance(value, numbers.Number): - raise exception.InvalidIndicatorValue( - _("An indicator value should be a number")) - self.value = value - - -class Efficacy(object): - """Solution efficacy""" - - def __init__(self, goal, strategy): - """Solution efficacy - - :param goal: Goal associated to this solution - :type goal: :py:class:`~.base.Goal` instance - :param strategy: Strategy associated to this solution - :type strategy: :py:class:`~.BaseStrategy` instance - """ - self.goal = goal - self.strategy = strategy - - self._efficacy_spec = self.goal.efficacy_specification - - # Used to store in DB the info related to the efficacy indicators - self.indicators = [] - # Used to compute the global efficacy - self._indicators_mapping = IndicatorsMap() - self.global_efficacy = None - - def set_efficacy_indicators(self, **indicators_map): - """Set the efficacy indicators - - :param indicators_map: kwargs where the key is the name of the efficacy - indicator as defined in the associated - :py:class:`~.IndicatorSpecification` and the - value is a number. 
- :type indicators_map: dict {str: numerical value} - """ - self._indicators_mapping.update(indicators_map) - - def compute_global_efficacy(self): - self._efficacy_spec.validate_efficacy_indicators( - self._indicators_mapping) - try: - self.global_efficacy = ( - self._efficacy_spec.get_global_efficacy_indicator( - self._indicators_mapping)) - - indicators_specs_map = { - indicator_spec.name: indicator_spec - for indicator_spec in self._efficacy_spec.indicators_specs} - - indicators = [] - for indicator_name, value in self._indicators_mapping.items(): - related_indicator_spec = indicators_specs_map[indicator_name] - indicators.append( - Indicator( - name=related_indicator_spec.name, - description=related_indicator_spec.description, - unit=related_indicator_spec.unit, - value=value)) - - self.indicators = indicators - except Exception as exc: - LOG.exception(exc) - raise exception.GlobalEfficacyComputationError( - goal=self.goal.name, - strategy=self.strategy.name) diff --git a/watcher/decision_engine/solution/solution_comparator.py b/watcher/decision_engine/solution/solution_comparator.py deleted file mode 100644 index 254cd6c..0000000 --- a/watcher/decision_engine/solution/solution_comparator.py +++ /dev/null @@ -1,27 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Authors: Jean-Emile DARTOIS -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import abc -import six - - -@six.add_metaclass(abc.ABCMeta) -class BaseSolutionComparator(object): - @abc.abstractmethod - def compare(self, sol1, sol2): - raise NotImplementedError() diff --git a/watcher/decision_engine/solution/solution_evaluator.py b/watcher/decision_engine/solution/solution_evaluator.py deleted file mode 100644 index b36b70f..0000000 --- a/watcher/decision_engine/solution/solution_evaluator.py +++ /dev/null @@ -1,27 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Authors: Jean-Emile DARTOIS -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import abc -import six - - -@six.add_metaclass(abc.ABCMeta) -class BaseSolutionEvaluator(object): - @abc.abstractmethod - def evaluate(self, solution): - raise NotImplementedError() diff --git a/watcher/decision_engine/strategy/__init__.py b/watcher/decision_engine/strategy/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/decision_engine/strategy/common/__init__.py b/watcher/decision_engine/strategy/common/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/decision_engine/strategy/common/level.py b/watcher/decision_engine/strategy/common/level.py deleted file mode 100644 index 83d95c0..0000000 --- a/watcher/decision_engine/strategy/common/level.py +++ /dev/null @@ -1,27 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Authors: Jean-Emile DARTOIS -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - -import enum - - -class StrategyLevel(enum.Enum): - conservative = "conservative" - balanced = "balanced" - growth = "growth" - aggressive = "aggressive" diff --git a/watcher/decision_engine/strategy/context/__init__.py b/watcher/decision_engine/strategy/context/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/decision_engine/strategy/context/base.py b/watcher/decision_engine/strategy/context/base.py deleted file mode 100644 index 37286b2..0000000 --- a/watcher/decision_engine/strategy/context/base.py +++ /dev/null @@ -1,70 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Authors: Jean-Emile DARTOIS -# Vincent FRANCOISE -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import abc -import six - -from watcher import notifications -from watcher.objects import fields - - -@six.add_metaclass(abc.ABCMeta) -class StrategyContext(object): - - def execute_strategy(self, audit, request_context): - """Execute the strategy for the given an audit - - :param audit: Audit object - :type audit: :py:class:`~.objects.audit.Audit` instance - :param request_context: Current request context - :type request_context: :py:class:`~.RequestContext` instance - :returns: The computed solution - :rtype: :py:class:`~.BaseSolution` instance - """ - try: - notifications.audit.send_action_notification( - request_context, audit, - action=fields.NotificationAction.STRATEGY, - phase=fields.NotificationPhase.START) - solution = self.do_execute_strategy(audit, request_context) - notifications.audit.send_action_notification( - request_context, audit, - action=fields.NotificationAction.STRATEGY, - phase=fields.NotificationPhase.END) - return solution - except Exception: - notifications.audit.send_action_notification( - request_context, audit, - action=fields.NotificationAction.STRATEGY, - priority=fields.NotificationPriority.ERROR, - phase=fields.NotificationPhase.ERROR) - raise - - @abc.abstractmethod - def do_execute_strategy(self, audit, request_context): - """Execute the strategy for the given an audit - - :param audit: Audit object - :type audit: :py:class:`~.objects.audit.Audit` instance - :param request_context: Current request context - :type request_context: :py:class:`~.RequestContext` instance - :returns: The computed solution - :rtype: :py:class:`~.BaseSolution` instance - """ - raise NotImplementedError() diff --git a/watcher/decision_engine/strategy/context/default.py b/watcher/decision_engine/strategy/context/default.py deleted file mode 100644 index e0b56ba..0000000 --- a/watcher/decision_engine/strategy/context/default.py +++ /dev/null @@ -1,68 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Licensed under the Apache License, 
Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from oslo_log import log - -from watcher.common import clients -from watcher.common import utils -from watcher.decision_engine.strategy.context import base -from watcher.decision_engine.strategy.selection import default - -from watcher import objects - -LOG = log.getLogger(__name__) - - -class DefaultStrategyContext(base.StrategyContext): - def __init__(self): - super(DefaultStrategyContext, self).__init__() - LOG.debug("Initializing Strategy Context") - - def do_execute_strategy(self, audit, request_context): - osc = clients.OpenStackClients() - # todo(jed) retrieve in audit parameters (threshold,...) - # todo(jed) create ActionPlan - - goal = objects.Goal.get_by_id(request_context, audit.goal_id) - - # NOTE(jed56) In the audit object, the 'strategy_id' attribute - # is optional. If the admin wants to force the trigger of a Strategy - # it could specify the Strategy uuid in the Audit. 
- strategy_name = None - if audit.strategy_id: - strategy = objects.Strategy.get_by_id( - request_context, audit.strategy_id) - strategy_name = strategy.name - - strategy_selector = default.DefaultStrategySelector( - goal_name=goal.name, - strategy_name=strategy_name, - osc=osc) - - selected_strategy = strategy_selector.select() - - selected_strategy.audit_scope = audit.scope - - schema = selected_strategy.get_schema() - if not audit.parameters and schema: - # Default value feedback if no predefined strategy - utils.StrictDefaultValidatingDraft4Validator(schema).validate( - audit.parameters) - - selected_strategy.input_parameters.update({ - name: value for name, value in audit.parameters.items() - }) - - return selected_strategy.execute() diff --git a/watcher/decision_engine/strategy/selection/__init__.py b/watcher/decision_engine/strategy/selection/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/decision_engine/strategy/selection/base.py b/watcher/decision_engine/strategy/selection/base.py deleted file mode 100644 index 7bf9490..0000000 --- a/watcher/decision_engine/strategy/selection/base.py +++ /dev/null @@ -1,28 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Authors: Jean-Emile DARTOIS -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import abc -import six - - -@six.add_metaclass(abc.ABCMeta) -class BaseSelector(object): - - @abc.abstractmethod - def select(self): - raise NotImplementedError() diff --git a/watcher/decision_engine/strategy/selection/default.py b/watcher/decision_engine/strategy/selection/default.py deleted file mode 100644 index ac73447..0000000 --- a/watcher/decision_engine/strategy/selection/default.py +++ /dev/null @@ -1,73 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from oslo_log import log - -from watcher._i18n import _ -from watcher.common import exception -from watcher.decision_engine.loading import default -from watcher.decision_engine.strategy.selection import base - -LOG = log.getLogger(__name__) - - -class DefaultStrategySelector(base.BaseSelector): - - def __init__(self, goal_name, strategy_name=None, osc=None): - """Default strategy selector - - :param goal_name: Name of the goal - :param strategy_name: Name of the strategy - :param osc: an OpenStackClients instance - """ - super(DefaultStrategySelector, self).__init__() - self.goal_name = goal_name - self.strategy_name = strategy_name - self.osc = osc - self.strategy_loader = default.DefaultStrategyLoader() - - def select(self): - """Selects a strategy - - :raises: :py:class:`~.LoadingError` if it failed to load a strategy - :returns: A :py:class:`~.BaseStrategy` instance - """ - strategy_to_load = None - try: - if self.strategy_name: - strategy_to_load = self.strategy_name - else: - available_strategies = self.strategy_loader.list_available() - available_strategies_for_goal = list( - key for key, strat in available_strategies.items() - if strat.get_goal_name() == self.goal_name) - - if not available_strategies_for_goal: - raise exception.NoAvailableStrategyForGoal( - goal=self.goal_name) - - # TODO(v-francoise): We should do some more work here to select - # a strategy out of a given goal instead of just choosing the - # 1st one - strategy_to_load = available_strategies_for_goal[0] - return self.strategy_loader.load(strategy_to_load, osc=self.osc) - except exception.NoAvailableStrategyForGoal: - raise - except Exception as exc: - LOG.exception(exc) - raise exception.LoadingError( - _("Could not load any strategy for goal %(goal)s"), - goal=self.goal_name) diff --git a/watcher/decision_engine/strategy/strategies/__init__.py b/watcher/decision_engine/strategy/strategies/__init__.py deleted file mode 100644 index c1a2821..0000000 --- 
a/watcher/decision_engine/strategy/strategies/__init__.py +++ /dev/null @@ -1,40 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from watcher.decision_engine.strategy.strategies import basic_consolidation -from watcher.decision_engine.strategy.strategies import dummy_strategy -from watcher.decision_engine.strategy.strategies import dummy_with_scorer -from watcher.decision_engine.strategy.strategies import noisy_neighbor -from watcher.decision_engine.strategy.strategies import outlet_temp_control -from watcher.decision_engine.strategy.strategies import uniform_airflow -from watcher.decision_engine.strategy.strategies import \ - vm_workload_consolidation -from watcher.decision_engine.strategy.strategies import workload_balance -from watcher.decision_engine.strategy.strategies import workload_stabilization - -BasicConsolidation = basic_consolidation.BasicConsolidation -OutletTempControl = outlet_temp_control.OutletTempControl -DummyStrategy = dummy_strategy.DummyStrategy -DummyWithScorer = dummy_with_scorer.DummyWithScorer -VMWorkloadConsolidation = vm_workload_consolidation.VMWorkloadConsolidation -WorkloadBalance = workload_balance.WorkloadBalance -WorkloadStabilization = workload_stabilization.WorkloadStabilization -UniformAirflow = uniform_airflow.UniformAirflow -NoisyNeighbor = noisy_neighbor.NoisyNeighbor - -__all__ = ("BasicConsolidation", "OutletTempControl", "DummyStrategy", - 
"DummyWithScorer", "VMWorkloadConsolidation", "WorkloadBalance", - "WorkloadStabilization", "UniformAirflow", "NoisyNeighbor") diff --git a/watcher/decision_engine/strategy/strategies/base.py b/watcher/decision_engine/strategy/strategies/base.py deleted file mode 100644 index 607f98a..0000000 --- a/watcher/decision_engine/strategy/strategies/base.py +++ /dev/null @@ -1,360 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -A :ref:`Strategy ` is an algorithm implementation which is -able to find a :ref:`Solution ` for a given -:ref:`Goal `. - -There may be several potential strategies which are able to achieve the same -:ref:`Goal `. This is why it is possible to configure which -specific :ref:`Strategy ` should be used for each -:ref:`Goal `. - -Some strategies may provide better optimization results but may take more time -to find an optimal :ref:`Solution `. - -When a new :ref:`Goal ` is added to the Watcher configuration, -at least one default associated :ref:`Strategy ` should be -provided as well. - -:ref:`Some default implementations are provided `, but it -is possible to :ref:`develop new implementations ` -which are dynamically loaded by Watcher at launch time. 
-""" - -import abc -import six - -from oslo_utils import strutils - -from watcher.common import clients -from watcher.common import context -from watcher.common import exception -from watcher.common.loader import loadable -from watcher.common import utils -from watcher.decision_engine.loading import default as loading -from watcher.decision_engine.model.collector import manager -from watcher.decision_engine.scope import default as default_scope -from watcher.decision_engine.solution import default -from watcher.decision_engine.strategy.common import level - - -@six.add_metaclass(abc.ABCMeta) -class BaseStrategy(loadable.Loadable): - """A base class for all the strategies - - A Strategy is an algorithm implementation which is able to find a - Solution for a given Goal. - """ - - def __init__(self, config, osc=None): - """Constructor: the signature should be identical within the subclasses - - :param config: Configuration related to this plugin - :type config: :py:class:`~.Struct` - :param osc: An OpenStackClients instance - :type osc: :py:class:`~.OpenStackClients` instance - """ - super(BaseStrategy, self).__init__(config) - self.ctx = context.make_context() - self._name = self.get_name() - self._display_name = self.get_display_name() - self._goal = self.get_goal() - # default strategy level - self._strategy_level = level.StrategyLevel.conservative - self._cluster_state_collector = None - # the solution given by the strategy - self._solution = default.DefaultSolution(goal=self.goal, strategy=self) - self._osc = osc - self._collector_manager = None - self._compute_model = None - self._storage_model = None - self._input_parameters = utils.Struct() - self._audit_scope = None - self._audit_scope_handler = None - - @classmethod - @abc.abstractmethod - def get_name(cls): - """The name of the strategy""" - raise NotImplementedError() - - @classmethod - @abc.abstractmethod - def get_display_name(cls): - """The goal display name for the strategy""" - raise 
NotImplementedError() - - @classmethod - @abc.abstractmethod - def get_translatable_display_name(cls): - """The translatable msgid of the strategy""" - # Note(v-francoise): Defined here to be used as the translation key for - # other services - raise NotImplementedError() - - @classmethod - @abc.abstractmethod - def get_goal_name(cls): - """The goal name the strategy achieves""" - raise NotImplementedError() - - @classmethod - def get_goal(cls): - """The goal the strategy achieves""" - goal_loader = loading.DefaultGoalLoader() - return goal_loader.load(cls.get_goal_name()) - - @classmethod - def get_config_opts(cls): - """Defines the configuration options to be associated to this loadable - - :return: A list of configuration options relative to this Loadable - :rtype: list of :class:`oslo_config.cfg.Opt` instances - """ - return [] - - @abc.abstractmethod - def pre_execute(self): - """Pre-execution phase - - This can be used to fetch some pre-requisites or data. - """ - raise NotImplementedError() - - @abc.abstractmethod - def do_execute(self): - """Strategy execution phase - - This phase is where you should put the main logic of your strategy. 
- """ - raise NotImplementedError() - - @abc.abstractmethod - def post_execute(self): - """Post-execution phase - - This can be used to compute the global efficacy - """ - raise NotImplementedError() - - def execute(self): - """Execute a strategy - - :return: A computed solution (via a placement algorithm) - :rtype: :py:class:`~.BaseSolution` instance - """ - self.pre_execute() - self.do_execute() - self.post_execute() - - self.solution.compute_global_efficacy() - - return self.solution - - @property - def collector_manager(self): - if self._collector_manager is None: - self._collector_manager = manager.CollectorManager() - return self._collector_manager - - @property - def compute_model(self): - """Cluster data model - - :returns: Cluster data model the strategy is executed on - :rtype model: :py:class:`~.ModelRoot` instance - """ - if self._compute_model is None: - collector = self.collector_manager.get_cluster_model_collector( - 'compute', osc=self.osc) - self._compute_model = self.audit_scope_handler.get_scoped_model( - collector.get_latest_cluster_data_model()) - - if not self._compute_model: - raise exception.ClusterStateNotDefined() - - if self._compute_model.stale: - raise exception.ClusterStateStale() - - return self._compute_model - - @property - def storage_model(self): - """Cluster data model - - :returns: Cluster data model the strategy is executed on - :rtype model: :py:class:`~.ModelRoot` instance - """ - if self._storage_model is None: - collector = self.collector_manager.get_cluster_model_collector( - 'storage', osc=self.osc) - self._storage_model = self.audit_scope_handler.get_scoped_model( - collector.get_latest_cluster_data_model()) - - if not self._storage_model: - raise exception.ClusterStateNotDefined() - - if self._storage_model.stale: - raise exception.ClusterStateStale() - - return self._storage_model - - @classmethod - def get_schema(cls): - """Defines a Schema that the input parameters shall comply to - - :return: A jsonschema format 
(mandatory default setting) - :rtype: dict - """ - return {} - - @property - def input_parameters(self): - return self._input_parameters - - @input_parameters.setter - def input_parameters(self, p): - self._input_parameters = p - - @property - def osc(self): - if not self._osc: - self._osc = clients.OpenStackClients() - return self._osc - - @property - def solution(self): - return self._solution - - @solution.setter - def solution(self, s): - self._solution = s - - @property - def audit_scope(self): - return self._audit_scope - - @audit_scope.setter - def audit_scope(self, s): - self._audit_scope = s - - @property - def audit_scope_handler(self): - if not self._audit_scope_handler: - self._audit_scope_handler = default_scope.DefaultScope( - self.audit_scope, self.config) - return self._audit_scope_handler - - @property - def name(self): - return self._name - - @property - def display_name(self): - return self._display_name - - @property - def goal(self): - return self._goal - - @property - def strategy_level(self): - return self._strategy_level - - @strategy_level.setter - def strategy_level(self, s): - self._strategy_level = s - - @property - def state_collector(self): - return self._cluster_state_collector - - @state_collector.setter - def state_collector(self, s): - self._cluster_state_collector = s - - def filter_instances_by_audit_tag(self, instances): - if not self.config.check_optimize_metadata: - return instances - instances_to_migrate = [] - for instance in instances: - optimize = True - if instance.metadata: - try: - optimize = strutils.bool_from_string( - instance.metadata.get('optimize')) - except ValueError: - optimize = False - if optimize: - instances_to_migrate.append(instance) - return instances_to_migrate - - -@six.add_metaclass(abc.ABCMeta) -class DummyBaseStrategy(BaseStrategy): - - @classmethod - def get_goal_name(cls): - return "dummy" - - -@six.add_metaclass(abc.ABCMeta) -class UnclassifiedStrategy(BaseStrategy): - """This base class is used 
to ease the development of new strategies - - The goal defined within this strategy can be used to simplify the - documentation explaining how to implement a new strategy plugin by - omitting the need for the strategy developer to define a goal straight - away. - """ - - @classmethod - def get_goal_name(cls): - return "unclassified" - - -@six.add_metaclass(abc.ABCMeta) -class ServerConsolidationBaseStrategy(BaseStrategy): - - @classmethod - def get_goal_name(cls): - return "server_consolidation" - - -@six.add_metaclass(abc.ABCMeta) -class ThermalOptimizationBaseStrategy(BaseStrategy): - - @classmethod - def get_goal_name(cls): - return "thermal_optimization" - - -@six.add_metaclass(abc.ABCMeta) -class WorkloadStabilizationBaseStrategy(BaseStrategy): - - @classmethod - def get_goal_name(cls): - return "workload_balancing" - - -@six.add_metaclass(abc.ABCMeta) -class NoisyNeighborBaseStrategy(BaseStrategy): - - @classmethod - def get_goal_name(cls): - return "noisy_neighbor" diff --git a/watcher/decision_engine/strategy/strategies/basic_consolidation.py b/watcher/decision_engine/strategy/strategies/basic_consolidation.py deleted file mode 100644 index 5618d0f..0000000 --- a/watcher/decision_engine/strategy/strategies/basic_consolidation.py +++ /dev/null @@ -1,565 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Authors: Jean-Emile DARTOIS -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -""" -*Good server consolidation strategy* - -Consolidation of VMs is essential to achieve energy optimization in cloud -environments such as OpenStack. As VMs are spinned up and/or moved over time, -it becomes necessary to migrate VMs among servers to lower the costs. However, -migration of VMs introduces runtime overheads and consumes extra energy, thus -a good server consolidation strategy should carefully plan for migration in -order to both minimize energy consumption and comply to the various SLAs. - -This algorithm not only minimizes the overall number of used servers, but also -minimizes the number of migrations. - -It has been developed only for tests. You must have at least 2 physical compute -nodes to run it, so you can easily run it on DevStack. It assumes that live -migration is possible on your OpenStack cluster. - -""" - -import datetime - -from oslo_config import cfg -from oslo_log import log - -from watcher._i18n import _ -from watcher.common import exception -from watcher.datasource import ceilometer as ceil -from watcher.datasource import gnocchi as gnoc -from watcher.datasource import monasca as mon -from watcher.decision_engine.model import element -from watcher.decision_engine.strategy.strategies import base - -LOG = log.getLogger(__name__) - - -class BasicConsolidation(base.ServerConsolidationBaseStrategy): - """Basic offline consolidation using live migration""" - - HOST_CPU_USAGE_METRIC_NAME = 'compute.node.cpu.percent' - INSTANCE_CPU_USAGE_METRIC_NAME = 'cpu_util' - - METRIC_NAMES = dict( - ceilometer=dict( - host_cpu_usage='compute.node.cpu.percent', - instance_cpu_usage='cpu_util'), - monasca=dict( - host_cpu_usage='cpu.percent', - instance_cpu_usage='vm.cpu.utilization_perc'), - gnocchi=dict( - host_cpu_usage='compute.node.cpu.percent', - instance_cpu_usage='cpu_util'), - ) - - MIGRATION = "migrate" - CHANGE_NOVA_SERVICE_STATE = "change_nova_service_state" - - def __init__(self, config, osc=None): - """Basic offline Consolidation 
using live migration - - :param config: A mapping containing the configuration of this strategy - :type config: :py:class:`~.Struct` instance - :param osc: :py:class:`~.OpenStackClients` instance - """ - super(BasicConsolidation, self).__init__(config, osc) - - # set default value for the number of enabled compute nodes - self.number_of_enabled_nodes = 0 - # set default value for the number of released nodes - self.number_of_released_nodes = 0 - # set default value for the number of migrations - self.number_of_migrations = 0 - - # set default value for the efficacy - self.efficacy = 100 - - self._ceilometer = None - self._monasca = None - self._gnocchi = None - - # TODO(jed): improve threshold overbooking? - self.threshold_mem = 1 - self.threshold_disk = 1 - self.threshold_cores = 1 - - @classmethod - def get_name(cls): - return "basic" - - @property - def migration_attempts(self): - return self.input_parameters.get('migration_attempts', 0) - - @property - def period(self): - return self.input_parameters.get('period', 7200) - - @property - def granularity(self): - return self.input_parameters.get('granularity', 300) - - @classmethod - def get_display_name(cls): - return _("Basic offline consolidation") - - @classmethod - def get_translatable_display_name(cls): - return "Basic offline consolidation" - - @classmethod - def get_schema(cls): - # Mandatory default setting for each element - return { - "properties": { - "migration_attempts": { - "description": "Maximum number of combinations to be " - "tried by the strategy while searching " - "for potential candidates. 
To remove the " - "limit, set it to 0 (by default)", - "type": "number", - "default": 0 - }, - "period": { - "description": "The time interval in seconds for " - "getting statistic aggregation", - "type": "number", - "default": 7200 - }, - "granularity": { - "description": "The time between two measures in an " - "aggregated timeseries of a metric.", - "type": "number", - "default": 300 - }, - }, - } - - @classmethod - def get_config_opts(cls): - return [ - cfg.StrOpt( - "datasource", - help="Data source to use in order to query the needed metrics", - default="ceilometer", - choices=["ceilometer", "monasca", "gnocchi"]), - cfg.BoolOpt( - "check_optimize_metadata", - help="Check optimize metadata field in instance before " - "migration", - default=False), - ] - - @property - def ceilometer(self): - if self._ceilometer is None: - self._ceilometer = ceil.CeilometerHelper(osc=self.osc) - return self._ceilometer - - @ceilometer.setter - def ceilometer(self, ceilometer): - self._ceilometer = ceilometer - - @property - def monasca(self): - if self._monasca is None: - self._monasca = mon.MonascaHelper(osc=self.osc) - return self._monasca - - @monasca.setter - def monasca(self, monasca): - self._monasca = monasca - - @property - def gnocchi(self): - if self._gnocchi is None: - self._gnocchi = gnoc.GnocchiHelper(osc=self.osc) - return self._gnocchi - - @gnocchi.setter - def gnocchi(self, gnocchi): - self._gnocchi = gnocchi - - def check_migration(self, source_node, destination_node, - instance_to_migrate): - """Check if the migration is possible - - :param source_node: the current node of the virtual machine - :param destination_node: the destination of the virtual machine - :param instance_to_migrate: the instance / virtual machine - :return: True if the there is enough place otherwise false - """ - if source_node == destination_node: - return False - - LOG.debug('Migrate instance %s from %s to %s', - instance_to_migrate, source_node, destination_node) - - total_cores = 0 - 
total_disk = 0 - total_mem = 0 - for instance in self.compute_model.get_node_instances( - destination_node): - total_cores += instance.vcpus - total_disk += instance.disk - total_mem += instance.memory - - # capacity requested by the compute node - total_cores += instance_to_migrate.vcpus - total_disk += instance_to_migrate.disk - total_mem += instance_to_migrate.memory - - return self.check_threshold(destination_node, total_cores, total_disk, - total_mem) - - def check_threshold(self, destination_node, total_cores, - total_disk, total_mem): - """Check threshold - - Check the threshold value defined by the ratio of - aggregated CPU capacity of VMs on one node to CPU capacity - of this node must not exceed the threshold value. - - :param destination_node: the destination of the virtual machine - :param total_cores: total cores of the virtual machine - :param total_disk: total disk size used by the virtual machine - :param total_mem: total memory used by the virtual machine - :return: True if the threshold is not exceed - """ - cpu_capacity = destination_node.vcpus - disk_capacity = destination_node.disk - memory_capacity = destination_node.memory - - return (cpu_capacity >= total_cores * self.threshold_cores and - disk_capacity >= total_disk * self.threshold_disk and - memory_capacity >= total_mem * self.threshold_mem) - - def calculate_weight(self, compute_resource, total_cores_used, - total_disk_used, total_memory_used): - """Calculate weight of every resource - - :param compute_resource: - :param total_cores_used: - :param total_disk_used: - :param total_memory_used: - :return: - """ - cpu_capacity = compute_resource.vcpus - disk_capacity = compute_resource.disk - memory_capacity = compute_resource.memory - - score_cores = (1 - (float(cpu_capacity) - float(total_cores_used)) / - float(cpu_capacity)) - - # It's possible that disk_capacity is 0, e.g., m1.nano.disk = 0 - if disk_capacity == 0: - score_disk = 0 - else: - score_disk = (1 - (float(disk_capacity) - 
float(total_disk_used)) / - float(disk_capacity)) - - score_memory = ( - 1 - (float(memory_capacity) - float(total_memory_used)) / - float(memory_capacity)) - # TODO(jed): take in account weight - return (score_cores + score_disk + score_memory) / 3 - - def get_node_cpu_usage(self, node): - metric_name = self.METRIC_NAMES[ - self.config.datasource]['host_cpu_usage'] - if self.config.datasource == "ceilometer": - resource_id = "%s_%s" % (node.uuid, node.hostname) - return self.ceilometer.statistic_aggregation( - resource_id=resource_id, - meter_name=metric_name, - period=self.period, - aggregate='avg', - ) - elif self.config.datasource == "gnocchi": - resource_id = "%s_%s" % (node.uuid, node.hostname) - stop_time = datetime.datetime.utcnow() - start_time = stop_time - datetime.timedelta( - seconds=int(self.period)) - return self.gnocchi.statistic_aggregation( - resource_id=resource_id, - metric=metric_name, - granularity=self.granularity, - start_time=start_time, - stop_time=stop_time, - aggregation='mean' - ) - elif self.config.datasource == "monasca": - statistics = self.monasca.statistic_aggregation( - meter_name=metric_name, - dimensions=dict(hostname=node.uuid), - period=self.period, - aggregate='avg' - ) - cpu_usage = None - for stat in statistics: - avg_col_idx = stat['columns'].index('avg') - values = [r[avg_col_idx] for r in stat['statistics']] - value = float(sum(values)) / len(values) - cpu_usage = value - - return cpu_usage - - raise exception.UnsupportedDataSource( - strategy=self.name, datasource=self.config.datasource) - - def get_instance_cpu_usage(self, instance): - metric_name = self.METRIC_NAMES[ - self.config.datasource]['instance_cpu_usage'] - if self.config.datasource == "ceilometer": - return self.ceilometer.statistic_aggregation( - resource_id=instance.uuid, - meter_name=metric_name, - period=self.period, - aggregate='avg' - ) - elif self.config.datasource == "gnocchi": - stop_time = datetime.datetime.utcnow() - start_time = stop_time - 
datetime.timedelta( - seconds=int(self.period)) - return self.gnocchi.statistic_aggregation( - resource_id=instance.uuid, - metric=metric_name, - granularity=self.granularity, - start_time=start_time, - stop_time=stop_time, - aggregation='mean', - ) - elif self.config.datasource == "monasca": - statistics = self.monasca.statistic_aggregation( - meter_name=metric_name, - dimensions=dict(resource_id=instance.uuid), - period=self.period, - aggregate='avg' - ) - cpu_usage = None - for stat in statistics: - avg_col_idx = stat['columns'].index('avg') - values = [r[avg_col_idx] for r in stat['statistics']] - value = float(sum(values)) / len(values) - cpu_usage = value - return cpu_usage - - raise exception.UnsupportedDataSource( - strategy=self.name, datasource=self.config.datasource) - - def calculate_score_node(self, node): - """Calculate the score that represent the utilization level - - :param node: :py:class:`~.ComputeNode` instance - :return: Score for the given compute node - :rtype: float - """ - host_avg_cpu_util = self.get_node_cpu_usage(node) - - if host_avg_cpu_util is None: - resource_id = "%s_%s" % (node.uuid, node.hostname) - LOG.error( - "No values returned by %(resource_id)s " - "for %(metric_name)s" % dict( - resource_id=resource_id, - metric_name=self.METRIC_NAMES[ - self.config.datasource]['host_cpu_usage'])) - host_avg_cpu_util = 100 - - total_cores_used = node.vcpus * (host_avg_cpu_util / 100.0) - - return self.calculate_weight(node, total_cores_used, 0, 0) - - def calculate_score_instance(self, instance): - """Calculate Score of virtual machine - - :param instance: the virtual machine - :return: score - """ - instance_cpu_utilization = self.get_instance_cpu_usage(instance) - if instance_cpu_utilization is None: - LOG.error( - "No values returned by %(resource_id)s " - "for %(metric_name)s" % dict( - resource_id=instance.uuid, - metric_name=self.METRIC_NAMES[ - self.config.datasource]['instance_cpu_usage'])) - instance_cpu_utilization = 100 - - 
total_cores_used = instance.vcpus * (instance_cpu_utilization / 100.0) - - return self.calculate_weight(instance, total_cores_used, 0, 0) - - def add_change_service_state(self, resource_id, state): - parameters = {'state': state} - self.solution.add_action(action_type=self.CHANGE_NOVA_SERVICE_STATE, - resource_id=resource_id, - input_parameters=parameters) - - def add_migration(self, - resource_id, - migration_type, - source_node, - destination_node): - parameters = {'migration_type': migration_type, - 'source_node': source_node, - 'destination_node': destination_node} - self.solution.add_action(action_type=self.MIGRATION, - resource_id=resource_id, - input_parameters=parameters) - - def compute_score_of_nodes(self): - """Calculate score of nodes based on load by VMs""" - score = [] - for node in self.compute_model.get_all_compute_nodes().values(): - if node.status == element.ServiceState.ENABLED.value: - self.number_of_enabled_nodes += 1 - - instances = self.compute_model.get_node_instances(node) - if len(instances) > 0: - result = self.calculate_score_node(node) - score.append((node.uuid, result)) - - return score - - def node_and_instance_score(self, sorted_scores): - """Get List of VMs from node""" - node_to_release = sorted_scores[len(sorted_scores) - 1][0] - instances = self.compute_model.get_node_instances( - self.compute_model.get_node_by_uuid(node_to_release)) - - instances_to_migrate = self.filter_instances_by_audit_tag(instances) - instance_score = [] - for instance in instances_to_migrate: - if instance.state == element.InstanceState.ACTIVE.value: - instance_score.append( - (instance, self.calculate_score_instance(instance))) - - return node_to_release, instance_score - - def create_migration_instance(self, mig_instance, mig_source_node, - mig_destination_node): - """Create migration VM""" - if self.compute_model.migrate_instance( - mig_instance, mig_source_node, mig_destination_node): - self.add_migration(mig_instance.uuid, 'live', - 
mig_source_node.uuid, - mig_destination_node.uuid) - - if len(self.compute_model.get_node_instances(mig_source_node)) == 0: - self.add_change_service_state(mig_source_node. - uuid, - element.ServiceState.DISABLED.value) - self.number_of_released_nodes += 1 - - def calculate_num_migrations(self, sorted_instances, node_to_release, - sorted_score): - number_migrations = 0 - for mig_instance, __ in sorted_instances: - for node_uuid, __ in sorted_score: - mig_source_node = self.compute_model.get_node_by_uuid( - node_to_release) - mig_destination_node = self.compute_model.get_node_by_uuid( - node_uuid) - - result = self.check_migration( - mig_source_node, mig_destination_node, mig_instance) - if result: - self.create_migration_instance( - mig_instance, mig_source_node, mig_destination_node) - number_migrations += 1 - break - return number_migrations - - def unsuccessful_migration_actualization(self, number_migrations, - unsuccessful_migration): - if number_migrations > 0: - self.number_of_migrations += number_migrations - return 0 - else: - return unsuccessful_migration + 1 - - def pre_execute(self): - LOG.info("Initializing Server Consolidation") - - if not self.compute_model: - raise exception.ClusterStateNotDefined() - - if len(self.compute_model.get_all_compute_nodes()) == 0: - raise exception.ClusterEmpty() - - if self.compute_model.stale: - raise exception.ClusterStateStale() - - LOG.debug(self.compute_model.to_string()) - - def do_execute(self): - unsuccessful_migration = 0 - - scores = self.compute_score_of_nodes() - # Sort compute nodes by Score decreasing - sorted_scores = sorted(scores, reverse=True, key=lambda x: (x[1])) - LOG.debug("Compute node(s) BFD %s", sorted_scores) - # Get Node to be released - if len(scores) == 0: - LOG.warning( - "The workloads of the compute nodes" - " of the cluster is zero") - return - - while sorted_scores and ( - not self.migration_attempts or - self.migration_attempts >= unsuccessful_migration): - node_to_release, 
instance_score = self.node_and_instance_score( - sorted_scores) - - # Sort instances by Score - sorted_instances = sorted( - instance_score, reverse=True, key=lambda x: (x[1])) - # BFD: Best Fit Decrease - LOG.debug("Instance(s) BFD %s", sorted_instances) - - migrations = self.calculate_num_migrations( - sorted_instances, node_to_release, sorted_scores) - - unsuccessful_migration = self.unsuccessful_migration_actualization( - migrations, unsuccessful_migration) - - if not migrations: - # We don't have any possible migrations to perform on this node - # so we discard the node so we can try to migrate instances - # from the next one in the list - sorted_scores.pop() - - infos = { - "compute_nodes_count": self.number_of_enabled_nodes, - "released_compute_nodes_count": self.number_of_released_nodes, - "instance_migrations_count": self.number_of_migrations, - "efficacy": self.efficacy - } - LOG.debug(infos) - - def post_execute(self): - self.solution.set_efficacy_indicators( - compute_nodes_count=self.number_of_enabled_nodes, - released_compute_nodes_count=self.number_of_released_nodes, - instance_migrations_count=self.number_of_migrations, - ) - LOG.debug(self.compute_model.to_string()) diff --git a/watcher/decision_engine/strategy/strategies/dummy_strategy.py b/watcher/decision_engine/strategy/strategies/dummy_strategy.py deleted file mode 100644 index 22d7f4d..0000000 --- a/watcher/decision_engine/strategy/strategies/dummy_strategy.py +++ /dev/null @@ -1,103 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Authors: Jean-Emile DARTOIS -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from oslo_log import log - -from watcher._i18n import _ -from watcher.decision_engine.strategy.strategies import base - -LOG = log.getLogger(__name__) - - -class DummyStrategy(base.DummyBaseStrategy): - """Dummy strategy used for integration testing via Tempest - - *Description* - - This strategy does not provide any useful optimization. Its only purpose - is to be used by Tempest tests. - - *Requirements* - - - - *Limitations* - - Do not use in production. - - *Spec URL* - - - """ - - NOP = "nop" - SLEEP = "sleep" - - def pre_execute(self): - pass - - def do_execute(self): - para1 = self.input_parameters.para1 - para2 = self.input_parameters.para2 - LOG.debug("Executing Dummy strategy with para1=%(p1)f, para2=%(p2)s", - {'p1': para1, 'p2': para2}) - parameters = {'message': 'hello World'} - self.solution.add_action(action_type=self.NOP, - input_parameters=parameters) - - parameters = {'message': para2} - self.solution.add_action(action_type=self.NOP, - input_parameters=parameters) - - self.solution.add_action(action_type=self.SLEEP, - input_parameters={'duration': para1}) - - def post_execute(self): - pass - - @classmethod - def get_name(cls): - return "dummy" - - @classmethod - def get_display_name(cls): - return _("Dummy strategy") - - @classmethod - def get_translatable_display_name(cls): - return "Dummy strategy" - - @classmethod - def get_schema(cls): - # Mandatory default setting for each element - return { - "properties": { - "para1": { - "description": "number parameter example", - "type": "number", - "default": 3.2, - "minimum": 1.0, - 
"maximum": 10.2, - }, - "para2": { - "description": "string parameter example", - "type": "string", - "default": "hello" - }, - }, - } diff --git a/watcher/decision_engine/strategy/strategies/dummy_with_resize.py b/watcher/decision_engine/strategy/strategies/dummy_with_resize.py deleted file mode 100644 index 1c4c27c..0000000 --- a/watcher/decision_engine/strategy/strategies/dummy_with_resize.py +++ /dev/null @@ -1,121 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from oslo_log import log - -from watcher._i18n import _ -from watcher.decision_engine.strategy.strategies import base - -LOG = log.getLogger(__name__) - - -class DummyWithResize(base.DummyBaseStrategy): - """Dummy strategy used for integration testing via Tempest - - *Description* - - This strategy does not provide any useful optimization. Its only purpose - is to be used by Tempest tests. - - *Requirements* - - - - *Limitations* - - Do not use in production. 
- - *Spec URL* - - - """ - - NOP = "nop" - SLEEP = "sleep" - - def pre_execute(self): - pass - - def do_execute(self): - para1 = self.input_parameters.para1 - para2 = self.input_parameters.para2 - LOG.debug("Executing Dummy strategy with para1=%(p1)f, para2=%(p2)s", - {'p1': para1, 'p2': para2}) - parameters = {'message': 'hello World'} - self.solution.add_action(action_type=self.NOP, - input_parameters=parameters) - - parameters = {'message': 'Welcome'} - self.solution.add_action(action_type=self.NOP, - input_parameters=parameters) - - self.solution.add_action(action_type=self.SLEEP, - input_parameters={'duration': 5.0}) - self.solution.add_action( - action_type='migrate', - resource_id='b199db0c-1408-4d52-b5a5-5ca14de0ff36', - input_parameters={ - 'source_node': 'compute2', - 'destination_node': 'compute3', - 'migration_type': 'live'}) - - self.solution.add_action( - action_type='migrate', - resource_id='8db1b3c1-7938-4c34-8c03-6de14b874f8f', - input_parameters={ - 'source_node': 'compute2', - 'destination_node': 'compute3', - 'migration_type': 'live'} - ) - self.solution.add_action( - action_type='resize', - resource_id='8db1b3c1-7938-4c34-8c03-6de14b874f8f', - input_parameters={'flavor': 'x2'} - ) - - def post_execute(self): - pass - - @classmethod - def get_name(cls): - return "dummy_with_resize" - - @classmethod - def get_display_name(cls): - return _("Dummy strategy with resize") - - @classmethod - def get_translatable_display_name(cls): - return "Dummy strategy with resize" - - @classmethod - def get_schema(cls): - # Mandatory default setting for each element - return { - "properties": { - "para1": { - "description": "number parameter example", - "type": "number", - "default": 3.2, - "minimum": 1.0, - "maximum": 10.2, - }, - "para2": { - "description": "string parameter example", - "type": "string", - "default": "hello" - }, - }, - } diff --git a/watcher/decision_engine/strategy/strategies/dummy_with_scorer.py 
b/watcher/decision_engine/strategy/strategies/dummy_with_scorer.py deleted file mode 100644 index d99db22..0000000 --- a/watcher/decision_engine/strategy/strategies/dummy_with_scorer.py +++ /dev/null @@ -1,166 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 Intel -# -# Authors: Tomasz Kaczynski -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -import random - -from oslo_log import log -from oslo_serialization import jsonutils -from oslo_utils import units - -from watcher._i18n import _ -from watcher.decision_engine.scoring import scoring_factory -from watcher.decision_engine.strategy.strategies import base - -LOG = log.getLogger(__name__) - - -class DummyWithScorer(base.DummyBaseStrategy): - """A dummy strategy using dummy scoring engines. - - This is a dummy strategy demonstrating how to work with scoring - engines. One scoring engine is predicting the workload type of a machine - based on the telemetry data, the other one is simply calculating the - average value for given elements in a list. Results are then passed to the - NOP action. 
- - The strategy is presenting the whole workflow: - - Get a reference to a scoring engine - - Prepare input data (features) for score calculation - - Perform score calculation - - Use scorer's metadata for results interpretation - """ - - DEFAULT_NAME = "dummy_with_scorer" - DEFAULT_DESCRIPTION = "Dummy Strategy with Scorer" - - NOP = "nop" - SLEEP = "sleep" - - def __init__(self, config, osc=None): - """Constructor: the signature should be identical within the subclasses - - :param config: Configuration related to this plugin - :type config: :py:class:`~.Struct` - :param osc: An OpenStackClients instance - :type osc: :py:class:`~.OpenStackClients` instance - """ - - super(DummyWithScorer, self).__init__(config, osc) - - # Setup Scoring Engines - self._workload_scorer = (scoring_factory - .get_scoring_engine('dummy_scorer')) - self._avg_scorer = (scoring_factory - .get_scoring_engine('dummy_avg_scorer')) - - # Get metainfo from Workload Scorer for result intepretation - metainfo = jsonutils.loads(self._workload_scorer.get_metainfo()) - self._workloads = {index: workload - for index, workload in enumerate( - metainfo['workloads'])} - - def pre_execute(self): - pass - - def do_execute(self): - # Simple "hello world" from strategy - param1 = self.input_parameters.param1 - param2 = self.input_parameters.param2 - LOG.debug('DummyWithScorer params: param1=%(p1)f, param2=%(p2)s', - {'p1': param1, 'p2': param2}) - parameters = {'message': 'Hello from Dummy Strategy with Scorer!'} - self.solution.add_action(action_type=self.NOP, - input_parameters=parameters) - - # Demonstrate workload scorer - features = self._generate_random_telemetry() - result_str = self._workload_scorer.calculate_score(features) - LOG.debug('Workload Scorer result: %s', result_str) - - # Parse the result using workloads from scorer's metainfo - result = self._workloads[jsonutils.loads(result_str)[0]] - LOG.debug('Detected Workload: %s', result) - parameters = {'message': 'Detected Workload: %s' % 
result} - self.solution.add_action(action_type=self.NOP, - input_parameters=parameters) - - # Demonstrate AVG scorer - features = jsonutils.dumps(random.sample(range(1000), 20)) - result_str = self._avg_scorer.calculate_score(features) - LOG.debug('AVG Scorer result: %s', result_str) - result = jsonutils.loads(result_str)[0] - LOG.debug('AVG Scorer result (parsed): %d', result) - parameters = {'message': 'AVG Scorer result: %s' % result} - self.solution.add_action(action_type=self.NOP, - input_parameters=parameters) - - # Sleep action - self.solution.add_action(action_type=self.SLEEP, - input_parameters={'duration': 5.0}) - - def post_execute(self): - pass - - @classmethod - def get_name(cls): - return 'dummy_with_scorer' - - @classmethod - def get_display_name(cls): - return _('Dummy Strategy using sample Scoring Engines') - - @classmethod - def get_translatable_display_name(cls): - return 'Dummy Strategy using sample Scoring Engines' - - @classmethod - def get_schema(cls): - # Mandatory default setting for each element - return { - 'properties': { - 'param1': { - 'description': 'number parameter example', - 'type': 'number', - 'default': 3.2, - 'minimum': 1.0, - 'maximum': 10.2, - }, - 'param2': { - 'description': 'string parameter example', - 'type': "string", - 'default': "hello" - }, - }, - } - - def _generate_random_telemetry(self): - processor_time = random.randint(0, 100) - mem_total_bytes = 4*units.Gi - mem_avail_bytes = random.randint(1*units.Gi, 4*units.Gi) - mem_page_reads = random.randint(0, 2000) - mem_page_writes = random.randint(0, 2000) - disk_read_bytes = random.randint(0*units.Mi, 200*units.Mi) - disk_write_bytes = random.randint(0*units.Mi, 200*units.Mi) - net_bytes_received = random.randint(0*units.Mi, 20*units.Mi) - net_bytes_sent = random.randint(0*units.Mi, 10*units.Mi) - - return jsonutils.dumps([ - processor_time, mem_total_bytes, mem_avail_bytes, - mem_page_reads, mem_page_writes, disk_read_bytes, - disk_write_bytes, net_bytes_received, 
net_bytes_sent]) diff --git a/watcher/decision_engine/strategy/strategies/noisy_neighbor.py b/watcher/decision_engine/strategy/strategies/noisy_neighbor.py deleted file mode 100644 index d67b411..0000000 --- a/watcher/decision_engine/strategy/strategies/noisy_neighbor.py +++ /dev/null @@ -1,304 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2017 Intel Corp -# -# Authors: Prudhvi Rao Shedimbi -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from oslo_log import log - -from watcher._i18n import _ -from watcher.common import exception as wexc -from watcher.datasource import ceilometer as ceil -from watcher.decision_engine.strategy.strategies import base - -LOG = log.getLogger(__name__) - - -class NoisyNeighbor(base.NoisyNeighborBaseStrategy): - - MIGRATION = "migrate" - # The meter to report L3 cache in ceilometer - METER_NAME_L3 = "cpu_l3_cache" - DEFAULT_WATCHER_PRIORITY = 5 - - def __init__(self, config, osc=None): - """Noisy Neighbor strategy using live migration - - :param config: A mapping containing the configuration of this strategy - :type config: dict - :param osc: an OpenStackClients object, defaults to None - :type osc: :py:class:`~.OpenStackClients` instance, optional - """ - - super(NoisyNeighbor, self).__init__(config, osc) - - self.meter_name = self.METER_NAME_L3 - self._ceilometer = None - - @property - def ceilometer(self): - if self._ceilometer is None: - self._ceilometer = ceil.CeilometerHelper(osc=self.osc) - return self._ceilometer - - 
@ceilometer.setter - def ceilometer(self, c): - self._ceilometer = c - - @classmethod - def get_name(cls): - return "noisy_neighbor" - - @classmethod - def get_display_name(cls): - return _("Noisy Neighbor") - - @classmethod - def get_translatable_display_name(cls): - return "Noisy Neighbor" - - @classmethod - def get_schema(cls): - # Mandatory default setting for each element - return { - "properties": { - "cache_threshold": { - "description": "Performance drop in L3_cache threshold " - "for migration", - "type": "number", - "default": 35.0 - }, - "period": { - "description": "Aggregate time period of ceilometer", - "type": "number", - "default": 100.0 - }, - }, - } - - def get_current_and_previous_cache(self, instance): - - try: - current_cache = self.ceilometer.statistic_aggregation( - resource_id=instance.uuid, - meter_name=self.meter_name, period=self.period, - aggregate='avg') - - previous_cache = 2 * ( - self.ceilometer.statistic_aggregation( - resource_id=instance.uuid, - meter_name=self.meter_name, - period=2*self.period, aggregate='avg')) - current_cache - - except Exception as exc: - LOG.exception(exc) - return None - - return current_cache, previous_cache - - def find_priority_instance(self, instance): - - current_cache, previous_cache = \ - self.get_current_and_previous_cache(instance) - - if None in (current_cache, previous_cache): - LOG.warning("Ceilometer unable to pick L3 Cache " - "values. Skipping the instance") - return None - - if (current_cache < (1 - (self.cache_threshold / 100.0)) * - previous_cache): - return instance - else: - return None - - def find_noisy_instance(self, instance): - - noisy_current_cache, noisy_previous_cache = \ - self.get_current_and_previous_cache(instance) - - if None in (noisy_current_cache, noisy_previous_cache): - LOG.warning("Ceilometer unable to pick " - "L3 Cache. 
Skipping the instance") - return None - - if (noisy_current_cache > (1 + (self.cache_threshold / 100.0)) * - noisy_previous_cache): - return instance - else: - return None - - def group_hosts(self): - - nodes = self.compute_model.get_all_compute_nodes() - size_cluster = len(nodes) - if size_cluster == 0: - raise wexc.ClusterEmpty() - - hosts_need_release = {} - hosts_target = [] - - for node in nodes.values(): - instances_of_node = self.compute_model.get_node_instances(node) - node_instance_count = len(instances_of_node) - - # Flag that tells us whether to skip the node or not. If True, - # the node is skipped. Will be true if we find a noisy instance or - # when potential priority instance will be same as potential noisy - # instance - loop_break_flag = False - - if node_instance_count > 1: - - instance_priority_list = [] - - for instance in instances_of_node: - instance_priority_list.append(instance) - - # If there is no metadata regarding watcher-priority, it takes - # DEFAULT_WATCHER_PRIORITY as priority. 
- instance_priority_list.sort(key=lambda a: ( - a.get('metadata').get('watcher-priority'), - self.DEFAULT_WATCHER_PRIORITY)) - - instance_priority_list_reverse = list(instance_priority_list) - instance_priority_list_reverse.reverse() - - for potential_priority_instance in instance_priority_list: - - priority_instance = self.find_priority_instance( - potential_priority_instance) - - if (priority_instance is not None): - - for potential_noisy_instance in ( - instance_priority_list_reverse): - if(potential_noisy_instance == - potential_priority_instance): - loop_break_flag = True - break - - noisy_instance = self.find_noisy_instance( - potential_noisy_instance) - - if noisy_instance is not None: - hosts_need_release[node.uuid] = { - 'priority_vm': potential_priority_instance, - 'noisy_vm': potential_noisy_instance} - LOG.debug("Priority VM found: %s" % ( - potential_priority_instance.uuid)) - LOG.debug("Noisy VM found: %s" % ( - potential_noisy_instance.uuid)) - loop_break_flag = True - break - - # No need to check other instances in the node - if loop_break_flag is True: - break - - if node.uuid not in hosts_need_release: - hosts_target.append(node) - - return hosts_need_release, hosts_target - - def calc_used_resource(self, node): - """Calculate the used vcpus, memory and disk based on VM flavors""" - instances = self.compute_model.get_node_instances(node) - vcpus_used = 0 - memory_mb_used = 0 - disk_gb_used = 0 - for instance in instances: - vcpus_used += instance.vcpus - memory_mb_used += instance.memory - disk_gb_used += instance.disk - - return vcpus_used, memory_mb_used, disk_gb_used - - def filter_dest_servers(self, hosts, instance_to_migrate): - required_cores = instance_to_migrate.vcpus - required_disk = instance_to_migrate.disk - required_memory = instance_to_migrate.memory - - dest_servers = [] - for host in hosts: - cores_used, mem_used, disk_used = self.calc_used_resource(host) - cores_available = host.vcpus - cores_used - disk_available = host.disk - 
disk_used - mem_available = host.memory - mem_used - if (cores_available >= required_cores and disk_available >= - required_disk and mem_available >= required_memory): - dest_servers.append(host) - - return dest_servers - - def pre_execute(self): - LOG.debug("Initializing Noisy Neighbor strategy") - - if not self.compute_model: - raise wexc.ClusterStateNotDefined() - - if self.compute_model.stale: - raise wexc.ClusterStateStale() - - LOG.debug(self.compute_model.to_string()) - - def do_execute(self): - self.cache_threshold = self.input_parameters.cache_threshold - self.period = self.input_parameters.period - - hosts_need_release, hosts_target = self.group_hosts() - - if len(hosts_need_release) == 0: - LOG.debug("No hosts require optimization") - return - - if len(hosts_target) == 0: - LOG.debug("No hosts available to migrate") - return - - mig_source_node_name = max(hosts_need_release.keys(), key=lambda a: - hosts_need_release[a]['priority_vm']) - instance_to_migrate = hosts_need_release[mig_source_node_name][ - 'noisy_vm'] - - if instance_to_migrate is None: - return - - dest_servers = self.filter_dest_servers(hosts_target, - instance_to_migrate) - - if len(dest_servers) == 0: - LOG.info("No proper target host could be found") - return - - # Destination node will be the first available node in the list. 
- mig_destination_node = dest_servers[0] - mig_source_node = self.compute_model.get_node_by_uuid( - mig_source_node_name) - - if self.compute_model.migrate_instance(instance_to_migrate, - mig_source_node, - mig_destination_node): - parameters = {'migration_type': 'live', - 'source_node': mig_source_node.uuid, - 'destination_node': mig_destination_node.uuid} - self.solution.add_action(action_type=self.MIGRATION, - resource_id=instance_to_migrate.uuid, - input_parameters=parameters) - - def post_execute(self): - self.solution.model = self.compute_model - - LOG.debug(self.compute_model.to_string()) diff --git a/watcher/decision_engine/strategy/strategies/outlet_temp_control.py b/watcher/decision_engine/strategy/strategies/outlet_temp_control.py deleted file mode 100644 index bbafd02..0000000 --- a/watcher/decision_engine/strategy/strategies/outlet_temp_control.py +++ /dev/null @@ -1,333 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 Intel Corp -# -# Authors: Junjie-Huang -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -""" -*Good Thermal Strategy*: - -Towards to software defined infrastructure, the power and thermal -intelligences is being adopted to optimize workload, which can help -improve efficiency, reduce power, as well as to improve datacenter PUE -and lower down operation cost in data center. -Outlet (Exhaust Air) Temperature is one of the important thermal -telemetries to measure thermal/workload status of server. 
-""" - -import datetime - -from oslo_log import log - -from watcher._i18n import _ -from watcher.common import exception as wexc -from watcher.datasource import ceilometer as ceil -from watcher.datasource import gnocchi as gnoc -from watcher.decision_engine.model import element -from watcher.decision_engine.strategy.strategies import base - - -LOG = log.getLogger(__name__) - - -class OutletTempControl(base.ThermalOptimizationBaseStrategy): - """[PoC] Outlet temperature control using live migration - - *Description* - - It is a migration strategy based on the outlet temperature of compute - hosts. It generates solutions to move a workload whenever a server's - outlet temperature is higher than the specified threshold. - - *Requirements* - - * Hardware: All computer hosts should support IPMI and PTAS technology - * Software: Ceilometer component ceilometer-agent-ipmi running - in each compute host, and Ceilometer API can report such telemetry - ``hardware.ipmi.node.outlet_temperature`` successfully. - * You must have at least 2 physical compute hosts to run this strategy. - - *Limitations* - - - This is a proof of concept that is not meant to be used in production - - We cannot forecast how many servers should be migrated. This is the - reason why we only plan a single virtual machine migration at a time. - So it's better to use this algorithm with `CONTINUOUS` audits. 
- - It assume that live migrations are possible - - *Spec URL* - - https://github.com/openstack/watcher-specs/blob/master/specs/mitaka/approved/outlet-temperature-based-strategy.rst - """ # noqa - - # The meter to report outlet temperature in ceilometer - MIGRATION = "migrate" - - METRIC_NAMES = dict( - ceilometer=dict( - host_outlet_temp='hardware.ipmi.node.outlet_temperature'), - gnocchi=dict( - host_outlet_temp='hardware.ipmi.node.outlet_temperature'), - ) - - def __init__(self, config, osc=None): - """Outlet temperature control using live migration - - :param config: A mapping containing the configuration of this strategy - :type config: dict - :param osc: an OpenStackClients object, defaults to None - :type osc: :py:class:`~.OpenStackClients` instance, optional - """ - super(OutletTempControl, self).__init__(config, osc) - self._ceilometer = None - self._gnocchi = None - - @classmethod - def get_name(cls): - return "outlet_temperature" - - @classmethod - def get_display_name(cls): - return _("Outlet temperature based strategy") - - @classmethod - def get_translatable_display_name(cls): - return "Outlet temperature based strategy" - - @property - def period(self): - return self.input_parameters.get('period', 30) - - @classmethod - def get_schema(cls): - # Mandatory default setting for each element - return { - "properties": { - "threshold": { - "description": "temperature threshold for migration", - "type": "number", - "default": 35.0 - }, - "period": { - "description": "The time interval in seconds for " - "getting statistic aggregation", - "type": "number", - "default": 30 - }, - "granularity": { - "description": "The time between two measures in an " - "aggregated timeseries of a metric.", - "type": "number", - "default": 300 - }, - }, - } - - @property - def ceilometer(self): - if self._ceilometer is None: - self._ceilometer = ceil.CeilometerHelper(osc=self.osc) - return self._ceilometer - - @ceilometer.setter - def ceilometer(self, c): - self._ceilometer = 
c - - @property - def gnocchi(self): - if self._gnocchi is None: - self._gnocchi = gnoc.GnocchiHelper(osc=self.osc) - return self._gnocchi - - @gnocchi.setter - def gnocchi(self, g): - self._gnocchi = g - - @property - def granularity(self): - return self.input_parameters.get('granularity', 300) - - def calc_used_resource(self, node): - """Calculate the used vcpus, memory and disk based on VM flavors""" - instances = self.compute_model.get_node_instances(node) - vcpus_used = 0 - memory_mb_used = 0 - disk_gb_used = 0 - for instance in instances: - vcpus_used += instance.vcpus - memory_mb_used += instance.memory - disk_gb_used += instance.disk - - return vcpus_used, memory_mb_used, disk_gb_used - - def group_hosts_by_outlet_temp(self): - """Group hosts based on outlet temp meters""" - nodes = self.compute_model.get_all_compute_nodes() - size_cluster = len(nodes) - if size_cluster == 0: - raise wexc.ClusterEmpty() - - hosts_need_release = [] - hosts_target = [] - metric_name = self.METRIC_NAMES[ - self.config.datasource]['host_outlet_temp'] - for node in nodes.values(): - resource_id = node.uuid - outlet_temp = None - - if self.config.datasource == "ceilometer": - outlet_temp = self.ceilometer.statistic_aggregation( - resource_id=resource_id, - meter_name=metric_name, - period=self.period, - aggregate='avg' - ) - elif self.config.datasource == "gnocchi": - stop_time = datetime.datetime.utcnow() - start_time = stop_time - datetime.timedelta( - seconds=int(self.period)) - outlet_temp = self.gnocchi.statistic_aggregation( - resource_id=resource_id, - metric=metric_name, - granularity=self.granularity, - start_time=start_time, - stop_time=stop_time, - aggregation='mean' - ) - # some hosts may not have outlet temp meters, remove from target - if outlet_temp is None: - LOG.warning("%s: no outlet temp data", resource_id) - continue - - LOG.debug("%s: outlet temperature %f" % (resource_id, outlet_temp)) - instance_data = {'node': node, 'outlet_temp': outlet_temp} - if 
outlet_temp >= self.threshold: - # mark the node to release resources - hosts_need_release.append(instance_data) - else: - hosts_target.append(instance_data) - return hosts_need_release, hosts_target - - def choose_instance_to_migrate(self, hosts): - """Pick up an active instance to migrate from provided hosts""" - for instance_data in hosts: - mig_source_node = instance_data['node'] - instances_of_src = self.compute_model.get_node_instances( - mig_source_node) - for instance in instances_of_src: - try: - # select the first active instance to migrate - if (instance.state != - element.InstanceState.ACTIVE.value): - LOG.info("Instance not active, skipped: %s", - instance.uuid) - continue - return mig_source_node, instance - except wexc.InstanceNotFound as e: - LOG.exception(e) - LOG.info("Instance not found") - - return None - - def filter_dest_servers(self, hosts, instance_to_migrate): - """Only return hosts with sufficient available resources""" - required_cores = instance_to_migrate.vcpus - required_disk = instance_to_migrate.disk - required_memory = instance_to_migrate.memory - - # filter nodes without enough resource - dest_servers = [] - for instance_data in hosts: - host = instance_data['node'] - # available - cores_used, mem_used, disk_used = self.calc_used_resource(host) - cores_available = host.vcpus - cores_used - disk_available = host.disk - disk_used - mem_available = host.memory - mem_used - if cores_available >= required_cores \ - and disk_available >= required_disk \ - and mem_available >= required_memory: - dest_servers.append(instance_data) - - return dest_servers - - def pre_execute(self): - LOG.debug("Initializing Outlet temperature strategy") - - if not self.compute_model: - raise wexc.ClusterStateNotDefined() - - if self.compute_model.stale: - raise wexc.ClusterStateStale() - - LOG.debug(self.compute_model.to_string()) - - def do_execute(self): - # the migration plan will be triggered when the outlet temperature - # reaches threshold - 
self.threshold = self.input_parameters.threshold - LOG.debug("Initializing Outlet temperature strategy with threshold=%d", - self.threshold) - hosts_need_release, hosts_target = self.group_hosts_by_outlet_temp() - - if len(hosts_need_release) == 0: - # TODO(zhenzanz): return something right if there's no hot servers - LOG.debug("No hosts require optimization") - return self.solution - - if len(hosts_target) == 0: - LOG.warning("No hosts under outlet temp threshold found") - return self.solution - - # choose the server with highest outlet t - hosts_need_release = sorted(hosts_need_release, - reverse=True, - key=lambda x: (x["outlet_temp"])) - - instance_to_migrate = self.choose_instance_to_migrate( - hosts_need_release) - # calculate the instance's cpu cores,memory,disk needs - if instance_to_migrate is None: - return self.solution - - mig_source_node, instance_src = instance_to_migrate - dest_servers = self.filter_dest_servers(hosts_target, instance_src) - # sort the filtered result by outlet temp - # pick up the lowest one as dest server - if len(dest_servers) == 0: - # TODO(zhenzanz): maybe to warn that there's no resource - # for instance. 
- LOG.info("No proper target host could be found") - return self.solution - - dest_servers = sorted(dest_servers, key=lambda x: (x["outlet_temp"])) - # always use the host with lowerest outlet temperature - mig_destination_node = dest_servers[0]['node'] - # generate solution to migrate the instance to the dest server, - if self.compute_model.migrate_instance( - instance_src, mig_source_node, mig_destination_node): - parameters = {'migration_type': 'live', - 'source_node': mig_source_node.uuid, - 'destination_node': mig_destination_node.uuid} - self.solution.add_action(action_type=self.MIGRATION, - resource_id=instance_src.uuid, - input_parameters=parameters) - - def post_execute(self): - self.solution.model = self.compute_model - # TODO(v-francoise): Add the indicators to the solution - - LOG.debug(self.compute_model.to_string()) diff --git a/watcher/decision_engine/strategy/strategies/uniform_airflow.py b/watcher/decision_engine/strategy/strategies/uniform_airflow.py deleted file mode 100644 index e58b733..0000000 --- a/watcher/decision_engine/strategy/strategies/uniform_airflow.py +++ /dev/null @@ -1,442 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 Intel Corp -# -# Authors: Junjie-Huang -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -""" -[PoC]Uniform Airflow using live migration - -*Description* - -It is a migration strategy based on the airflow of physical -servers. 
It generates solutions to move VM whenever a server's -airflow is higher than the specified threshold. - -*Requirements* - -* Hardware: compute node with NodeManager 3.0 support -* Software: Ceilometer component ceilometer-agent-compute running - in each compute node, and Ceilometer API can report such telemetry - "airflow, system power, inlet temperature" successfully. -* You must have at least 2 physical compute nodes to run this strategy - -*Limitations* - -- This is a proof of concept that is not meant to be used in production. -- We cannot forecast how many servers should be migrated. This is the - reason why we only plan a single virtual machine migration at a time. - So it's better to use this algorithm with `CONTINUOUS` audits. -- It assumes that live migrations are possible. -""" - -import datetime - -from oslo_config import cfg -from oslo_log import log - -from watcher._i18n import _ -from watcher.common import exception as wexc -from watcher.datasource import ceilometer as ceil -from watcher.datasource import gnocchi as gnoc -from watcher.decision_engine.model import element -from watcher.decision_engine.strategy.strategies import base - -LOG = log.getLogger(__name__) - - -class UniformAirflow(base.BaseStrategy): - """[PoC]Uniform Airflow using live migration - - *Description* - - It is a migration strategy based on the airflow of physical - servers. It generates solutions to move VM whenever a server's - airflow is higher than the specified threshold. - - *Requirements* - - * Hardware: compute node with NodeManager 3.0 support - * Software: Ceilometer component ceilometer-agent-compute running - in each compute node, and Ceilometer API can report such telemetry - "airflow, system power, inlet temperature" successfully. - * You must have at least 2 physical compute nodes to run this strategy - - *Limitations* - - - This is a proof of concept that is not meant to be used in production. - - We cannot forecast how many servers should be migrated. 
This is the - reason why we only plan a single virtual machine migration at a time. - So it's better to use this algorithm with `CONTINUOUS` audits. - - It assumes that live migrations are possible. - """ - - # choose 300 seconds as the default duration of meter aggregation - PERIOD = 300 - - METRIC_NAMES = dict( - ceilometer=dict( - # The meter to report Airflow of physical server in ceilometer - host_airflow='hardware.ipmi.node.airflow', - # The meter to report inlet temperature of physical server - # in ceilometer - host_inlet_temp='hardware.ipmi.node.temperature', - # The meter to report system power of physical server in ceilometer - host_power='hardware.ipmi.node.power'), - gnocchi=dict( - # The meter to report Airflow of physical server in gnocchi - host_airflow='hardware.ipmi.node.airflow', - # The meter to report inlet temperature of physical server - # in gnocchi - host_inlet_temp='hardware.ipmi.node.temperature', - # The meter to report system power of physical server in gnocchi - host_power='hardware.ipmi.node.power'), - ) - - MIGRATION = "migrate" - - def __init__(self, config, osc=None): - """Using live migration - - :param config: A mapping containing the configuration of this strategy - :type config: dict - :param osc: an OpenStackClients object - """ - super(UniformAirflow, self).__init__(config, osc) - # The migration plan will be triggered when the airflow reaches - # threshold - self.meter_name_airflow = self.METRIC_NAMES[ - self.config.datasource]['host_airflow'] - self.meter_name_inlet_t = self.METRIC_NAMES[ - self.config.datasource]['host_inlet_temp'] - self.meter_name_power = self.METRIC_NAMES[ - self.config.datasource]['host_power'] - self._ceilometer = None - self._gnocchi = None - self._period = self.PERIOD - - @property - def ceilometer(self): - if self._ceilometer is None: - self._ceilometer = ceil.CeilometerHelper(osc=self.osc) - return self._ceilometer - - @ceilometer.setter - def ceilometer(self, c): - self._ceilometer = c - - 
@property - def gnocchi(self): - if self._gnocchi is None: - self._gnocchi = gnoc.GnocchiHelper(osc=self.osc) - return self._gnocchi - - @gnocchi.setter - def gnocchi(self, g): - self._gnocchi = g - - @classmethod - def get_name(cls): - return "uniform_airflow" - - @classmethod - def get_display_name(cls): - return _("Uniform airflow migration strategy") - - @classmethod - def get_translatable_display_name(cls): - return "Uniform airflow migration strategy" - - @classmethod - def get_goal_name(cls): - return "airflow_optimization" - - @property - def granularity(self): - return self.input_parameters.get('granularity', 300) - - @classmethod - def get_schema(cls): - # Mandatory default setting for each element - return { - "properties": { - "threshold_airflow": { - "description": ("airflow threshold for migration, Unit is " - "0.1CFM"), - "type": "number", - "default": 400.0 - }, - "threshold_inlet_t": { - "description": ("inlet temperature threshold for " - "migration decision"), - "type": "number", - "default": 28.0 - }, - "threshold_power": { - "description": ("system power threshold for migration " - "decision"), - "type": "number", - "default": 350.0 - }, - "period": { - "description": "aggregate time period of ceilometer", - "type": "number", - "default": 300 - }, - "granularity": { - "description": "The time between two measures in an " - "aggregated timeseries of a metric.", - "type": "number", - "default": 300 - }, - }, - } - - @classmethod - def get_config_opts(cls): - return [ - cfg.StrOpt( - "datasource", - help="Data source to use in order to query the needed metrics", - default="ceilometer", - choices=["ceilometer", "gnocchi"]) - ] - - def calculate_used_resource(self, node): - """Compute the used vcpus, memory and disk based on instance flavors""" - instances = self.compute_model.get_node_instances(node) - vcpus_used = 0 - memory_mb_used = 0 - disk_gb_used = 0 - for instance in instances: - vcpus_used += instance.vcpus - memory_mb_used += 
instance.memory - disk_gb_used += instance.disk - - return vcpus_used, memory_mb_used, disk_gb_used - - def choose_instance_to_migrate(self, hosts): - """Pick up an active instance instance to migrate from provided hosts - - :param hosts: the array of dict which contains node object - """ - instances_tobe_migrate = [] - for nodemap in hosts: - source_node = nodemap['node'] - source_instances = self.compute_model.get_node_instances( - source_node) - if source_instances: - if self.config.datasource == "ceilometer": - inlet_t = self.ceilometer.statistic_aggregation( - resource_id=source_node.uuid, - meter_name=self.meter_name_inlet_t, - period=self._period, - aggregate='avg') - power = self.ceilometer.statistic_aggregation( - resource_id=source_node.uuid, - meter_name=self.meter_name_power, - period=self._period, - aggregate='avg') - elif self.config.datasource == "gnocchi": - stop_time = datetime.datetime.utcnow() - start_time = stop_time - datetime.timedelta( - seconds=int(self._period)) - inlet_t = self.gnocchi.statistic_aggregation( - resource_id=source_node.uuid, - metric=self.meter_name_inlet_t, - granularity=self.granularity, - start_time=start_time, - stop_time=stop_time, - aggregation='mean') - power = self.gnocchi.statistic_aggregation( - resource_id=source_node.uuid, - metric=self.meter_name_power, - granularity=self.granularity, - start_time=start_time, - stop_time=stop_time, - aggregation='mean') - if (power < self.threshold_power and - inlet_t < self.threshold_inlet_t): - # hardware issue, migrate all instances from this node - for instance in source_instances: - instances_tobe_migrate.append(instance) - return source_node, instances_tobe_migrate - else: - # migrate the first active instance - for instance in source_instances: - if (instance.state != - element.InstanceState.ACTIVE.value): - LOG.info( - "Instance not active, skipped: %s", - instance.uuid) - continue - instances_tobe_migrate.append(instance) - return source_node, instances_tobe_migrate - 
else: - LOG.info("Instance not found on node: %s", - source_node.uuid) - - def filter_destination_hosts(self, hosts, instances_to_migrate): - """Find instance and host with sufficient available resources""" - # large instances go first - instances_to_migrate = sorted( - instances_to_migrate, reverse=True, - key=lambda x: (x.vcpus)) - # find hosts for instances - destination_hosts = [] - for instance_to_migrate in instances_to_migrate: - required_cores = instance_to_migrate.vcpus - required_disk = instance_to_migrate.disk - required_mem = instance_to_migrate.memory - dest_migrate_info = {} - for nodemap in hosts: - host = nodemap['node'] - if 'cores_used' not in nodemap: - # calculate the available resources - nodemap['cores_used'], nodemap['mem_used'],\ - nodemap['disk_used'] = self.calculate_used_resource( - host) - cores_available = (host.vcpus - - nodemap['cores_used']) - disk_available = (host.disk - - nodemap['disk_used']) - mem_available = ( - host.memory - nodemap['mem_used']) - if (cores_available >= required_cores and - disk_available >= required_disk and - mem_available >= required_mem): - dest_migrate_info['instance'] = instance_to_migrate - dest_migrate_info['node'] = host - nodemap['cores_used'] += required_cores - nodemap['mem_used'] += required_mem - nodemap['disk_used'] += required_disk - destination_hosts.append(dest_migrate_info) - break - # check if all instances have target hosts - if len(destination_hosts) != len(instances_to_migrate): - LOG.warning("Not all target hosts could be found; it might " - "be because there is not enough resource") - return None - return destination_hosts - - def group_hosts_by_airflow(self): - """Group hosts based on airflow meters""" - - nodes = self.compute_model.get_all_compute_nodes() - if not nodes: - raise wexc.ClusterEmpty() - overload_hosts = [] - nonoverload_hosts = [] - for node_id in nodes: - airflow = None - node = self.compute_model.get_node_by_uuid( - node_id) - resource_id = node.uuid - if 
self.config.datasource == "ceilometer": - airflow = self.ceilometer.statistic_aggregation( - resource_id=resource_id, - meter_name=self.meter_name_airflow, - period=self._period, - aggregate='avg') - elif self.config.datasource == "gnocchi": - stop_time = datetime.datetime.utcnow() - start_time = stop_time - datetime.timedelta( - seconds=int(self._period)) - airflow = self.gnocchi.statistic_aggregation( - resource_id=resource_id, - metric=self.meter_name_airflow, - granularity=self.granularity, - start_time=start_time, - stop_time=stop_time, - aggregation='mean') - # some hosts may not have airflow meter, remove from target - if airflow is None: - LOG.warning("%s: no airflow data", resource_id) - continue - - LOG.debug("%s: airflow %f" % (resource_id, airflow)) - nodemap = {'node': node, 'airflow': airflow} - if airflow >= self.threshold_airflow: - # mark the node to release resources - overload_hosts.append(nodemap) - else: - nonoverload_hosts.append(nodemap) - return overload_hosts, nonoverload_hosts - - def pre_execute(self): - LOG.debug("Initializing Uniform Airflow Strategy") - - if not self.compute_model: - raise wexc.ClusterStateNotDefined() - - if self.compute_model.stale: - raise wexc.ClusterStateStale() - - LOG.debug(self.compute_model.to_string()) - - def do_execute(self): - self.threshold_airflow = self.input_parameters.threshold_airflow - self.threshold_inlet_t = self.input_parameters.threshold_inlet_t - self.threshold_power = self.input_parameters.threshold_power - self._period = self.input_parameters.period - source_nodes, target_nodes = self.group_hosts_by_airflow() - - if not source_nodes: - LOG.debug("No hosts require optimization") - return self.solution - - if not target_nodes: - LOG.warning("No hosts currently have airflow under %s, " - "therefore there are no possible target " - "hosts for any migration", - self.threshold_airflow) - return self.solution - - # migrate the instance from server with largest airflow first - source_nodes = 
sorted(source_nodes, - reverse=True, - key=lambda x: (x["airflow"])) - instances_to_migrate = self.choose_instance_to_migrate(source_nodes) - if not instances_to_migrate: - return self.solution - source_node, instances_src = instances_to_migrate - # sort host with airflow - target_nodes = sorted(target_nodes, key=lambda x: (x["airflow"])) - # find the hosts that have enough resource - # for the instance to be migrated - destination_hosts = self.filter_destination_hosts( - target_nodes, instances_src) - if not destination_hosts: - LOG.warning("No target host could be found; it might " - "be because there is not enough resources") - return self.solution - # generate solution to migrate the instance to the dest server, - for info in destination_hosts: - instance = info['instance'] - destination_node = info['node'] - if self.compute_model.migrate_instance( - instance, source_node, destination_node): - parameters = {'migration_type': 'live', - 'source_node': source_node.uuid, - 'destination_node': destination_node.uuid} - self.solution.add_action(action_type=self.MIGRATION, - resource_id=instance.uuid, - input_parameters=parameters) - - def post_execute(self): - self.solution.model = self.compute_model - # TODO(v-francoise): Add the indicators to the solution - - LOG.debug(self.compute_model.to_string()) diff --git a/watcher/decision_engine/strategy/strategies/vm_workload_consolidation.py b/watcher/decision_engine/strategy/strategies/vm_workload_consolidation.py deleted file mode 100755 index 681f34d..0000000 --- a/watcher/decision_engine/strategy/strategies/vm_workload_consolidation.py +++ /dev/null @@ -1,651 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Authors: Vojtech CIMA -# Bruno GRAZIOLI -# Sean MURPHY -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -""" -*VM Workload Consolidation Strategy* - -A load consolidation strategy based on heuristic first-fit -algorithm which focuses on measured CPU utilization and tries to -minimize hosts which have too much or too little load respecting -resource capacity constraints. - -This strategy produces a solution resulting in more efficient -utilization of cluster resources using following four phases: - -* Offload phase - handling over-utilized resources -* Consolidation phase - handling under-utilized resources -* Solution optimization - reducing number of migrations -* Disability of unused compute nodes - -A capacity coefficients (cc) might be used to adjust optimization -thresholds. Different resources may require different coefficient -values as well as setting up different coefficient values in both -phases may lead to to more efficient consolidation in the end. -If the cc equals 1 the full resource capacity may be used, cc -values lower than 1 will lead to resource under utilization and -values higher than 1 will lead to resource overbooking. -e.g. If targeted utilization is 80 percent of a compute node capacity, -the coefficient in the consolidation phase will be 0.8, but -may any lower value in the offloading phase. The lower it gets -the cluster will appear more released (distributed) for the -following consolidation phase. - -As this strategy leverages VM live migration to move the load -from one compute node to another, this feature needs to be set up -correctly on all compute nodes within the cluster. 
-This strategy assumes it is possible to live migrate any VM from -an active compute node to any other active compute node. -""" -import datetime - -from oslo_config import cfg -from oslo_log import log -import six - -from watcher._i18n import _ -from watcher.common import exception -from watcher.datasource import ceilometer as ceil -from watcher.datasource import gnocchi as gnoc -from watcher.decision_engine.model import element -from watcher.decision_engine.strategy.strategies import base - -LOG = log.getLogger(__name__) - - -class VMWorkloadConsolidation(base.ServerConsolidationBaseStrategy): - """VM Workload Consolidation Strategy""" - - HOST_CPU_USAGE_METRIC_NAME = 'compute.node.cpu.percent' - INSTANCE_CPU_USAGE_METRIC_NAME = 'cpu_util' - - METRIC_NAMES = dict( - ceilometer=dict( - cpu_util_metric='cpu_util', - ram_util_metric='memory.usage', - ram_alloc_metric='memory', - disk_alloc_metric='disk.root.size'), - gnocchi=dict( - cpu_util_metric='cpu_util', - ram_util_metric='memory.usage', - ram_alloc_metric='memory', - disk_alloc_metric='disk.root.size'), - ) - - MIGRATION = "migrate" - CHANGE_NOVA_SERVICE_STATE = "change_nova_service_state" - - def __init__(self, config, osc=None): - super(VMWorkloadConsolidation, self).__init__(config, osc) - self._ceilometer = None - self._gnocchi = None - self.number_of_migrations = 0 - self.number_of_released_nodes = 0 - # self.ceilometer_instance_data_cache = dict() - self.datasource_instance_data_cache = dict() - - @classmethod - def get_name(cls): - return "vm_workload_consolidation" - - @classmethod - def get_display_name(cls): - return _("VM Workload Consolidation Strategy") - - @classmethod - def get_translatable_display_name(cls): - return "VM Workload Consolidation Strategy" - - @property - def period(self): - return self.input_parameters.get('period', 3600) - - @property - def ceilometer(self): - if self._ceilometer is None: - self._ceilometer = ceil.CeilometerHelper(osc=self.osc) - return self._ceilometer - - 
@ceilometer.setter - def ceilometer(self, ceilometer): - self._ceilometer = ceilometer - - @property - def gnocchi(self): - if self._gnocchi is None: - self._gnocchi = gnoc.GnocchiHelper(osc=self.osc) - return self._gnocchi - - @gnocchi.setter - def gnocchi(self, gnocchi): - self._gnocchi = gnocchi - - @property - def granularity(self): - return self.input_parameters.get('granularity', 300) - - @classmethod - def get_schema(cls): - # Mandatory default setting for each element - return { - "properties": { - "period": { - "description": "The time interval in seconds for " - "getting statistic aggregation", - "type": "number", - "default": 3600 - }, - "granularity": { - "description": "The time between two measures in an " - "aggregated timeseries of a metric.", - "type": "number", - "default": 300 - }, - } - } - - @classmethod - def get_config_opts(cls): - return [ - cfg.StrOpt( - "datasource", - help="Data source to use in order to query the needed metrics", - default="ceilometer", - choices=["ceilometer", "gnocchi"]) - ] - - def get_instance_state_str(self, instance): - """Get instance state in string format. - - :param instance: - """ - if isinstance(instance.state, six.string_types): - return instance.state - elif isinstance(instance.state, element.InstanceState): - return instance.state.value - else: - LOG.error('Unexpected instance state type, ' - 'state=%(state)s, state_type=%(st)s.' % - dict(state=instance.state, - st=type(instance.state))) - raise exception.WatcherException - - def get_node_status_str(self, node): - """Get node status in string format. - - :param node: - """ - if isinstance(node.status, six.string_types): - return node.status - elif isinstance(node.status, element.ServiceState): - return node.status.value - else: - LOG.error('Unexpected node status type, ' - 'status=%(status)s, status_type=%(st)s.' 
% - dict(status=node.status, - st=type(node.status))) - raise exception.WatcherException - - def add_action_enable_compute_node(self, node): - """Add an action for node enabler into the solution. - - :param node: node object - :return: None - """ - params = {'state': element.ServiceState.ENABLED.value} - self.solution.add_action( - action_type=self.CHANGE_NOVA_SERVICE_STATE, - resource_id=node.uuid, - input_parameters=params) - self.number_of_released_nodes -= 1 - - def add_action_disable_node(self, node): - """Add an action for node disability into the solution. - - :param node: node object - :return: None - """ - params = {'state': element.ServiceState.DISABLED.value} - self.solution.add_action( - action_type=self.CHANGE_NOVA_SERVICE_STATE, - resource_id=node.uuid, - input_parameters=params) - self.number_of_released_nodes += 1 - - def add_migration(self, instance, source_node, destination_node): - """Add an action for VM migration into the solution. - - :param instance: instance object - :param source_node: node object - :param destination_node: node object - :return: None - """ - instance_state_str = self.get_instance_state_str(instance) - if instance_state_str != element.InstanceState.ACTIVE.value: - # Watcher currently only supports live VM migration and block live - # VM migration which both requires migrated VM to be active. - # When supported, the cold migration may be used as a fallback - # migration mechanism to move non active VMs. - LOG.error( - 'Cannot live migrate: instance_uuid=%(instance_uuid)s, ' - 'state=%(instance_state)s.' % dict( - instance_uuid=instance.uuid, - instance_state=instance_state_str)) - return - - migration_type = 'live' - - # Here will makes repeated actions to enable the same compute node, - # when migrating VMs to the destination node which is disabled. - # Whether should we remove the same actions in the solution??? 
- destination_node_status_str = self.get_node_status_str( - destination_node) - if destination_node_status_str == element.ServiceState.DISABLED.value: - self.add_action_enable_compute_node(destination_node) - - if self.compute_model.migrate_instance( - instance, source_node, destination_node): - params = {'migration_type': migration_type, - 'source_node': source_node.uuid, - 'destination_node': destination_node.uuid} - self.solution.add_action(action_type=self.MIGRATION, - resource_id=instance.uuid, - input_parameters=params) - self.number_of_migrations += 1 - - def disable_unused_nodes(self): - """Generate actions for disabling unused nodes. - - :return: None - """ - for node in self.compute_model.get_all_compute_nodes().values(): - if (len(self.compute_model.get_node_instances(node)) == 0 and - node.status != - element.ServiceState.DISABLED.value): - self.add_action_disable_node(node) - - def get_instance_utilization(self, instance): - """Collect cpu, ram and disk utilization statistics of a VM. 
- - :param instance: instance object - :param aggr: string - :return: dict(cpu(number of vcpus used), ram(MB used), disk(B used)) - """ - instance_cpu_util = None - instance_ram_util = None - instance_disk_util = None - - if instance.uuid in self.datasource_instance_data_cache.keys(): - return self.datasource_instance_data_cache.get(instance.uuid) - - cpu_util_metric = self.METRIC_NAMES[ - self.config.datasource]['cpu_util_metric'] - ram_util_metric = self.METRIC_NAMES[ - self.config.datasource]['ram_util_metric'] - ram_alloc_metric = self.METRIC_NAMES[ - self.config.datasource]['ram_alloc_metric'] - disk_alloc_metric = self.METRIC_NAMES[ - self.config.datasource]['disk_alloc_metric'] - - if self.config.datasource == "ceilometer": - instance_cpu_util = self.ceilometer.statistic_aggregation( - resource_id=instance.uuid, meter_name=cpu_util_metric, - period=self.period, aggregate='avg') - instance_ram_util = self.ceilometer.statistic_aggregation( - resource_id=instance.uuid, meter_name=ram_util_metric, - period=self.period, aggregate='avg') - if not instance_ram_util: - instance_ram_util = self.ceilometer.statistic_aggregation( - resource_id=instance.uuid, meter_name=ram_alloc_metric, - period=self.period, aggregate='avg') - instance_disk_util = self.ceilometer.statistic_aggregation( - resource_id=instance.uuid, meter_name=disk_alloc_metric, - period=self.period, aggregate='avg') - elif self.config.datasource == "gnocchi": - stop_time = datetime.datetime.utcnow() - start_time = stop_time - datetime.timedelta( - seconds=int(self.period)) - instance_cpu_util = self.gnocchi.statistic_aggregation( - resource_id=instance.uuid, - metric=cpu_util_metric, - granularity=self.granularity, - start_time=start_time, - stop_time=stop_time, - aggregation='mean' - ) - instance_ram_util = self.gnocchi.statistic_aggregation( - resource_id=instance.uuid, - metric=ram_util_metric, - granularity=self.granularity, - start_time=start_time, - stop_time=stop_time, - aggregation='mean' - ) - 
if not instance_ram_util: - instance_ram_util = self.gnocchi.statistic_aggregation( - resource_id=instance.uuid, - metric=ram_alloc_metric, - granularity=self.granularity, - start_time=start_time, - stop_time=stop_time, - aggregation='mean' - ) - instance_disk_util = self.gnocchi.statistic_aggregation( - resource_id=instance.uuid, - metric=disk_alloc_metric, - granularity=self.granularity, - start_time=start_time, - stop_time=stop_time, - aggregation='mean' - ) - if instance_cpu_util: - total_cpu_utilization = ( - instance.vcpus * (instance_cpu_util / 100.0)) - else: - total_cpu_utilization = instance.vcpus - - if not instance_ram_util: - instance_ram_util = instance.memory - LOG.warning('No values returned by %s for memory.usage, ' - 'use instance flavor ram value', instance.uuid) - - if not instance_disk_util: - instance_disk_util = instance.disk - LOG.warning('No values returned by %s for disk.root.size, ' - 'use instance flavor disk value', instance.uuid) - - self.datasource_instance_data_cache[instance.uuid] = dict( - cpu=total_cpu_utilization, ram=instance_ram_util, - disk=instance_disk_util) - return self.datasource_instance_data_cache.get(instance.uuid) - - def get_node_utilization(self, node): - """Collect cpu, ram and disk utilization statistics of a node. - - :param node: node object - :param aggr: string - :return: dict(cpu(number of cores used), ram(MB used), disk(B used)) - """ - node_instances = self.compute_model.get_node_instances(node) - node_ram_util = 0 - node_disk_util = 0 - node_cpu_util = 0 - for instance in node_instances: - instance_util = self.get_instance_utilization( - instance) - node_cpu_util += instance_util['cpu'] - node_ram_util += instance_util['ram'] - node_disk_util += instance_util['disk'] - - return dict(cpu=node_cpu_util, ram=node_ram_util, - disk=node_disk_util) - - def get_node_capacity(self, node): - """Collect cpu, ram and disk capacity of a node. 
- - :param node: node object - :return: dict(cpu(cores), ram(MB), disk(B)) - """ - return dict(cpu=node.vcpus, ram=node.memory, disk=node.disk_capacity) - - def get_relative_node_utilization(self, node): - """Return relative node utilization. - - :param node: node object - :return: {'cpu': <0,1>, 'ram': <0,1>, 'disk': <0,1>} - """ - relative_node_utilization = {} - util = self.get_node_utilization(node) - cap = self.get_node_capacity(node) - for k in util.keys(): - relative_node_utilization[k] = float(util[k]) / float(cap[k]) - return relative_node_utilization - - def get_relative_cluster_utilization(self): - """Calculate relative cluster utilization (rcu). - - RCU is an average of relative utilizations (rhu) of active nodes. - :return: {'cpu': <0,1>, 'ram': <0,1>, 'disk': <0,1>} - """ - nodes = self.compute_model.get_all_compute_nodes().values() - rcu = {} - counters = {} - for node in nodes: - node_status_str = self.get_node_status_str(node) - if node_status_str == element.ServiceState.ENABLED.value: - rhu = self.get_relative_node_utilization(node) - for k in rhu.keys(): - if k not in rcu: - rcu[k] = 0 - if k not in counters: - counters[k] = 0 - rcu[k] += rhu[k] - counters[k] += 1 - for k in rcu.keys(): - rcu[k] /= counters[k] - return rcu - - def is_overloaded(self, node, cc): - """Indicate whether a node is overloaded. - - This considers provided resource capacity coefficients (cc). - :param node: node object - :param cc: dictionary containing resource capacity coefficients - :return: [True, False] - """ - node_capacity = self.get_node_capacity(node) - node_utilization = self.get_node_utilization( - node) - metrics = ['cpu'] - for m in metrics: - if node_utilization[m] > node_capacity[m] * cc[m]: - return True - return False - - def instance_fits(self, instance, node, cc): - """Indicate whether is a node able to accommodate a VM. - - This considers provided resource capacity coefficients (cc). 
- :param instance: :py:class:`~.element.Instance` - :param node: node object - :param cc: dictionary containing resource capacity coefficients - :return: [True, False] - """ - node_capacity = self.get_node_capacity(node) - node_utilization = self.get_node_utilization(node) - instance_utilization = self.get_instance_utilization(instance) - metrics = ['cpu', 'ram', 'disk'] - for m in metrics: - if (instance_utilization[m] + node_utilization[m] > - node_capacity[m] * cc[m]): - return False - return True - - def optimize_solution(self): - """Optimize solution. - - This is done by eliminating unnecessary or circular set of migrations - which can be replaced by a more efficient solution. - e.g.: - - * A->B, B->C => replace migrations A->B, B->C with - a single migration A->C as both solution result in - VM running on node C which can be achieved with - one migration instead of two. - * A->B, B->A => remove A->B and B->A as they do not result - in a new VM placement. - """ - migrate_actions = ( - a for a in self.solution.actions if a[ - 'action_type'] == self.MIGRATION) - instance_to_be_migrated = ( - a['input_parameters']['resource_id'] for a in migrate_actions) - instance_uuids = list(set(instance_to_be_migrated)) - for instance_uuid in instance_uuids: - actions = list( - a for a in self.solution.actions if a[ - 'input_parameters'][ - 'resource_id'] == instance_uuid) - if len(actions) > 1: - src_uuid = actions[0]['input_parameters']['source_node'] - dst_uuid = actions[-1]['input_parameters']['destination_node'] - for a in actions: - self.solution.actions.remove(a) - self.number_of_migrations -= 1 - src_node = self.compute_model.get_node_by_uuid(src_uuid) - dst_node = self.compute_model.get_node_by_uuid(dst_uuid) - instance = self.compute_model.get_instance_by_uuid( - instance_uuid) - if self.compute_model.migrate_instance( - instance, dst_node, src_node): - self.add_migration(instance, src_node, dst_node) - - def offload_phase(self, cc): - """Perform offloading phase. 
- - This considers provided resource capacity coefficients. - Offload phase performing first-fit based bin packing to offload - overloaded nodes. This is done in a fashion of moving - the least CPU utilized VM first as live migration these - generally causes less troubles. This phase results in a cluster - with no overloaded nodes. - * This phase is be able to enable disabled nodes (if needed - and any available) in the case of the resource capacity provided by - active nodes is not able to accommodate all the load. - As the offload phase is later followed by the consolidation phase, - the node enabler in this phase doesn't necessarily results - in more enabled nodes in the final solution. - - :param cc: dictionary containing resource capacity coefficients - """ - sorted_nodes = sorted( - self.compute_model.get_all_compute_nodes().values(), - key=lambda x: self.get_node_utilization(x)['cpu']) - for node in reversed(sorted_nodes): - if self.is_overloaded(node, cc): - for instance in sorted( - self.compute_model.get_node_instances(node), - key=lambda x: self.get_instance_utilization( - x)['cpu'] - ): - for destination_node in reversed(sorted_nodes): - if self.instance_fits( - instance, destination_node, cc): - self.add_migration(instance, node, - destination_node) - break - if not self.is_overloaded(node, cc): - break - - def consolidation_phase(self, cc): - """Perform consolidation phase. - - This considers provided resource capacity coefficients. - Consolidation phase performing first-fit based bin packing. - First, nodes with the lowest cpu utilization are consolidated - by moving their load to nodes with the highest cpu utilization - which can accommodate the load. In this phase the most cpu utilized - VMs are prioritized as their load is more difficult to accommodate - in the system than less cpu utilized VMs which can be later used - to fill smaller CPU capacity gaps. 
- - :param cc: dictionary containing resource capacity coefficients - """ - sorted_nodes = sorted( - self.compute_model.get_all_compute_nodes().values(), - key=lambda x: self.get_node_utilization(x)['cpu']) - asc = 0 - for node in sorted_nodes: - instances = sorted( - self.compute_model.get_node_instances(node), - key=lambda x: self.get_instance_utilization(x)['cpu']) - for instance in reversed(instances): - dsc = len(sorted_nodes) - 1 - for destination_node in reversed(sorted_nodes): - if asc >= dsc: - break - if self.instance_fits( - instance, destination_node, cc): - self.add_migration(instance, node, - destination_node) - break - dsc -= 1 - asc += 1 - - def pre_execute(self): - if not self.compute_model: - raise exception.ClusterStateNotDefined() - - if self.compute_model.stale: - raise exception.ClusterStateStale() - - LOG.debug(self.compute_model.to_string()) - - def do_execute(self): - """Execute strategy. - - This strategy produces a solution resulting in more - efficient utilization of cluster resources using following - four phases: - - * Offload phase - handling over-utilized resources - * Consolidation phase - handling under-utilized resources - * Solution optimization - reducing number of migrations - * Disability of unused nodes - - :param original_model: root_model object - """ - LOG.info('Executing Smart Strategy') - rcu = self.get_relative_cluster_utilization() - - cc = {'cpu': 1.0, 'ram': 1.0, 'disk': 1.0} - - # Offloading phase - self.offload_phase(cc) - - # Consolidation phase - self.consolidation_phase(cc) - - # Optimize solution - self.optimize_solution() - - # disable unused nodes - self.disable_unused_nodes() - - rcu_after = self.get_relative_cluster_utilization() - info = { - "compute_nodes_count": len( - self.compute_model.get_all_compute_nodes()), - 'number_of_migrations': self.number_of_migrations, - 'number_of_released_nodes': - self.number_of_released_nodes, - 'relative_cluster_utilization_before': str(rcu), - 
'relative_cluster_utilization_after': str(rcu_after) - } - - LOG.debug(info) - - def post_execute(self): - self.solution.set_efficacy_indicators( - compute_nodes_count=len( - self.compute_model.get_all_compute_nodes()), - released_compute_nodes_count=self.number_of_released_nodes, - instance_migrations_count=self.number_of_migrations, - ) - - LOG.debug(self.compute_model.to_string()) diff --git a/watcher/decision_engine/strategy/strategies/workload_balance.py b/watcher/decision_engine/strategy/strategies/workload_balance.py deleted file mode 100644 index 63e638c..0000000 --- a/watcher/decision_engine/strategy/strategies/workload_balance.py +++ /dev/null @@ -1,414 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 Intel Corp -# -# Authors: Junjie-Huang -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -""" -*[PoC]Workload balance using live migration* - -*Description* - -This strategy migrates a VM based on the VM workload of the hosts. -It makes decision to migrate a workload whenever a host's CPU -utilization % is higher than the specified threshold. The VM to -be moved should make the host close to average workload of all -hosts nodes. - -*Requirements* - -* Hardware: compute node should use the same physical CPUs -* Software: Ceilometer component ceilometer-agent-compute - running in each compute node, and Ceilometer API can - report such telemetry "cpu_util" successfully. -* You must have at least 2 physical compute nodes to run - this strategy. 
- -*Limitations* - -- This is a proof of concept that is not meant to be used in - production. -- We cannot forecast how many servers should be migrated. - This is the reason why we only plan a single virtual - machine migration at a time. So it's better to use this - algorithm with `CONTINUOUS` audits. -""" - -from __future__ import division -import datetime - -from oslo_config import cfg -from oslo_log import log - -from watcher._i18n import _ -from watcher.common import exception as wexc -from watcher.datasource import ceilometer as ceil -from watcher.datasource import gnocchi as gnoc -from watcher.decision_engine.model import element -from watcher.decision_engine.strategy.strategies import base - -LOG = log.getLogger(__name__) - - -class WorkloadBalance(base.WorkloadStabilizationBaseStrategy): - """[PoC]Workload balance using live migration - - *Description* - - It is a migration strategy based on the VM workload of physical - servers. It generates solutions to move a workload whenever a server's - CPU utilization % is higher than the specified threshold. - The VM to be moved should make the host close to average workload - of all compute nodes. - - *Requirements* - - * Hardware: compute node should use the same physical CPUs - * Software: Ceilometer component ceilometer-agent-compute running - in each compute node, and Ceilometer API can report such telemetry - "cpu_util" successfully. - * You must have at least 2 physical compute nodes to run this strategy - - *Limitations* - - - This is a proof of concept that is not meant to be used in production - - We cannot forecast how many servers should be migrated. This is the - reason why we only plan a single virtual machine migration at a time. - So it's better to use this algorithm with `CONTINUOUS` audits. 
- - It assume that live migrations are possible - """ - - # The meter to report CPU utilization % of VM in ceilometer - METER_NAME = "cpu_util" - # Unit: %, value range is [0 , 100] - - MIGRATION = "migrate" - - def __init__(self, config, osc=None): - """Workload balance using live migration - - :param config: A mapping containing the configuration of this strategy - :type config: :py:class:`~.Struct` instance - :param osc: :py:class:`~.OpenStackClients` instance - """ - super(WorkloadBalance, self).__init__(config, osc) - # the migration plan will be triggered when the CPU utilization % - # reaches threshold - self._meter = self.METER_NAME - self._ceilometer = None - self._gnocchi = None - - @property - def ceilometer(self): - if self._ceilometer is None: - self._ceilometer = ceil.CeilometerHelper(osc=self.osc) - return self._ceilometer - - @ceilometer.setter - def ceilometer(self, c): - self._ceilometer = c - - @property - def gnocchi(self): - if self._gnocchi is None: - self._gnocchi = gnoc.GnocchiHelper(osc=self.osc) - return self._gnocchi - - @gnocchi.setter - def gnocchi(self, gnocchi): - self._gnocchi = gnocchi - - @classmethod - def get_name(cls): - return "workload_balance" - - @classmethod - def get_display_name(cls): - return _("Workload Balance Migration Strategy") - - @classmethod - def get_translatable_display_name(cls): - return "Workload Balance Migration Strategy" - - @property - def granularity(self): - return self.input_parameters.get('granularity', 300) - - @classmethod - def get_schema(cls): - # Mandatory default setting for each element - return { - "properties": { - "threshold": { - "description": "workload threshold for migration", - "type": "number", - "default": 25.0 - }, - "period": { - "description": "aggregate time period of ceilometer", - "type": "number", - "default": 300 - }, - "granularity": { - "description": "The time between two measures in an " - "aggregated timeseries of a metric.", - "type": "number", - "default": 300 - }, - 
}, - } - - @classmethod - def get_config_opts(cls): - return [ - cfg.StrOpt( - "datasource", - help="Data source to use in order to query the needed metrics", - default="ceilometer", - choices=["ceilometer", "gnocchi"]) - ] - - def calculate_used_resource(self, node): - """Calculate the used vcpus, memory and disk based on VM flavors""" - instances = self.compute_model.get_node_instances(node) - vcpus_used = 0 - memory_mb_used = 0 - disk_gb_used = 0 - for instance in instances: - vcpus_used += instance.vcpus - memory_mb_used += instance.memory - disk_gb_used += instance.disk - - return vcpus_used, memory_mb_used, disk_gb_used - - def choose_instance_to_migrate(self, hosts, avg_workload, workload_cache): - """Pick up an active instance instance to migrate from provided hosts - - :param hosts: the array of dict which contains node object - :param avg_workload: the average workload value of all nodes - :param workload_cache: the map contains instance to workload mapping - """ - for instance_data in hosts: - source_node = instance_data['node'] - source_instances = self.compute_model.get_node_instances( - source_node) - if source_instances: - delta_workload = instance_data['workload'] - avg_workload - min_delta = 1000000 - instance_id = None - for instance in source_instances: - try: - # select the first active VM to migrate - if (instance.state != - element.InstanceState.ACTIVE.value): - LOG.debug("Instance not active, skipped: %s", - instance.uuid) - continue - current_delta = ( - delta_workload - workload_cache[instance.uuid]) - if 0 <= current_delta < min_delta: - min_delta = current_delta - instance_id = instance.uuid - except wexc.InstanceNotFound: - LOG.error("Instance not found; error: %s", - instance_id) - if instance_id: - return (source_node, - self.compute_model.get_instance_by_uuid( - instance_id)) - else: - LOG.info("VM not found from node: %s", - source_node.uuid) - - def filter_destination_hosts(self, hosts, instance_to_migrate, - avg_workload, 
workload_cache): - """Only return hosts with sufficient available resources""" - required_cores = instance_to_migrate.vcpus - required_disk = instance_to_migrate.disk - required_mem = instance_to_migrate.memory - - # filter nodes without enough resource - destination_hosts = [] - src_instance_workload = workload_cache[instance_to_migrate.uuid] - for instance_data in hosts: - host = instance_data['node'] - workload = instance_data['workload'] - # calculate the available resources - cores_used, mem_used, disk_used = self.calculate_used_resource( - host) - cores_available = host.vcpus - cores_used - disk_available = host.disk - disk_used - mem_available = host.memory - mem_used - if ( - cores_available >= required_cores and - disk_available >= required_disk and - mem_available >= required_mem and - ((src_instance_workload + workload) < - self.threshold / 100 * host.vcpus) - ): - destination_hosts.append(instance_data) - - return destination_hosts - - def group_hosts_by_cpu_util(self): - """Calculate the workloads of each node - - try to find out the nodes which have reached threshold - and the nodes which are under threshold. - and also calculate the average workload value of all nodes. - and also generate the instance workload map. 
- """ - - nodes = self.compute_model.get_all_compute_nodes() - cluster_size = len(nodes) - if not nodes: - raise wexc.ClusterEmpty() - overload_hosts = [] - nonoverload_hosts = [] - # total workload of cluster - cluster_workload = 0.0 - # use workload_cache to store the workload of VMs for reuse purpose - workload_cache = {} - for node_id in nodes: - node = self.compute_model.get_node_by_uuid(node_id) - instances = self.compute_model.get_node_instances(node) - node_workload = 0.0 - for instance in instances: - cpu_util = None - try: - if self.config.datasource == "ceilometer": - cpu_util = self.ceilometer.statistic_aggregation( - resource_id=instance.uuid, - meter_name=self._meter, - period=self._period, - aggregate='avg') - elif self.config.datasource == "gnocchi": - stop_time = datetime.datetime.utcnow() - start_time = stop_time - datetime.timedelta( - seconds=int(self._period)) - cpu_util = self.gnocchi.statistic_aggregation( - resource_id=instance.uuid, - metric=self._meter, - granularity=self.granularity, - start_time=start_time, - stop_time=stop_time, - aggregation='mean' - ) - except Exception as exc: - LOG.exception(exc) - LOG.error("Can not get cpu_util from %s", - self.config.datasource) - continue - if cpu_util is None: - LOG.debug("Instance (%s): cpu_util is None", instance.uuid) - continue - workload_cache[instance.uuid] = cpu_util * instance.vcpus / 100 - node_workload += workload_cache[instance.uuid] - LOG.debug("VM (%s): cpu_util %f", instance.uuid, cpu_util) - node_cpu_util = node_workload / node.vcpus * 100 - - cluster_workload += node_workload - - instance_data = { - 'node': node, "cpu_util": node_cpu_util, - 'workload': node_workload} - if node_cpu_util >= self.threshold: - # mark the node to release resources - overload_hosts.append(instance_data) - else: - nonoverload_hosts.append(instance_data) - - avg_workload = cluster_workload / cluster_size - - return overload_hosts, nonoverload_hosts, avg_workload, workload_cache - - def 
pre_execute(self): - """Pre-execution phase - - This can be used to fetch some pre-requisites or data. - """ - LOG.info("Initializing Workload Balance Strategy") - - if not self.compute_model: - raise wexc.ClusterStateNotDefined() - - if self.compute_model.stale: - raise wexc.ClusterStateStale() - - LOG.debug(self.compute_model.to_string()) - - def do_execute(self): - """Strategy execution phase - - This phase is where you should put the main logic of your strategy. - """ - self.threshold = self.input_parameters.threshold - self._period = self.input_parameters.period - source_nodes, target_nodes, avg_workload, workload_cache = ( - self.group_hosts_by_cpu_util()) - - if not source_nodes: - LOG.debug("No hosts require optimization") - return self.solution - - if not target_nodes: - LOG.warning("No hosts current have CPU utilization under %s " - "percent, therefore there are no possible target " - "hosts for any migration", - self.threshold) - return self.solution - - # choose the server with largest cpu_util - source_nodes = sorted(source_nodes, - reverse=True, - key=lambda x: (x[self.METER_NAME])) - - instance_to_migrate = self.choose_instance_to_migrate( - source_nodes, avg_workload, workload_cache) - if not instance_to_migrate: - return self.solution - source_node, instance_src = instance_to_migrate - # find the hosts that have enough resource for the VM to be migrated - destination_hosts = self.filter_destination_hosts( - target_nodes, instance_src, avg_workload, workload_cache) - # sort the filtered result by workload - # pick up the lowest one as dest server - if not destination_hosts: - # for instance. 
- LOG.warning("No proper target host could be found, it might " - "be because of there's no enough CPU/Memory/DISK") - return self.solution - destination_hosts = sorted(destination_hosts, - key=lambda x: (x["cpu_util"])) - # always use the host with lowerest CPU utilization - mig_destination_node = destination_hosts[0]['node'] - # generate solution to migrate the instance to the dest server, - if self.compute_model.migrate_instance( - instance_src, source_node, mig_destination_node): - parameters = {'migration_type': 'live', - 'source_node': source_node.uuid, - 'destination_node': mig_destination_node.uuid} - self.solution.add_action(action_type=self.MIGRATION, - resource_id=instance_src.uuid, - input_parameters=parameters) - - def post_execute(self): - """Post-execution phase - - This can be used to compute the global efficacy - """ - self.solution.model = self.compute_model - - LOG.debug(self.compute_model.to_string()) diff --git a/watcher/decision_engine/strategy/strategies/workload_stabilization.py b/watcher/decision_engine/strategy/strategies/workload_stabilization.py deleted file mode 100644 index 7e3e96f..0000000 --- a/watcher/decision_engine/strategy/strategies/workload_stabilization.py +++ /dev/null @@ -1,520 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 Servionica LLC -# -# Authors: Alexander Chadin -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -""" -*Workload Stabilization control using live migration* - -This is workload stabilization strategy based on standard deviation -algorithm. The goal is to determine if there is an overload in a cluster -and respond to it by migrating VMs to stabilize the cluster. - -It assumes that live migrations are possible in your cluster. - -""" - -import copy -import datetime -import itertools -import math -import random -import re - -import oslo_cache -from oslo_config import cfg -from oslo_log import log -import oslo_utils - -from watcher._i18n import _ -from watcher.common import exception -from watcher.datasource import ceilometer as ceil -from watcher.datasource import gnocchi as gnoc -from watcher.decision_engine.model import element -from watcher.decision_engine.strategy.strategies import base - -LOG = log.getLogger(__name__) -CONF = cfg.CONF - - -def _set_memoize(conf): - oslo_cache.configure(conf) - region = oslo_cache.create_region() - configured_region = oslo_cache.configure_cache_region(conf, region) - return oslo_cache.core.get_memoization_decorator(conf, - configured_region, - 'cache') - - -class WorkloadStabilization(base.WorkloadStabilizationBaseStrategy): - """Workload Stabilization control using live migration""" - - MIGRATION = "migrate" - MEMOIZE = _set_memoize(CONF) - - def __init__(self, config, osc=None): - """Workload Stabilization control using live migration - - :param config: A mapping containing the configuration of this strategy - :type config: :py:class:`~.Struct` instance - :param osc: :py:class:`~.OpenStackClients` instance - """ - super(WorkloadStabilization, self).__init__(config, osc) - self._ceilometer = None - self._gnocchi = None - self._nova = None - self.weights = None - self.metrics = None - self.thresholds = None - self.host_choice = None - self.instance_metrics = None - self.retry_count = None - self.periods = None - - @classmethod - def get_name(cls): - return "workload_stabilization" - - @classmethod - def 
get_display_name(cls): - return _("Workload stabilization") - - @classmethod - def get_translatable_display_name(cls): - return "Workload stabilization" - - @property - def granularity(self): - return self.input_parameters.get('granularity', 300) - - @classmethod - def get_schema(cls): - return { - "properties": { - "metrics": { - "description": "Metrics used as rates of cluster loads.", - "type": "array", - "default": ["cpu_util", "memory.resident"] - }, - "thresholds": { - "description": "Dict where key is a metric and value " - "is a trigger value.", - "type": "object", - "default": {"cpu_util": 0.2, "memory.resident": 0.2} - }, - "weights": { - "description": "These weights used to calculate " - "common standard deviation. Name of weight" - " contains meter name and _weight suffix.", - "type": "object", - "default": {"cpu_util_weight": 1.0, - "memory.resident_weight": 1.0} - }, - "instance_metrics": { - "description": "Mapping to get hardware statistics using" - " instance metrics", - "type": "object", - "default": {"cpu_util": "compute.node.cpu.percent", - "memory.resident": "hardware.memory.used"} - }, - "host_choice": { - "description": "Method of host's choice. There are cycle," - " retry and fullsearch methods. " - "Cycle will iterate hosts in cycle. " - "Retry will get some hosts random " - "(count defined in retry_count option). " - "Fullsearch will return each host " - "from list.", - "type": "string", - "default": "retry" - }, - "retry_count": { - "description": "Count of random returned hosts", - "type": "number", - "default": 1 - }, - "periods": { - "description": "These periods are used to get statistic " - "aggregation for instance and host " - "metrics. The period is simply a repeating" - " interval of time into which the samples" - " are grouped for aggregation. 
Watcher " - "uses only the last period of all received" - " ones.", - "type": "object", - "default": {"instance": 720, "node": 600} - }, - "granularity": { - "description": "The time between two measures in an " - "aggregated timeseries of a metric.", - "type": "number", - "default": 300 - }, - } - } - - @classmethod - def get_config_opts(cls): - return [ - cfg.StrOpt( - "datasource", - help="Data source to use in order to query the needed metrics", - default="ceilometer", - choices=["ceilometer", "gnocchi"]) - ] - - @property - def ceilometer(self): - if self._ceilometer is None: - self._ceilometer = ceil.CeilometerHelper(osc=self.osc) - return self._ceilometer - - @property - def nova(self): - if self._nova is None: - self._nova = self.osc.nova() - return self._nova - - @nova.setter - def nova(self, n): - self._nova = n - - @ceilometer.setter - def ceilometer(self, c): - self._ceilometer = c - - @property - def gnocchi(self): - if self._gnocchi is None: - self._gnocchi = gnoc.GnocchiHelper(osc=self.osc) - return self._gnocchi - - @gnocchi.setter - def gnocchi(self, gnocchi): - self._gnocchi = gnocchi - - def transform_instance_cpu(self, instance_load, host_vcpus): - """Transform instance cpu utilization to overall host cpu utilization. - - :param instance_load: dict that contains instance uuid and - utilization info. - :param host_vcpus: int - :return: float value - """ - return (instance_load['cpu_util'] * - (instance_load['vcpus'] / float(host_vcpus))) - - @MEMOIZE - def get_instance_load(self, instance): - """Gathering instance load through ceilometer/gnocchi statistic. - - :param instance: instance for which statistic is gathered. 
- :return: dict - """ - LOG.debug('get_instance_load started') - instance_load = {'uuid': instance.uuid, 'vcpus': instance.vcpus} - for meter in self.metrics: - avg_meter = None - if self.config.datasource == "ceilometer": - avg_meter = self.ceilometer.statistic_aggregation( - resource_id=instance.uuid, - meter_name=meter, - period=self.periods['instance'], - aggregate='min' - ) - elif self.config.datasource == "gnocchi": - stop_time = datetime.datetime.utcnow() - start_time = stop_time - datetime.timedelta( - seconds=int(self.periods['instance'])) - avg_meter = self.gnocchi.statistic_aggregation( - resource_id=instance.uuid, - metric=meter, - granularity=self.granularity, - start_time=start_time, - stop_time=stop_time, - aggregation='mean' - ) - if avg_meter is None: - LOG.warning( - "No values returned by %(resource_id)s " - "for %(metric_name)s" % dict( - resource_id=instance.uuid, metric_name=meter)) - avg_meter = 0 - if meter == 'cpu_util': - avg_meter /= float(100) - instance_load[meter] = avg_meter - return instance_load - - def normalize_hosts_load(self, hosts): - normalized_hosts = copy.deepcopy(hosts) - for host in normalized_hosts: - if 'memory.resident' in normalized_hosts[host]: - node = self.compute_model.get_node_by_uuid(host) - normalized_hosts[host]['memory.resident'] /= float(node.memory) - - return normalized_hosts - - def get_available_nodes(self): - return {node_uuid: node for node_uuid, node in - self.compute_model.get_all_compute_nodes().items() - if node.state == element.ServiceState.ONLINE.value and - node.status == element.ServiceState.ENABLED.value} - - def get_hosts_load(self): - """Get load of every available host by gathering instances load""" - hosts_load = {} - for node_id, node in self.get_available_nodes().items(): - hosts_load[node_id] = {} - hosts_load[node_id]['vcpus'] = node.vcpus - for metric in self.metrics: - resource_id = '' - avg_meter = None - meter_name = self.instance_metrics[metric] - if re.match('^compute.node', 
meter_name) is not None: - resource_id = "%s_%s" % (node.uuid, node.hostname) - else: - resource_id = node_id - if self.config.datasource == "ceilometer": - avg_meter = self.ceilometer.statistic_aggregation( - resource_id=resource_id, - meter_name=self.instance_metrics[metric], - period=self.periods['node'], - aggregate='avg' - ) - elif self.config.datasource == "gnocchi": - stop_time = datetime.datetime.utcnow() - start_time = stop_time - datetime.timedelta( - seconds=int(self.periods['node'])) - avg_meter = self.gnocchi.statistic_aggregation( - resource_id=resource_id, - metric=self.instance_metrics[metric], - granularity=self.granularity, - start_time=start_time, - stop_time=stop_time, - aggregation='mean' - ) - - if avg_meter is None: - if meter_name == 'hardware.memory.used': - avg_meter = node.memory - if meter_name == 'compute.node.cpu.percent': - avg_meter = 1 - LOG.warning('No values returned by node %s for %s', - node_id, meter_name) - else: - if meter_name == 'hardware.memory.used': - avg_meter /= oslo_utils.units.Ki - if meter_name == 'compute.node.cpu.percent': - avg_meter /= 100 - hosts_load[node_id][metric] = avg_meter - return hosts_load - - def get_sd(self, hosts, meter_name): - """Get standard deviation among hosts by specified meter""" - mean = 0 - variaton = 0 - for host_id in hosts: - mean += hosts[host_id][meter_name] - mean /= len(hosts) - for host_id in hosts: - variaton += (hosts[host_id][meter_name] - mean) ** 2 - variaton /= len(hosts) - sd = math.sqrt(variaton) - return sd - - def calculate_weighted_sd(self, sd_case): - """Calculate common standard deviation among meters on host""" - weighted_sd = 0 - for metric, value in zip(self.metrics, sd_case): - try: - weighted_sd += value * float(self.weights[metric + '_weight']) - except KeyError as exc: - LOG.exception(exc) - raise exception.WatcherException( - _("Incorrect mapping: could not find associated weight" - " for %s in weight dict.") % metric) - return weighted_sd - - def 
calculate_migration_case(self, hosts, instance, src_node, dst_node): - """Calculate migration case - - Return list of standard deviation values, that appearing in case of - migration of instance from source host to destination host - :param hosts: hosts with their workload - :param instance: the virtual machine - :param src_node: the source node - :param dst_node: the destination node - :return: list of standard deviation values - """ - migration_case = [] - new_hosts = copy.deepcopy(hosts) - instance_load = self.get_instance_load(instance) - s_host_vcpus = new_hosts[src_node.uuid]['vcpus'] - d_host_vcpus = new_hosts[dst_node.uuid]['vcpus'] - for metric in self.metrics: - if metric is 'cpu_util': - new_hosts[src_node.uuid][metric] -= ( - self.transform_instance_cpu(instance_load, s_host_vcpus)) - new_hosts[dst_node.uuid][metric] += ( - self.transform_instance_cpu(instance_load, d_host_vcpus)) - else: - new_hosts[src_node.uuid][metric] -= instance_load[metric] - new_hosts[dst_node.uuid][metric] += instance_load[metric] - normalized_hosts = self.normalize_hosts_load(new_hosts) - for metric in self.metrics: - migration_case.append(self.get_sd(normalized_hosts, metric)) - migration_case.append(new_hosts) - return migration_case - - def simulate_migrations(self, hosts): - """Make sorted list of pairs instance:dst_host""" - def yield_nodes(nodes): - if self.host_choice == 'cycle': - for i in itertools.cycle(nodes): - yield [i] - if self.host_choice == 'retry': - while True: - yield random.sample(nodes, self.retry_count) - if self.host_choice == 'fullsearch': - while True: - yield nodes - - instance_host_map = [] - nodes = list(self.get_available_nodes()) - for src_host in nodes: - src_node = self.compute_model.get_node_by_uuid(src_host) - c_nodes = copy.copy(nodes) - c_nodes.remove(src_host) - node_list = yield_nodes(c_nodes) - for instance in self.compute_model.get_node_instances(src_node): - min_sd_case = {'value': len(self.metrics)} - if instance.state not in 
[element.InstanceState.ACTIVE.value, - element.InstanceState.PAUSED.value]: - continue - for dst_host in next(node_list): - dst_node = self.compute_model.get_node_by_uuid(dst_host) - sd_case = self.calculate_migration_case( - hosts, instance, src_node, dst_node) - - weighted_sd = self.calculate_weighted_sd(sd_case[:-1]) - - if weighted_sd < min_sd_case['value']: - min_sd_case = { - 'host': dst_node.uuid, 'value': weighted_sd, - 's_host': src_node.uuid, 'instance': instance.uuid} - instance_host_map.append(min_sd_case) - return sorted(instance_host_map, key=lambda x: x['value']) - - def check_threshold(self): - """Check if cluster is needed in balancing""" - hosts_load = self.get_hosts_load() - normalized_load = self.normalize_hosts_load(hosts_load) - for metric in self.metrics: - metric_sd = self.get_sd(normalized_load, metric) - if metric_sd > float(self.thresholds[metric]): - return self.simulate_migrations(hosts_load) - - def add_migration(self, - resource_id, - migration_type, - source_node, - destination_node): - parameters = {'migration_type': migration_type, - 'source_node': source_node, - 'destination_node': destination_node} - self.solution.add_action(action_type=self.MIGRATION, - resource_id=resource_id, - input_parameters=parameters) - - def create_migration_instance(self, mig_instance, mig_source_node, - mig_destination_node): - """Create migration VM""" - if self.compute_model.migrate_instance( - mig_instance, mig_source_node, mig_destination_node): - self.add_migration(mig_instance.uuid, 'live', - mig_source_node.uuid, - mig_destination_node.uuid) - - def migrate(self, instance_uuid, src_host, dst_host): - mig_instance = self.compute_model.get_instance_by_uuid(instance_uuid) - mig_source_node = self.compute_model.get_node_by_uuid( - src_host) - mig_destination_node = self.compute_model.get_node_by_uuid( - dst_host) - self.create_migration_instance(mig_instance, mig_source_node, - mig_destination_node) - - def fill_solution(self): - self.solution.model 
= self.compute_model - return self.solution - - def pre_execute(self): - LOG.info("Initializing Workload Stabilization") - - if not self.compute_model: - raise exception.ClusterStateNotDefined() - - if self.compute_model.stale: - raise exception.ClusterStateStale() - - self.weights = self.input_parameters.weights - self.metrics = self.input_parameters.metrics - self.thresholds = self.input_parameters.thresholds - self.host_choice = self.input_parameters.host_choice - self.instance_metrics = self.input_parameters.instance_metrics - self.retry_count = self.input_parameters.retry_count - self.periods = self.input_parameters.periods - - def do_execute(self): - migration = self.check_threshold() - if migration: - hosts_load = self.get_hosts_load() - min_sd = 1 - balanced = False - for instance_host in migration: - instance = self.compute_model.get_instance_by_uuid( - instance_host['instance']) - src_node = self.compute_model.get_node_by_uuid( - instance_host['s_host']) - dst_node = self.compute_model.get_node_by_uuid( - instance_host['host']) - if instance.disk > dst_node.disk: - continue - instance_load = self.calculate_migration_case( - hosts_load, instance, src_node, dst_node) - weighted_sd = self.calculate_weighted_sd(instance_load[:-1]) - if weighted_sd < min_sd: - min_sd = weighted_sd - hosts_load = instance_load[-1] - self.migrate(instance_host['instance'], - instance_host['s_host'], - instance_host['host']) - - for metric, value in zip(self.metrics, instance_load[:-1]): - if value < float(self.thresholds[metric]): - balanced = True - break - if balanced: - break - - def post_execute(self): - """Post-execution phase - - This can be used to compute the global efficacy - """ - self.fill_solution() - - LOG.debug(self.compute_model.to_string()) diff --git a/watcher/decision_engine/sync.py b/watcher/decision_engine/sync.py deleted file mode 100644 index 17c3318..0000000 --- a/watcher/decision_engine/sync.py +++ /dev/null @@ -1,571 +0,0 @@ -# -*- encoding: utf-8 -*- -# 
Copyright (c) 2016 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import ast -import collections - -from oslo_log import log - -from watcher.common import context -from watcher.decision_engine.loading import default -from watcher.decision_engine.scoring import scoring_factory -from watcher import objects - -LOG = log.getLogger(__name__) - -GoalMapping = collections.namedtuple( - 'GoalMapping', ['name', 'display_name', 'efficacy_specification']) -StrategyMapping = collections.namedtuple( - 'StrategyMapping', - ['name', 'goal_name', 'display_name', 'parameters_spec']) -ScoringEngineMapping = collections.namedtuple( - 'ScoringEngineMapping', - ['name', 'description', 'metainfo']) - -IndicatorSpec = collections.namedtuple( - 'IndicatorSpec', ['name', 'description', 'unit', 'schema']) - - -class Syncer(object): - """Syncs all available goals and strategies with the Watcher DB""" - - def __init__(self): - self.ctx = context.make_context() - self.discovered_map = None - - self._available_goals = None - self._available_goals_map = None - - self._available_strategies = None - self._available_strategies_map = None - - self._available_scoringengines = None - self._available_scoringengines_map = None - - # This goal mapping maps stale goal IDs to the synced goal - self.goal_mapping = dict() - # This strategy mapping maps stale strategy IDs to the synced goal - self.strategy_mapping = dict() - # Maps stale scoring engine IDs to the synced scoring engines - 
self.se_mapping = dict() - - self.stale_audit_templates_map = {} - self.stale_audits_map = {} - self.stale_action_plans_map = {} - - @property - def available_goals(self): - """Goals loaded from DB""" - if self._available_goals is None: - self._available_goals = objects.Goal.list(self.ctx) - return self._available_goals - - @property - def available_strategies(self): - """Strategies loaded from DB""" - if self._available_strategies is None: - self._available_strategies = objects.Strategy.list(self.ctx) - return self._available_strategies - - @property - def available_scoringengines(self): - """Scoring Engines loaded from DB""" - if self._available_scoringengines is None: - self._available_scoringengines = (objects.ScoringEngine - .list(self.ctx)) - return self._available_scoringengines - - @property - def available_goals_map(self): - """Mapping of goals loaded from DB""" - if self._available_goals_map is None: - self._available_goals_map = { - GoalMapping( - name=g.name, - display_name=g.display_name, - efficacy_specification=tuple( - IndicatorSpec(**item) - for item in g.efficacy_specification)): g - for g in self.available_goals - } - return self._available_goals_map - - @property - def available_strategies_map(self): - if self._available_strategies_map is None: - goals_map = {g.id: g.name for g in self.available_goals} - self._available_strategies_map = { - StrategyMapping( - name=s.name, goal_name=goals_map[s.goal_id], - display_name=s.display_name, - parameters_spec=str(s.parameters_spec)): s - for s in self.available_strategies - } - return self._available_strategies_map - - @property - def available_scoringengines_map(self): - if self._available_scoringengines_map is None: - self._available_scoringengines_map = { - ScoringEngineMapping( - name=s.id, description=s.description, - metainfo=s.metainfo): s - for s in self.available_scoringengines - } - return self._available_scoringengines_map - - def sync(self): - self.discovered_map = self._discover() - 
goals_map = self.discovered_map["goals"] - strategies_map = self.discovered_map["strategies"] - scoringengines_map = self.discovered_map["scoringengines"] - - for goal_name, goal_map in goals_map.items(): - if goal_map in self.available_goals_map: - LOG.info("Goal %s already exists", goal_name) - continue - - self.goal_mapping.update(self._sync_goal(goal_map)) - - for strategy_name, strategy_map in strategies_map.items(): - if (strategy_map in self.available_strategies_map and - strategy_map.goal_name not in - [g.name for g in self.goal_mapping.values()]): - LOG.info("Strategy %s already exists", strategy_name) - continue - - self.strategy_mapping.update(self._sync_strategy(strategy_map)) - - for se_name, se_map in scoringengines_map.items(): - if se_map in self.available_scoringengines_map: - LOG.info("Scoring Engine %s already exists", - se_name) - continue - - self.se_mapping.update(self._sync_scoringengine(se_map)) - - self._sync_objects() - self._soft_delete_removed_scoringengines() - - def _sync_goal(self, goal_map): - goal_name = goal_map.name - goal_mapping = dict() - # Goals that are matching by name with the given discovered goal name - matching_goals = [g for g in self.available_goals - if g.name == goal_name] - stale_goals = self._soft_delete_stale_goals(goal_map, matching_goals) - - if stale_goals or not matching_goals: - goal = objects.Goal(self.ctx) - goal.name = goal_name - goal.display_name = goal_map.display_name - goal.efficacy_specification = [ - indicator._asdict() - for indicator in goal_map.efficacy_specification] - goal.create() - LOG.info("Goal %s created", goal_name) - - # Updating the internal states - self.available_goals_map[goal] = goal_map - # Map the old goal IDs to the new (equivalent) goal - for matching_goal in matching_goals: - goal_mapping[matching_goal.id] = goal - - return goal_mapping - - def _sync_strategy(self, strategy_map): - strategy_name = strategy_map.name - strategy_display_name = strategy_map.display_name - goal_name 
= strategy_map.goal_name - parameters_spec = strategy_map.parameters_spec - strategy_mapping = dict() - - # Strategies that are matching by name with the given - # discovered strategy name - matching_strategies = [s for s in self.available_strategies - if s.name == strategy_name] - stale_strategies = self._soft_delete_stale_strategies( - strategy_map, matching_strategies) - - if stale_strategies or not matching_strategies: - strategy = objects.Strategy(self.ctx) - strategy.name = strategy_name - strategy.display_name = strategy_display_name - strategy.goal_id = objects.Goal.get_by_name(self.ctx, goal_name).id - strategy.parameters_spec = parameters_spec - strategy.create() - LOG.info("Strategy %s created", strategy_name) - - # Updating the internal states - self.available_strategies_map[strategy] = strategy_map - # Map the old strategy IDs to the new (equivalent) strategy - for matching_strategy in matching_strategies: - strategy_mapping[matching_strategy.id] = strategy - - return strategy_mapping - - def _sync_scoringengine(self, scoringengine_map): - scoringengine_name = scoringengine_map.name - se_mapping = dict() - # Scoring Engines matching by id with discovered Scoring engine - matching_scoringengines = [se for se in self.available_scoringengines - if se.name == scoringengine_name] - stale_scoringengines = self._soft_delete_stale_scoringengines( - scoringengine_map, matching_scoringengines) - - if stale_scoringengines or not matching_scoringengines: - scoringengine = objects.ScoringEngine(self.ctx) - scoringengine.name = scoringengine_name - scoringengine.description = scoringengine_map.description - scoringengine.metainfo = scoringengine_map.metainfo - scoringengine.create() - LOG.info("Scoring Engine %s created", scoringengine_name) - - # Updating the internal states - self.available_scoringengines_map[scoringengine] = \ - scoringengine_map - # Map the old scoring engine names to the new (equivalent) SE - for matching_scoringengine in 
matching_scoringengines: - se_mapping[matching_scoringengine.name] = scoringengine - - return se_mapping - - def _sync_objects(self): - # First we find audit templates, audits and action plans that are stale - # because their associated goal or strategy has been modified and we - # update them in-memory - self._find_stale_audit_templates_due_to_goal() - self._find_stale_audit_templates_due_to_strategy() - - self._find_stale_audits_due_to_goal() - self._find_stale_audits_due_to_strategy() - - self._find_stale_action_plans_due_to_strategy() - self._find_stale_action_plans_due_to_audit() - - # Then we handle the case where an audit template, an audit or an - # action plan becomes stale because its related goal does not - # exist anymore. - self._soft_delete_removed_goals() - # Then we handle the case where an audit template, an audit or an - # action plan becomes stale because its related strategy does not - # exist anymore. - self._soft_delete_removed_strategies() - - # Finally, we save into the DB the updated stale audit templates - # and soft delete stale audits and action plans - for stale_audit_template in self.stale_audit_templates_map.values(): - stale_audit_template.save() - LOG.info("Audit Template '%s' synced", - stale_audit_template.name) - - for stale_audit in self.stale_audits_map.values(): - stale_audit.save() - LOG.info("Stale audit '%s' synced and cancelled", - stale_audit.uuid) - - for stale_action_plan in self.stale_action_plans_map.values(): - stale_action_plan.save() - LOG.info("Stale action plan '%s' synced and cancelled", - stale_action_plan.uuid) - - def _find_stale_audit_templates_due_to_goal(self): - for goal_id, synced_goal in self.goal_mapping.items(): - filters = {"goal_id": goal_id} - stale_audit_templates = objects.AuditTemplate.list( - self.ctx, filters=filters) - - # Update the goal ID for the stale audit templates (w/o saving) - for audit_template in stale_audit_templates: - if audit_template.id not in self.stale_audit_templates_map: - 
audit_template.goal_id = synced_goal.id - self.stale_audit_templates_map[audit_template.id] = ( - audit_template) - else: - self.stale_audit_templates_map[ - audit_template.id].goal_id = synced_goal.id - - def _find_stale_audit_templates_due_to_strategy(self): - for strategy_id, synced_strategy in self.strategy_mapping.items(): - filters = {"strategy_id": strategy_id} - stale_audit_templates = objects.AuditTemplate.list( - self.ctx, filters=filters) - - # Update strategy IDs for all stale audit templates (w/o saving) - for audit_template in stale_audit_templates: - if audit_template.id not in self.stale_audit_templates_map: - audit_template.strategy_id = synced_strategy.id - self.stale_audit_templates_map[audit_template.id] = ( - audit_template) - else: - self.stale_audit_templates_map[ - audit_template.id].strategy_id = synced_strategy.id - - def _find_stale_audits_due_to_goal(self): - for goal_id, synced_goal in self.goal_mapping.items(): - filters = {"goal_id": goal_id} - stale_audits = objects.Audit.list( - self.ctx, filters=filters, eager=True) - - # Update the goal ID for the stale audits (w/o saving) - for audit in stale_audits: - if audit.id not in self.stale_audits_map: - audit.goal_id = synced_goal.id - self.stale_audits_map[audit.id] = audit - else: - self.stale_audits_map[audit.id].goal_id = synced_goal.id - - def _find_stale_audits_due_to_strategy(self): - for strategy_id, synced_strategy in self.strategy_mapping.items(): - filters = {"strategy_id": strategy_id} - stale_audits = objects.Audit.list( - self.ctx, filters=filters, eager=True) - # Update strategy IDs for all stale audits (w/o saving) - for audit in stale_audits: - if audit.id not in self.stale_audits_map: - audit.strategy_id = synced_strategy.id - audit.state = objects.audit.State.CANCELLED - self.stale_audits_map[audit.id] = audit - else: - self.stale_audits_map[ - audit.id].strategy_id = synced_strategy.id - self.stale_audits_map[ - audit.id].state = objects.audit.State.CANCELLED - - def 
_find_stale_action_plans_due_to_strategy(self): - for strategy_id, synced_strategy in self.strategy_mapping.items(): - filters = {"strategy_id": strategy_id} - stale_action_plans = objects.ActionPlan.list( - self.ctx, filters=filters, eager=True) - - # Update strategy IDs for all stale action plans (w/o saving) - for action_plan in stale_action_plans: - if action_plan.id not in self.stale_action_plans_map: - action_plan.strategy_id = synced_strategy.id - action_plan.state = objects.action_plan.State.CANCELLED - self.stale_action_plans_map[action_plan.id] = action_plan - else: - self.stale_action_plans_map[ - action_plan.id].strategy_id = synced_strategy.id - self.stale_action_plans_map[ - action_plan.id].state = ( - objects.action_plan.State.CANCELLED) - - def _find_stale_action_plans_due_to_audit(self): - for audit_id, synced_audit in self.stale_audits_map.items(): - filters = {"audit_id": audit_id} - stale_action_plans = objects.ActionPlan.list( - self.ctx, filters=filters, eager=True) - - # Update audit IDs for all stale action plans (w/o saving) - for action_plan in stale_action_plans: - if action_plan.id not in self.stale_action_plans_map: - action_plan.audit_id = synced_audit.id - action_plan.state = objects.action_plan.State.CANCELLED - self.stale_action_plans_map[action_plan.id] = action_plan - else: - self.stale_action_plans_map[ - action_plan.id].audit_id = synced_audit.id - self.stale_action_plans_map[ - action_plan.id].state = ( - objects.action_plan.State.CANCELLED) - - def _soft_delete_removed_goals(self): - removed_goals = [ - g for g in self.available_goals - if g.name not in self.discovered_map['goals']] - for removed_goal in removed_goals: - removed_goal.soft_delete() - filters = {"goal_id": removed_goal.id} - - invalid_ats = objects.AuditTemplate.list(self.ctx, filters=filters) - for at in invalid_ats: - LOG.warning( - "Audit Template '%(audit_template)s' references a " - "goal that does not exist", audit_template=at.uuid) - - stale_audits = 
objects.Audit.list( - self.ctx, filters=filters, eager=True) - for audit in stale_audits: - LOG.warning( - "Audit '%(audit)s' references a " - "goal that does not exist", audit=audit.uuid) - if audit.id not in self.stale_audits_map: - audit.state = objects.audit.State.CANCELLED - self.stale_audits_map[audit.id] = audit - else: - self.stale_audits_map[ - audit.id].state = objects.audit.State.CANCELLED - - def _soft_delete_removed_strategies(self): - removed_strategies = [ - s for s in self.available_strategies - if s.name not in self.discovered_map['strategies']] - - for removed_strategy in removed_strategies: - removed_strategy.soft_delete() - filters = {"strategy_id": removed_strategy.id} - invalid_ats = objects.AuditTemplate.list(self.ctx, filters=filters) - for at in invalid_ats: - LOG.info( - "Audit Template '%(audit_template)s' references a " - "strategy that does not exist", - audit_template=at.uuid) - # In this case we can reset the strategy ID to None - # so the audit template can still achieve the same goal - # but with a different strategy - if at.id not in self.stale_audit_templates_map: - at.strategy_id = None - self.stale_audit_templates_map[at.id] = at - else: - self.stale_audit_templates_map[at.id].strategy_id = None - - stale_audits = objects.Audit.list( - self.ctx, filters=filters, eager=True) - for audit in stale_audits: - LOG.warning( - "Audit '%(audit)s' references a " - "strategy that does not exist", audit=audit.uuid) - if audit.id not in self.stale_audits_map: - audit.state = objects.audit.State.CANCELLED - self.stale_audits_map[audit.id] = audit - else: - self.stale_audits_map[ - audit.id].state = objects.audit.State.CANCELLED - - stale_action_plans = objects.ActionPlan.list( - self.ctx, filters=filters, eager=True) - for action_plan in stale_action_plans: - LOG.warning( - "Action Plan '%(action_plan)s' references a " - "strategy that does not exist", - action_plan=action_plan.uuid) - if action_plan.id not in self.stale_action_plans_map: - 
action_plan.state = objects.action_plan.State.CANCELLED - self.stale_action_plans_map[action_plan.id] = action_plan - else: - self.stale_action_plans_map[ - action_plan.id].state = ( - objects.action_plan.State.CANCELLED) - - def _soft_delete_removed_scoringengines(self): - removed_se = [ - se for se in self.available_scoringengines - if se.name not in self.discovered_map['scoringengines']] - for se in removed_se: - LOG.info("Scoring Engine %s removed", se.name) - se.soft_delete() - - def _discover(self): - strategies_map = {} - goals_map = {} - scoringengines_map = {} - discovered_map = { - "goals": goals_map, - "strategies": strategies_map, - "scoringengines": scoringengines_map} - goal_loader = default.DefaultGoalLoader() - implemented_goals = goal_loader.list_available() - - strategy_loader = default.DefaultStrategyLoader() - implemented_strategies = strategy_loader.list_available() - - for goal_cls in implemented_goals.values(): - goals_map[goal_cls.get_name()] = GoalMapping( - name=goal_cls.get_name(), - display_name=goal_cls.get_translatable_display_name(), - efficacy_specification=tuple( - IndicatorSpec(**indicator.to_dict()) - for indicator in goal_cls.get_efficacy_specification( - ).get_indicators_specifications())) - - for strategy_cls in implemented_strategies.values(): - strategies_map[strategy_cls.get_name()] = StrategyMapping( - name=strategy_cls.get_name(), - goal_name=strategy_cls.get_goal_name(), - display_name=strategy_cls.get_translatable_display_name(), - parameters_spec=str(strategy_cls.get_schema())) - - for se in scoring_factory.get_scoring_engine_list(): - scoringengines_map[se.get_name()] = ScoringEngineMapping( - name=se.get_name(), - description=se.get_description(), - metainfo=se.get_metainfo()) - - return discovered_map - - def _soft_delete_stale_goals(self, goal_map, matching_goals): - """Soft delete the stale goals - - :param goal_map: discovered goal map - :type goal_map: :py:class:`~.GoalMapping` instance - :param matching_goals: 
list of DB goals matching the goal_map - :type matching_goals: list of :py:class:`~.objects.Goal` instances - :returns: A list of soft deleted DB goals (subset of matching goals) - :rtype: list of :py:class:`~.objects.Goal` instances - """ - goal_display_name = goal_map.display_name - goal_name = goal_map.name - goal_efficacy_spec = goal_map.efficacy_specification - - stale_goals = [] - for matching_goal in matching_goals: - if (matching_goal.efficacy_specification == goal_efficacy_spec and - matching_goal.display_name == goal_display_name): - LOG.info("Goal %s unchanged", goal_name) - else: - LOG.info("Goal %s modified", goal_name) - matching_goal.soft_delete() - stale_goals.append(matching_goal) - - return stale_goals - - def _soft_delete_stale_strategies(self, strategy_map, matching_strategies): - strategy_name = strategy_map.name - strategy_display_name = strategy_map.display_name - parameters_spec = strategy_map.parameters_spec - - stale_strategies = [] - for matching_strategy in matching_strategies: - if (matching_strategy.display_name == strategy_display_name and - matching_strategy.goal_id not in self.goal_mapping and - matching_strategy.parameters_spec == - ast.literal_eval(parameters_spec)): - LOG.info("Strategy %s unchanged", strategy_name) - else: - LOG.info("Strategy %s modified", strategy_name) - matching_strategy.soft_delete() - stale_strategies.append(matching_strategy) - - return stale_strategies - - def _soft_delete_stale_scoringengines( - self, scoringengine_map, matching_scoringengines): - se_name = scoringengine_map.name - se_description = scoringengine_map.description - se_metainfo = scoringengine_map.metainfo - - stale_scoringengines = [] - for matching_scoringengine in matching_scoringengines: - if (matching_scoringengine.description == se_description and - matching_scoringengine.metainfo == se_metainfo): - LOG.info("Scoring Engine %s unchanged", se_name) - else: - LOG.info("Scoring Engine %s modified", se_name) - 
matching_scoringengine.soft_delete() - stale_scoringengines.append(matching_scoringengine) - - return stale_scoringengines diff --git a/watcher/hacking/__init__.py b/watcher/hacking/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/hacking/checks.py b/watcher/hacking/checks.py deleted file mode 100644 index 6d4fab1..0000000 --- a/watcher/hacking/checks.py +++ /dev/null @@ -1,288 +0,0 @@ -# Copyright (c) 2014 OpenStack Foundation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os -import re - - -def flake8ext(f): - """Decorator to indicate flake8 extension. - - This is borrowed from hacking.core.flake8ext(), but at now it is used - only for unit tests to know which are watcher flake8 extensions. - """ - f.name = __name__ - return f - - -# Guidelines for writing new hacking checks -# -# - Use only for Watcher specific tests. OpenStack general tests -# should be submitted to the common 'hacking' module. -# - Pick numbers in the range N3xx. Find the current test with -# the highest allocated number and then pick the next value. -# - Keep the test method code in the source file ordered based -# on the N3xx value. 
-# - List the new rule in the top level HACKING.rst file - -_all_log_levels = { - 'reserved': '_', # this should never be used with a log unless - # it is a variable used for a log message and - # a exception - 'error': '_LE', - 'info': '_LI', - 'warning': '_LW', - 'critical': '_LC', - 'exception': '_LE', -} -_all_hints = set(_all_log_levels.values()) - - -def _regex_for_level(level, hint): - return r".*LOG\.%(level)s\(\s*((%(wrong_hints)s)\(|'|\")" % { - 'level': level, - 'wrong_hints': '|'.join(_all_hints - set([hint])), - } - - -log_warn = re.compile( - r"(.)*LOG\.(warn)\(\s*('|\"|_)") -unittest_imports_dot = re.compile(r"\bimport[\s]+unittest\b") -unittest_imports_from = re.compile(r"\bfrom[\s]+unittest\b") - - -@flake8ext -def use_jsonutils(logical_line, filename): - msg = "N321: jsonutils.%(fun)s must be used instead of json.%(fun)s" - - # Skip list is currently empty. - json_check_skipped_patterns = [] - - for pattern in json_check_skipped_patterns: - if pattern in filename: - return - - if "json." in logical_line: - json_funcs = ['dumps(', 'dump(', 'loads(', 'load('] - for f in json_funcs: - pos = logical_line.find('json.%s' % f) - if pos != -1: - yield (pos, msg % {'fun': f[:-1]}) - - -@flake8ext -def no_translate_debug_logs(logical_line, filename): - """Check for 'LOG.debug(_(' and 'LOG.debug(_Lx(' - - As per our translation policy, - https://wiki.openstack.org/wiki/LoggingStandards#Log_Translation - we shouldn't translate debug level logs. - - * This check assumes that 'LOG' is a logger. 
- N319 - """ - for hint in _all_hints: - if logical_line.startswith("LOG.debug(%s(" % hint): - yield(0, "N319 Don't translate debug level logs") - - -@flake8ext -def check_assert_called_once_with(logical_line, filename): - # Try to detect unintended calls of nonexistent mock methods like: - # assert_called_once - # assertCalledOnceWith - # assert_has_called - # called_once_with - if 'watcher/tests/' in filename: - if '.assert_called_once_with(' in logical_line: - return - uncased_line = logical_line.lower().replace('_', '') - - check_calls = ['.assertcalledonce', '.calledoncewith'] - if any(x for x in check_calls if x in uncased_line): - msg = ("N322: Possible use of no-op mock method. " - "please use assert_called_once_with.") - yield (0, msg) - - if '.asserthascalled' in uncased_line: - msg = ("N322: Possible use of no-op mock method. " - "please use assert_has_calls.") - yield (0, msg) - - -@flake8ext -def check_python3_xrange(logical_line): - if re.search(r"\bxrange\s*\(", logical_line): - yield(0, "N325: Do not use xrange. 
Use range, or six.moves.range for " - "large loops.") - - -@flake8ext -def check_no_basestring(logical_line): - if re.search(r"\bbasestring\b", logical_line): - msg = ("N326: basestring is not Python3-compatible, use " - "six.string_types instead.") - yield(0, msg) - - -@flake8ext -def check_python3_no_iteritems(logical_line): - if re.search(r".*\.iteritems\(\)", logical_line): - msg = ("N327: Use six.iteritems() instead of dict.iteritems().") - yield(0, msg) - - -@flake8ext -def check_asserttrue(logical_line, filename): - if 'watcher/tests/' in filename: - if re.search(r"assertEqual\(\s*True,[^,]*(,[^,]*)?\)", logical_line): - msg = ("N328: Use assertTrue(observed) instead of " - "assertEqual(True, observed)") - yield (0, msg) - if re.search(r"assertEqual\([^,]*,\s*True(,[^,]*)?\)", logical_line): - msg = ("N328: Use assertTrue(observed) instead of " - "assertEqual(True, observed)") - yield (0, msg) - - -@flake8ext -def check_assertfalse(logical_line, filename): - if 'watcher/tests/' in filename: - if re.search(r"assertEqual\(\s*False,[^,]*(,[^,]*)?\)", logical_line): - msg = ("N328: Use assertFalse(observed) instead of " - "assertEqual(False, observed)") - yield (0, msg) - if re.search(r"assertEqual\([^,]*,\s*False(,[^,]*)?\)", logical_line): - msg = ("N328: Use assertFalse(observed) instead of " - "assertEqual(False, observed)") - yield (0, msg) - - -@flake8ext -def check_assertempty(logical_line, filename): - if 'watcher/tests/' in filename: - msg = ("N330: Use assertEqual(*empty*, observed) instead of " - "assertEqual(observed, *empty*). 
*empty* contains " - "{}, [], (), set(), '', \"\"") - empties = r"(\[\s*\]|\{\s*\}|\(\s*\)|set\(\s*\)|'\s*'|\"\s*\")" - reg = r"assertEqual\(([^,]*,\s*)+?%s\)\s*$" % empties - if re.search(reg, logical_line): - yield (0, msg) - - -@flake8ext -def check_assertisinstance(logical_line, filename): - if 'watcher/tests/' in filename: - if re.search(r"assertTrue\(\s*isinstance\(\s*[^,]*,\s*[^,]*\)\)", - logical_line): - msg = ("N331: Use assertIsInstance(observed, type) instead " - "of assertTrue(isinstance(observed, type))") - yield (0, msg) - - -@flake8ext -def check_assertequal_for_httpcode(logical_line, filename): - msg = ("N332: Use assertEqual(expected_http_code, observed_http_code) " - "instead of assertEqual(observed_http_code, expected_http_code)") - if 'watcher/tests/' in filename: - if re.search(r"assertEqual\(\s*[^,]*,[^,]*HTTP[^\.]*\.code\s*\)", - logical_line): - yield (0, msg) - - -@flake8ext -def check_log_warn_deprecated(logical_line, filename): - msg = "N333: Use LOG.warning due to compatibility with py3" - if log_warn.match(logical_line): - yield (0, msg) - - -@flake8ext -def check_oslo_i18n_wrapper(logical_line, filename, noqa): - """Check for watcher.i18n usage. - - N340(watcher/foo/bar.py): from watcher.i18n import _ - Okay(watcher/foo/bar.py): from watcher.i18n import _ # noqa - """ - - if noqa: - return - - split_line = logical_line.split() - modulename = os.path.normpath(filename).split('/')[0] - bad_i18n_module = '%s.i18n' % modulename - - if (len(split_line) > 1 and split_line[0] in ('import', 'from')): - if (split_line[1] == bad_i18n_module or - modulename != 'watcher' and split_line[1] in ('watcher.i18n', - 'watcher._i18n')): - msg = ("N340: %(found)s is found. Use %(module)s._i18n instead." - % {'found': split_line[1], 'module': modulename}) - yield (0, msg) - - -@flake8ext -def check_builtins_gettext(logical_line, tokens, filename, lines, noqa): - """Check usage of builtins gettext _(). 
- - N341(watcher/foo.py): _('foo') - Okay(watcher/i18n.py): _('foo') - Okay(watcher/_i18n.py): _('foo') - Okay(watcher/foo.py): _('foo') # noqa - """ - - if noqa: - return - - modulename = os.path.normpath(filename).split('/')[0] - - if '%s/tests' % modulename in filename: - return - - if os.path.basename(filename) in ('i18n.py', '_i18n.py'): - return - - token_values = [t[1] for t in tokens] - i18n_wrapper = '%s._i18n' % modulename - - if '_' in token_values: - i18n_import_line_found = False - for line in lines: - split_line = [elm.rstrip(',') for elm in line.split()] - if (len(split_line) > 1 and split_line[0] == 'from' and - split_line[1] == i18n_wrapper and - '_' in split_line): - i18n_import_line_found = True - break - if not i18n_import_line_found: - msg = ("N341: _ from python builtins module is used. " - "Use _ from %s instead." % i18n_wrapper) - yield (0, msg) - - -def factory(register): - register(use_jsonutils) - register(check_assert_called_once_with) - register(no_translate_debug_logs) - register(check_python3_xrange) - register(check_no_basestring) - register(check_python3_no_iteritems) - register(check_asserttrue) - register(check_assertfalse) - register(check_assertempty) - register(check_assertisinstance) - register(check_assertequal_for_httpcode) - register(check_log_warn_deprecated) - register(check_oslo_i18n_wrapper) - register(check_builtins_gettext) diff --git a/watcher/notifications/__init__.py b/watcher/notifications/__init__.py deleted file mode 100644 index cfed437..0000000 --- a/watcher/notifications/__init__.py +++ /dev/null @@ -1,29 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Authors: Vincent FRANCOISE -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Note(gibi): Importing publicly called functions so the caller code does not -# need to be changed after we moved these function inside the package -# Todo(gibi): remove these imports after legacy notifications using these are -# transformed to versioned notifications -from watcher.notifications import action # noqa -from watcher.notifications import action_plan # noqa -from watcher.notifications import audit # noqa -from watcher.notifications import exception # noqa -from watcher.notifications import goal # noqa -from watcher.notifications import service # noqa -from watcher.notifications import strategy # noqa diff --git a/watcher/notifications/action.py b/watcher/notifications/action.py deleted file mode 100644 index 449a012..0000000 --- a/watcher/notifications/action.py +++ /dev/null @@ -1,302 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2017 Servionica -# -# Authors: Alexander Chadin -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from oslo_config import cfg - -from watcher.common import context as wcontext -from watcher.common import exception -from watcher.notifications import action_plan as ap_notifications -from watcher.notifications import base as notificationbase -from watcher.notifications import exception as exception_notifications -from watcher import objects -from watcher.objects import base -from watcher.objects import fields as wfields - -CONF = cfg.CONF - - -@base.WatcherObjectRegistry.register_notification -class ActionPayload(notificationbase.NotificationPayloadBase): - SCHEMA = { - 'uuid': ('action', 'uuid'), - - 'action_type': ('action', 'action_type'), - 'input_parameters': ('action', 'input_parameters'), - 'state': ('action', 'state'), - 'parents': ('action', 'parents'), - - 'created_at': ('action', 'created_at'), - 'updated_at': ('action', 'updated_at'), - 'deleted_at': ('action', 'deleted_at'), - } - - # Version 1.0: Initial version - VERSION = '1.0' - - fields = { - 'uuid': wfields.UUIDField(), - 'action_type': wfields.StringField(nullable=False), - 'input_parameters': wfields.DictField(nullable=False, default={}), - 'state': wfields.StringField(nullable=False), - 'parents': wfields.ListOfUUIDsField(nullable=False, default=[]), - 'action_plan_uuid': wfields.UUIDField(), - 'action_plan': wfields.ObjectField('TerseActionPlanPayload'), - - 'created_at': wfields.DateTimeField(nullable=True), - 'updated_at': wfields.DateTimeField(nullable=True), - 'deleted_at': wfields.DateTimeField(nullable=True), - } - - def __init__(self, action, **kwargs): - super(ActionPayload, self).__init__(**kwargs) - self.populate_schema(action=action) - - -@base.WatcherObjectRegistry.register_notification -class ActionStateUpdatePayload(notificationbase.NotificationPayloadBase): - # Version 1.0: Initial version - VERSION = '1.0' - - fields = { - 'old_state': wfields.StringField(nullable=True), - 'state': wfields.StringField(nullable=True), - } - - 
-@base.WatcherObjectRegistry.register_notification -class ActionCreatePayload(ActionPayload): - # Version 1.0: Initial version - VERSION = '1.0' - fields = {} - - def __init__(self, action, action_plan): - super(ActionCreatePayload, self).__init__( - action=action, - action_plan=action_plan) - - -@base.WatcherObjectRegistry.register_notification -class ActionUpdatePayload(ActionPayload): - # Version 1.0: Initial version - VERSION = '1.0' - fields = { - 'state_update': wfields.ObjectField('ActionStateUpdatePayload'), - } - - def __init__(self, action, state_update, action_plan): - super(ActionUpdatePayload, self).__init__( - action=action, - state_update=state_update, - action_plan=action_plan) - - -@base.WatcherObjectRegistry.register_notification -class ActionExecutionPayload(ActionPayload): - # Version 1.0: Initial version - VERSION = '1.0' - fields = { - 'fault': wfields.ObjectField('ExceptionPayload', nullable=True), - } - - def __init__(self, action, action_plan, **kwargs): - super(ActionExecutionPayload, self).__init__( - action=action, - action_plan=action_plan, - **kwargs) - - -@base.WatcherObjectRegistry.register_notification -class ActionDeletePayload(ActionPayload): - # Version 1.0: Initial version - VERSION = '1.0' - fields = {} - - def __init__(self, action, action_plan): - super(ActionDeletePayload, self).__init__( - action=action, - action_plan=action_plan) - - -@notificationbase.notification_sample('action-execution-error.json') -@notificationbase.notification_sample('action-execution-end.json') -@notificationbase.notification_sample('action-execution-start.json') -@base.WatcherObjectRegistry.register_notification -class ActionExecutionNotification(notificationbase.NotificationBase): - # Version 1.0: Initial version - VERSION = '1.0' - - fields = { - 'payload': wfields.ObjectField('ActionExecutionPayload') - } - - -@notificationbase.notification_sample('action-create.json') -@base.WatcherObjectRegistry.register_notification -class 
ActionCreateNotification(notificationbase.NotificationBase): - # Version 1.0: Initial version - VERSION = '1.0' - - fields = { - 'payload': wfields.ObjectField('ActionCreatePayload') - } - - -@notificationbase.notification_sample('action-update.json') -@base.WatcherObjectRegistry.register_notification -class ActionUpdateNotification(notificationbase.NotificationBase): - # Version 1.0: Initial version - VERSION = '1.0' - - fields = { - 'payload': wfields.ObjectField('ActionUpdatePayload') - } - - -@notificationbase.notification_sample('action-delete.json') -@base.WatcherObjectRegistry.register_notification -class ActionDeleteNotification(notificationbase.NotificationBase): - # Version 1.0: Initial version - VERSION = '1.0' - - fields = { - 'payload': wfields.ObjectField('ActionDeletePayload') - } - - -def _get_action_plan_payload(action): - action_plan = None - strategy_uuid = None - audit = None - try: - action_plan = action.action_plan - audit = objects.Audit.get(wcontext.make_context(show_deleted=True), - action_plan.audit_id) - if audit.strategy_id: - strategy_uuid = objects.Strategy.get( - wcontext.make_context(show_deleted=True), - audit.strategy_id).uuid - except NotImplementedError: - raise exception.EagerlyLoadedActionRequired(action=action.uuid) - - action_plan_payload = ap_notifications.TerseActionPlanPayload( - action_plan=action_plan, - audit_uuid=audit.uuid, strategy_uuid=strategy_uuid) - - return action_plan_payload - - -def send_create(context, action, service='infra-optim', host=None): - """Emit an action.create notification.""" - action_plan_payload = _get_action_plan_payload(action) - - versioned_payload = ActionCreatePayload( - action=action, - action_plan=action_plan_payload, - ) - - notification = ActionCreateNotification( - priority=wfields.NotificationPriority.INFO, - event_type=notificationbase.EventType( - object='action', - action=wfields.NotificationAction.CREATE), - publisher=notificationbase.NotificationPublisher( - host=host or 
CONF.host, - binary=service), - payload=versioned_payload) - - notification.emit(context) - - -def send_update(context, action, service='infra-optim', - host=None, old_state=None): - """Emit an action.update notification.""" - action_plan_payload = _get_action_plan_payload(action) - - state_update = ActionStateUpdatePayload( - old_state=old_state, - state=action.state if old_state else None) - - versioned_payload = ActionUpdatePayload( - action=action, - state_update=state_update, - action_plan=action_plan_payload, - ) - - notification = ActionUpdateNotification( - priority=wfields.NotificationPriority.INFO, - event_type=notificationbase.EventType( - object='action', - action=wfields.NotificationAction.UPDATE), - publisher=notificationbase.NotificationPublisher( - host=host or CONF.host, - binary=service), - payload=versioned_payload) - - notification.emit(context) - - -def send_delete(context, action, service='infra-optim', host=None): - """Emit an action.delete notification.""" - action_plan_payload = _get_action_plan_payload(action) - - versioned_payload = ActionDeletePayload( - action=action, - action_plan=action_plan_payload, - ) - - notification = ActionDeleteNotification( - priority=wfields.NotificationPriority.INFO, - event_type=notificationbase.EventType( - object='action', - action=wfields.NotificationAction.DELETE), - publisher=notificationbase.NotificationPublisher( - host=host or CONF.host, - binary=service), - payload=versioned_payload) - - notification.emit(context) - - -def send_execution_notification(context, action, notification_action, phase, - priority=wfields.NotificationPriority.INFO, - service='infra-optim', host=None): - """Emit an action execution notification.""" - action_plan_payload = _get_action_plan_payload(action) - - fault = None - if phase == wfields.NotificationPhase.ERROR: - fault = exception_notifications.ExceptionPayload.from_exception() - - versioned_payload = ActionExecutionPayload( - action=action, - 
action_plan=action_plan_payload, - fault=fault, - ) - - notification = ActionExecutionNotification( - priority=priority, - event_type=notificationbase.EventType( - object='action', - action=notification_action, - phase=phase), - publisher=notificationbase.NotificationPublisher( - host=host or CONF.host, - binary=service), - payload=versioned_payload) - - notification.emit(context) diff --git a/watcher/notifications/action_plan.py b/watcher/notifications/action_plan.py deleted file mode 100644 index 97b714b..0000000 --- a/watcher/notifications/action_plan.py +++ /dev/null @@ -1,340 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2017 b<>com -# -# Authors: Vincent FRANCOISE -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from oslo_config import cfg - -from watcher.common import context as wcontext -from watcher.common import exception -from watcher.notifications import audit as audit_notifications -from watcher.notifications import base as notificationbase -from watcher.notifications import exception as exception_notifications -from watcher.notifications import strategy as strategy_notifications -from watcher import objects -from watcher.objects import base -from watcher.objects import fields as wfields - -CONF = cfg.CONF - - -@base.WatcherObjectRegistry.register_notification -class TerseActionPlanPayload(notificationbase.NotificationPayloadBase): - SCHEMA = { - 'uuid': ('action_plan', 'uuid'), - - 'state': ('action_plan', 'state'), - 'global_efficacy': ('action_plan', 'global_efficacy'), - - 'created_at': ('action_plan', 'created_at'), - 'updated_at': ('action_plan', 'updated_at'), - 'deleted_at': ('action_plan', 'deleted_at'), - } - - # Version 1.0: Initial version - VERSION = '1.0' - - fields = { - 'uuid': wfields.UUIDField(), - 'state': wfields.StringField(), - 'global_efficacy': wfields.FlexibleDictField(nullable=True), - 'audit_uuid': wfields.UUIDField(), - 'strategy_uuid': wfields.UUIDField(nullable=True), - - 'created_at': wfields.DateTimeField(nullable=True), - 'updated_at': wfields.DateTimeField(nullable=True), - 'deleted_at': wfields.DateTimeField(nullable=True), - } - - def __init__(self, action_plan, audit=None, strategy=None, **kwargs): - super(TerseActionPlanPayload, self).__init__(audit=audit, - strategy=strategy, - **kwargs) - self.populate_schema(action_plan=action_plan) - - -@base.WatcherObjectRegistry.register_notification -class ActionPlanPayload(TerseActionPlanPayload): - SCHEMA = { - 'uuid': ('action_plan', 'uuid'), - - 'state': ('action_plan', 'state'), - 'global_efficacy': ('action_plan', 'global_efficacy'), - - 'created_at': ('action_plan', 'created_at'), - 'updated_at': ('action_plan', 'updated_at'), - 'deleted_at': ('action_plan', 'deleted_at'), - } - 
- # Version 1.0: Initial version - VERSION = '1.0' - - fields = { - 'audit': wfields.ObjectField('TerseAuditPayload'), - 'strategy': wfields.ObjectField('StrategyPayload'), - } - - def __init__(self, action_plan, audit, strategy, **kwargs): - if not kwargs.get('audit_uuid'): - kwargs['audit_uuid'] = audit.uuid - - if strategy and not kwargs.get('strategy_uuid'): - kwargs['strategy_uuid'] = strategy.uuid - - super(ActionPlanPayload, self).__init__( - action_plan, audit=audit, strategy=strategy, **kwargs) - - -@base.WatcherObjectRegistry.register_notification -class ActionPlanStateUpdatePayload(notificationbase.NotificationPayloadBase): - # Version 1.0: Initial version - VERSION = '1.0' - - fields = { - 'old_state': wfields.StringField(nullable=True), - 'state': wfields.StringField(nullable=True), - } - - -@base.WatcherObjectRegistry.register_notification -class ActionPlanCreatePayload(ActionPlanPayload): - # Version 1.0: Initial version - VERSION = '1.0' - fields = {} - - def __init__(self, action_plan, audit, strategy): - super(ActionPlanCreatePayload, self).__init__( - action_plan=action_plan, - audit=audit, - strategy=strategy) - - -@base.WatcherObjectRegistry.register_notification -class ActionPlanUpdatePayload(ActionPlanPayload): - # Version 1.0: Initial version - VERSION = '1.0' - fields = { - 'state_update': wfields.ObjectField('ActionPlanStateUpdatePayload'), - } - - def __init__(self, action_plan, state_update, audit, strategy): - super(ActionPlanUpdatePayload, self).__init__( - action_plan=action_plan, - state_update=state_update, - audit=audit, - strategy=strategy) - - -@base.WatcherObjectRegistry.register_notification -class ActionPlanActionPayload(ActionPlanPayload): - # Version 1.0: Initial version - VERSION = '1.0' - fields = { - 'fault': wfields.ObjectField('ExceptionPayload', nullable=True), - } - - def __init__(self, action_plan, audit, strategy, **kwargs): - super(ActionPlanActionPayload, self).__init__( - action_plan=action_plan, - audit=audit, - 
strategy=strategy, - **kwargs) - - -@base.WatcherObjectRegistry.register_notification -class ActionPlanDeletePayload(ActionPlanPayload): - # Version 1.0: Initial version - VERSION = '1.0' - fields = {} - - def __init__(self, action_plan, audit, strategy): - super(ActionPlanDeletePayload, self).__init__( - action_plan=action_plan, - audit=audit, - strategy=strategy) - - -@notificationbase.notification_sample('action_plan-execution-error.json') -@notificationbase.notification_sample('action_plan-execution-end.json') -@notificationbase.notification_sample('action_plan-execution-start.json') -@base.WatcherObjectRegistry.register_notification -class ActionPlanActionNotification(notificationbase.NotificationBase): - # Version 1.0: Initial version - VERSION = '1.0' - - fields = { - 'payload': wfields.ObjectField('ActionPlanActionPayload') - } - - -@notificationbase.notification_sample('action_plan-create.json') -@base.WatcherObjectRegistry.register_notification -class ActionPlanCreateNotification(notificationbase.NotificationBase): - # Version 1.0: Initial version - VERSION = '1.0' - - fields = { - 'payload': wfields.ObjectField('ActionPlanCreatePayload') - } - - -@notificationbase.notification_sample('action_plan-update.json') -@base.WatcherObjectRegistry.register_notification -class ActionPlanUpdateNotification(notificationbase.NotificationBase): - # Version 1.0: Initial version - VERSION = '1.0' - - fields = { - 'payload': wfields.ObjectField('ActionPlanUpdatePayload') - } - - -@notificationbase.notification_sample('action_plan-delete.json') -@base.WatcherObjectRegistry.register_notification -class ActionPlanDeleteNotification(notificationbase.NotificationBase): - # Version 1.0: Initial version - VERSION = '1.0' - - fields = { - 'payload': wfields.ObjectField('ActionPlanDeletePayload') - } - - -def _get_common_payload(action_plan): - audit = None - strategy = None - try: - audit = action_plan.audit - strategy = action_plan.strategy - except NotImplementedError: - raise 
exception.EagerlyLoadedActionPlanRequired( - action_plan=action_plan.uuid) - - goal = objects.Goal.get( - wcontext.make_context(show_deleted=True), audit.goal_id) - audit_payload = audit_notifications.TerseAuditPayload( - audit=audit, goal_uuid=goal.uuid) - - strategy_payload = strategy_notifications.StrategyPayload( - strategy=strategy) - - return audit_payload, strategy_payload - - -def send_create(context, action_plan, service='infra-optim', host=None): - """Emit an action_plan.create notification.""" - audit_payload, strategy_payload = _get_common_payload(action_plan) - - versioned_payload = ActionPlanCreatePayload( - action_plan=action_plan, - audit=audit_payload, - strategy=strategy_payload, - ) - - notification = ActionPlanCreateNotification( - priority=wfields.NotificationPriority.INFO, - event_type=notificationbase.EventType( - object='action_plan', - action=wfields.NotificationAction.CREATE), - publisher=notificationbase.NotificationPublisher( - host=host or CONF.host, - binary=service), - payload=versioned_payload) - - notification.emit(context) - - -def send_update(context, action_plan, service='infra-optim', - host=None, old_state=None): - """Emit an action_plan.update notification.""" - audit_payload, strategy_payload = _get_common_payload(action_plan) - - state_update = ActionPlanStateUpdatePayload( - old_state=old_state, - state=action_plan.state if old_state else None) - - versioned_payload = ActionPlanUpdatePayload( - action_plan=action_plan, - state_update=state_update, - audit=audit_payload, - strategy=strategy_payload, - ) - - notification = ActionPlanUpdateNotification( - priority=wfields.NotificationPriority.INFO, - event_type=notificationbase.EventType( - object='action_plan', - action=wfields.NotificationAction.UPDATE), - publisher=notificationbase.NotificationPublisher( - host=host or CONF.host, - binary=service), - payload=versioned_payload) - - notification.emit(context) - - -def send_delete(context, action_plan, service='infra-optim', 
host=None): - """Emit an action_plan.delete notification.""" - audit_payload, strategy_payload = _get_common_payload(action_plan) - - versioned_payload = ActionPlanDeletePayload( - action_plan=action_plan, - audit=audit_payload, - strategy=strategy_payload, - ) - - notification = ActionPlanDeleteNotification( - priority=wfields.NotificationPriority.INFO, - event_type=notificationbase.EventType( - object='action_plan', - action=wfields.NotificationAction.DELETE), - publisher=notificationbase.NotificationPublisher( - host=host or CONF.host, - binary=service), - payload=versioned_payload) - - notification.emit(context) - - -def send_action_notification(context, action_plan, action, phase=None, - priority=wfields.NotificationPriority.INFO, - service='infra-optim', host=None): - """Emit an action_plan action notification.""" - audit_payload, strategy_payload = _get_common_payload(action_plan) - - fault = None - if phase == wfields.NotificationPhase.ERROR: - fault = exception_notifications.ExceptionPayload.from_exception() - - versioned_payload = ActionPlanActionPayload( - action_plan=action_plan, - audit=audit_payload, - strategy=strategy_payload, - fault=fault, - ) - - notification = ActionPlanActionNotification( - priority=priority, - event_type=notificationbase.EventType( - object='action_plan', - action=action, - phase=phase), - publisher=notificationbase.NotificationPublisher( - host=host or CONF.host, - binary=service), - payload=versioned_payload) - - notification.emit(context) diff --git a/watcher/notifications/audit.py b/watcher/notifications/audit.py deleted file mode 100644 index 83ec80d..0000000 --- a/watcher/notifications/audit.py +++ /dev/null @@ -1,368 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Authors: Vincent FRANCOISE -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from oslo_config import cfg - -from watcher.common import exception -from watcher.notifications import base as notificationbase -from watcher.notifications import exception as exception_notifications -from watcher.notifications import goal as goal_notifications -from watcher.notifications import strategy as strategy_notifications -from watcher.objects import base -from watcher.objects import fields as wfields - -CONF = cfg.CONF - - -@base.WatcherObjectRegistry.register_notification -class TerseAuditPayload(notificationbase.NotificationPayloadBase): - SCHEMA = { - 'uuid': ('audit', 'uuid'), - - 'audit_type': ('audit', 'audit_type'), - 'state': ('audit', 'state'), - 'parameters': ('audit', 'parameters'), - 'interval': ('audit', 'interval'), - 'scope': ('audit', 'scope'), - 'auto_trigger': ('audit', 'auto_trigger'), - 'next_run_time': ('audit', 'next_run_time'), - - 'created_at': ('audit', 'created_at'), - 'updated_at': ('audit', 'updated_at'), - 'deleted_at': ('audit', 'deleted_at'), - } - - # Version 1.0: Initial version - # Version 1.1: Added 'auto_trigger' boolean field, - # Added 'next_run_time' DateTime field, - # 'interval' type has been changed from Integer to String - VERSION = '1.1' - - fields = { - 'uuid': wfields.UUIDField(), - 'audit_type': wfields.StringField(), - 'state': wfields.StringField(), - 'parameters': wfields.FlexibleDictField(nullable=True), - 'interval': wfields.StringField(nullable=True), - 'scope': wfields.FlexibleListOfDictField(nullable=True), - 'goal_uuid': wfields.UUIDField(), - 'strategy_uuid': 
wfields.UUIDField(nullable=True), - 'auto_trigger': wfields.BooleanField(), - 'next_run_time': wfields.DateTimeField(nullable=True), - - 'created_at': wfields.DateTimeField(nullable=True), - 'updated_at': wfields.DateTimeField(nullable=True), - 'deleted_at': wfields.DateTimeField(nullable=True), - } - - def __init__(self, audit, goal_uuid, strategy_uuid=None, **kwargs): - super(TerseAuditPayload, self).__init__( - goal_uuid=goal_uuid, strategy_uuid=strategy_uuid, **kwargs) - self.populate_schema(audit=audit) - - -@base.WatcherObjectRegistry.register_notification -class AuditPayload(TerseAuditPayload): - SCHEMA = { - 'uuid': ('audit', 'uuid'), - - 'audit_type': ('audit', 'audit_type'), - 'state': ('audit', 'state'), - 'parameters': ('audit', 'parameters'), - 'interval': ('audit', 'interval'), - 'scope': ('audit', 'scope'), - 'auto_trigger': ('audit', 'auto_trigger'), - 'next_run_time': ('audit', 'next_run_time'), - - 'created_at': ('audit', 'created_at'), - 'updated_at': ('audit', 'updated_at'), - 'deleted_at': ('audit', 'deleted_at'), - } - - # Version 1.0: Initial version - # Version 1.1: Added 'auto_trigger' field, - # Added 'next_run_time' field - VERSION = '1.1' - - fields = { - 'goal': wfields.ObjectField('GoalPayload'), - 'strategy': wfields.ObjectField('StrategyPayload', nullable=True), - } - - def __init__(self, audit, goal, strategy=None, **kwargs): - if not kwargs.get('goal_uuid'): - kwargs['goal_uuid'] = goal.uuid - - if strategy and not kwargs.get('strategy_uuid'): - kwargs['strategy_uuid'] = strategy.uuid - - super(AuditPayload, self).__init__( - audit=audit, goal=goal, - strategy=strategy, **kwargs) - - -@base.WatcherObjectRegistry.register_notification -class AuditStateUpdatePayload(notificationbase.NotificationPayloadBase): - # Version 1.0: Initial version - VERSION = '1.0' - - fields = { - 'old_state': wfields.StringField(nullable=True), - 'state': wfields.StringField(nullable=True), - } - - -@base.WatcherObjectRegistry.register_notification -class 
AuditCreatePayload(AuditPayload): - # Version 1.0: Initial version - # Version 1.1: Added 'auto_trigger' field, - # Added 'next_run_time' field - VERSION = '1.1' - fields = {} - - def __init__(self, audit, goal, strategy): - super(AuditCreatePayload, self).__init__( - audit=audit, - goal=goal, - goal_uuid=goal.uuid, - strategy=strategy) - - -@base.WatcherObjectRegistry.register_notification -class AuditUpdatePayload(AuditPayload): - # Version 1.0: Initial version - # Version 1.1: Added 'auto_trigger' field, - # Added 'next_run_time' field - VERSION = '1.1' - fields = { - 'state_update': wfields.ObjectField('AuditStateUpdatePayload'), - } - - def __init__(self, audit, state_update, goal, strategy): - super(AuditUpdatePayload, self).__init__( - audit=audit, - state_update=state_update, - goal=goal, - goal_uuid=goal.uuid, - strategy=strategy) - - -@base.WatcherObjectRegistry.register_notification -class AuditActionPayload(AuditPayload): - # Version 1.0: Initial version - # Version 1.1: Added 'auto_trigger' field, - # Added 'next_run_time' field - VERSION = '1.1' - fields = { - 'fault': wfields.ObjectField('ExceptionPayload', nullable=True), - } - - def __init__(self, audit, goal, strategy, **kwargs): - super(AuditActionPayload, self).__init__( - audit=audit, - goal=goal, - goal_uuid=goal.uuid, - strategy=strategy, - **kwargs) - - -@base.WatcherObjectRegistry.register_notification -class AuditDeletePayload(AuditPayload): - # Version 1.0: Initial version - # Version 1.1: Added 'auto_trigger' field, - # Added 'next_run_time' field - VERSION = '1.1' - fields = {} - - def __init__(self, audit, goal, strategy): - super(AuditDeletePayload, self).__init__( - audit=audit, - goal=goal, - goal_uuid=goal.uuid, - strategy=strategy) - - -@notificationbase.notification_sample('audit-strategy-error.json') -@notificationbase.notification_sample('audit-strategy-end.json') -@notificationbase.notification_sample('audit-strategy-start.json') 
-@base.WatcherObjectRegistry.register_notification -class AuditActionNotification(notificationbase.NotificationBase): - # Version 1.0: Initial version - VERSION = '1.0' - - fields = { - 'payload': wfields.ObjectField('AuditActionPayload') - } - - -@notificationbase.notification_sample('audit-create.json') -@base.WatcherObjectRegistry.register_notification -class AuditCreateNotification(notificationbase.NotificationBase): - # Version 1.0: Initial version - VERSION = '1.0' - - fields = { - 'payload': wfields.ObjectField('AuditCreatePayload') - } - - -@notificationbase.notification_sample('audit-update.json') -@base.WatcherObjectRegistry.register_notification -class AuditUpdateNotification(notificationbase.NotificationBase): - # Version 1.0: Initial version - VERSION = '1.0' - - fields = { - 'payload': wfields.ObjectField('AuditUpdatePayload') - } - - -@notificationbase.notification_sample('audit-delete.json') -@base.WatcherObjectRegistry.register_notification -class AuditDeleteNotification(notificationbase.NotificationBase): - # Version 1.0: Initial version - VERSION = '1.0' - - fields = { - 'payload': wfields.ObjectField('AuditDeletePayload') - } - - -def _get_common_payload(audit): - goal = None - strategy = None - try: - goal = audit.goal - if audit.strategy_id: - strategy = audit.strategy - except NotImplementedError: - raise exception.EagerlyLoadedAuditRequired(audit=audit.uuid) - - goal_payload = goal_notifications.GoalPayload(goal=goal) - - strategy_payload = None - if strategy: - strategy_payload = strategy_notifications.StrategyPayload( - strategy=strategy) - - return goal_payload, strategy_payload - - -def send_create(context, audit, service='infra-optim', host=None): - """Emit an audit.create notification.""" - goal_payload, strategy_payload = _get_common_payload(audit) - - versioned_payload = AuditCreatePayload( - audit=audit, - goal=goal_payload, - strategy=strategy_payload, - ) - - notification = AuditCreateNotification( - 
priority=wfields.NotificationPriority.INFO, - event_type=notificationbase.EventType( - object='audit', - action=wfields.NotificationAction.CREATE), - publisher=notificationbase.NotificationPublisher( - host=host or CONF.host, - binary=service), - payload=versioned_payload) - - notification.emit(context) - - -def send_update(context, audit, service='infra-optim', - host=None, old_state=None): - """Emit an audit.update notification.""" - goal_payload, strategy_payload = _get_common_payload(audit) - - state_update = AuditStateUpdatePayload( - old_state=old_state, - state=audit.state if old_state else None) - - versioned_payload = AuditUpdatePayload( - audit=audit, - state_update=state_update, - goal=goal_payload, - strategy=strategy_payload, - ) - - notification = AuditUpdateNotification( - priority=wfields.NotificationPriority.INFO, - event_type=notificationbase.EventType( - object='audit', - action=wfields.NotificationAction.UPDATE), - publisher=notificationbase.NotificationPublisher( - host=host or CONF.host, - binary=service), - payload=versioned_payload) - - notification.emit(context) - - -def send_delete(context, audit, service='infra-optim', host=None): - goal_payload, strategy_payload = _get_common_payload(audit) - - versioned_payload = AuditDeletePayload( - audit=audit, - goal=goal_payload, - strategy=strategy_payload, - ) - - notification = AuditDeleteNotification( - priority=wfields.NotificationPriority.INFO, - event_type=notificationbase.EventType( - object='audit', - action=wfields.NotificationAction.DELETE), - publisher=notificationbase.NotificationPublisher( - host=host or CONF.host, - binary=service), - payload=versioned_payload) - - notification.emit(context) - - -def send_action_notification(context, audit, action, phase=None, - priority=wfields.NotificationPriority.INFO, - service='infra-optim', host=None): - """Emit an audit action notification.""" - goal_payload, strategy_payload = _get_common_payload(audit) - - fault = None - if phase == 
wfields.NotificationPhase.ERROR: - fault = exception_notifications.ExceptionPayload.from_exception() - - versioned_payload = AuditActionPayload( - audit=audit, - goal=goal_payload, - strategy=strategy_payload, - fault=fault, - ) - - notification = AuditActionNotification( - priority=priority, - event_type=notificationbase.EventType( - object='audit', - action=action, - phase=phase), - publisher=notificationbase.NotificationPublisher( - host=host or CONF.host, - binary=service), - payload=versioned_payload) - - notification.emit(context) diff --git a/watcher/notifications/base.py b/watcher/notifications/base.py deleted file mode 100644 index d1c2d0e..0000000 --- a/watcher/notifications/base.py +++ /dev/null @@ -1,216 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from oslo_config import cfg -from oslo_log import log - -from watcher.common import exception -from watcher.common import rpc -from watcher.objects import base -from watcher.objects import fields as wfields - -CONF = cfg.CONF -LOG = log.getLogger(__name__) - -# Definition of notification levels in increasing order of severity -NOTIFY_LEVELS = { - wfields.NotificationPriority.DEBUG: 0, - wfields.NotificationPriority.INFO: 1, - wfields.NotificationPriority.WARNING: 2, - wfields.NotificationPriority.ERROR: 3, - wfields.NotificationPriority.CRITICAL: 4 -} - - -@base.WatcherObjectRegistry.register_if(False) -class NotificationObject(base.WatcherObject): - """Base class for every notification related versioned object.""" - - # Version 1.0: Initial version - VERSION = '1.0' - - def __init__(self, **kwargs): - super(NotificationObject, self).__init__(**kwargs) - # The notification objects are created on the fly when watcher emits - # the notification. This causes that every object shows every field as - # changed. We don't want to send this meaningless information so we - # reset the object after creation. 
- self.obj_reset_changes(recursive=False) - - def save(self, context): - raise exception.UnsupportedError() - - def obj_load_attr(self, attrname): - raise exception.UnsupportedError() - - -@base.WatcherObjectRegistry.register_notification -class EventType(NotificationObject): - - # Version 1.0: Initial version - # Version 1.1: Added STRATEGY action in NotificationAction enum - # Version 1.2: Added PLANNER action in NotificationAction enum - # Version 1.3: Added EXECUTION action in NotificationAction enum - VERSION = '1.3' - - fields = { - 'object': wfields.StringField(), - 'action': wfields.NotificationActionField(), - 'phase': wfields.NotificationPhaseField(nullable=True), - } - - def to_notification_event_type_field(self): - """Serialize the object to the wire format.""" - s = '%s.%s' % (self.object, self.action) - if self.obj_attr_is_set('phase'): - s += '.%s' % self.phase - return s - - -@base.WatcherObjectRegistry.register_if(False) -class NotificationPayloadBase(NotificationObject): - """Base class for the payload of versioned notifications.""" - # SCHEMA defines how to populate the payload fields. It is a dictionary - # where every key value pair has the following format: - # : (, - # ) - # The is the name where the data will be stored in the - # payload object, this field has to be defined as a field of the payload. - # The shall refer to name of the parameter passed as - # kwarg to the payload's populate_schema() call and this object will be - # used as the source of the data. The shall be - # a valid field of the passed argument. - # The SCHEMA needs to be applied with the populate_schema() call before the - # notification can be emitted. - # The value of the payload. field will be set by the - # . field. The - # will not be part of the payload object internal or - # external representation. - # Payload fields that are not set by the SCHEMA can be filled in the same - # way as in any versioned object. 
- SCHEMA = {} - # Version 1.0: Initial version - VERSION = '1.0' - - def __init__(self, **kwargs): - super(NotificationPayloadBase, self).__init__(**kwargs) - self.populated = not self.SCHEMA - - def populate_schema(self, **kwargs): - """Populate the object based on the SCHEMA and the source objects - - :param kwargs: A dict contains the source object at the key defined in - the SCHEMA - """ - for key, (obj, field) in self.SCHEMA.items(): - source = kwargs[obj] - if source.obj_attr_is_set(field): - setattr(self, key, getattr(source, field)) - self.populated = True - - # the schema population will create changed fields but we don't need - # this information in the notification - self.obj_reset_changes(recursive=False) - - -@base.WatcherObjectRegistry.register_notification -class NotificationPublisher(NotificationObject): - # Version 1.0: Initial version - VERSION = '1.0' - - fields = { - 'host': wfields.StringField(nullable=False), - 'binary': wfields.StringField(nullable=False), - } - - -@base.WatcherObjectRegistry.register_if(False) -class NotificationBase(NotificationObject): - """Base class for versioned notifications. - - Every subclass shall define a 'payload' field. - """ - - # Version 1.0: Initial version - VERSION = '1.0' - - fields = { - 'priority': wfields.NotificationPriorityField(), - 'event_type': wfields.ObjectField('EventType'), - 'publisher': wfields.ObjectField('NotificationPublisher'), - } - - def save(self, context): - raise exception.UnsupportedError() - - def obj_load_attr(self, attrname): - raise exception.UnsupportedError() - - def _should_notify(self): - """Determine whether the notification should be sent. - - A notification is sent when the level of the notification is - greater than or equal to the level specified in the - configuration, in the increasing order of DEBUG, INFO, WARNING, - ERROR, CRITICAL. - :return: True if notification should be sent, False otherwise. 
- """ - if not CONF.notification_level: - return False - return (NOTIFY_LEVELS[self.priority] >= - NOTIFY_LEVELS[CONF.notification_level]) - - def _emit(self, context, event_type, publisher_id, payload): - notifier = rpc.get_notifier(publisher_id) - notify = getattr(notifier, self.priority) - LOG.debug("Emitting notification `%s`", event_type) - notify(context, event_type=event_type, payload=payload) - - def emit(self, context): - """Send the notification.""" - if not self._should_notify(): - return - if not self.payload.populated: - raise exception.NotificationPayloadError( - class_name=self.__class__.__name__) - # Note(gibi): notification payload will be a newly populated object - # therefore every field of it will look changed so this does not carry - # any extra information so we drop this from the payload. - self.payload.obj_reset_changes(recursive=False) - - self._emit( - context, - event_type=self.event_type.to_notification_event_type_field(), - publisher_id='%s:%s' % (self.publisher.binary, - self.publisher.host), - payload=self.payload.obj_to_primitive()) - - -def notification_sample(sample): - """Provide a notification sample of the decorated notification. - - Class decorator to attach the notification sample information - to the notification object for documentation generation purposes. - - :param sample: the path of the sample json file relative to the - doc/notification_samples/ directory in the watcher - repository root. - """ - def wrap(cls): - if not getattr(cls, 'samples', None): - cls.samples = [sample] - else: - cls.samples.append(sample) - return cls - return wrap diff --git a/watcher/notifications/exception.py b/watcher/notifications/exception.py deleted file mode 100644 index 68fc1eb..0000000 --- a/watcher/notifications/exception.py +++ /dev/null @@ -1,55 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import inspect -import sys - -import six - -from watcher.notifications import base as notificationbase -from watcher.objects import base as base -from watcher.objects import fields as wfields - - -@base.WatcherObjectRegistry.register_notification -class ExceptionPayload(notificationbase.NotificationPayloadBase): - # Version 1.0: Initial version - VERSION = '1.0' - fields = { - 'module_name': wfields.StringField(), - 'function_name': wfields.StringField(), - 'exception': wfields.StringField(), - 'exception_message': wfields.StringField() - } - - @classmethod - def from_exception(cls, fault=None): - fault = fault or sys.exc_info()[1] - trace = inspect.trace()[-1] - # TODO(gibi): apply strutils.mask_password on exception_message and - # consider emitting the exception_message only if the safe flag is - # true in the exception like in the REST API - return cls( - function_name=trace[3], - module_name=inspect.getmodule(trace[0]).__name__, - exception=fault.__class__.__name__, - exception_message=six.text_type(fault)) - - -@notificationbase.notification_sample('infra-optim-exception.json') -@base.WatcherObjectRegistry.register_notification -class ExceptionNotification(notificationbase.NotificationBase): - # Version 1.0: Initial version - VERSION = '1.0' - fields = { - 'payload': wfields.ObjectField('ExceptionPayload') - } diff --git a/watcher/notifications/goal.py b/watcher/notifications/goal.py deleted file mode 100644 index 8c76bad..0000000 --- a/watcher/notifications/goal.py +++ /dev/null @@ -1,53 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 
b<>com -# -# Authors: Vincent FRANCOISE -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from watcher.notifications import base as notificationbase -from watcher.objects import base -from watcher.objects import fields as wfields - - -@base.WatcherObjectRegistry.register_notification -class GoalPayload(notificationbase.NotificationPayloadBase): - SCHEMA = { - 'uuid': ('goal', 'uuid'), - 'name': ('goal', 'name'), - 'display_name': ('goal', 'display_name'), - 'efficacy_specification': ('goal', 'efficacy_specification'), - - 'created_at': ('goal', 'created_at'), - 'updated_at': ('goal', 'updated_at'), - 'deleted_at': ('goal', 'deleted_at'), - } - - # Version 1.0: Initial version - VERSION = '1.0' - - fields = { - 'uuid': wfields.UUIDField(), - 'name': wfields.StringField(), - 'display_name': wfields.StringField(), - 'efficacy_specification': wfields.FlexibleListOfDictField(), - - 'created_at': wfields.DateTimeField(nullable=True), - 'updated_at': wfields.DateTimeField(nullable=True), - 'deleted_at': wfields.DateTimeField(nullable=True), - } - - def __init__(self, goal, **kwargs): - super(GoalPayload, self).__init__(**kwargs) - self.populate_schema(goal=goal) diff --git a/watcher/notifications/service.py b/watcher/notifications/service.py deleted file mode 100644 index 1d2ab8a..0000000 --- a/watcher/notifications/service.py +++ /dev/null @@ -1,113 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2017 Servionica -# -# Licensed under the Apache License, Version 2.0 
(the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from oslo_config import cfg - -from watcher.notifications import base as notificationbase -from watcher.objects import base -from watcher.objects import fields as wfields -from watcher.objects import service as o_service - -CONF = cfg.CONF - - -@base.WatcherObjectRegistry.register_notification -class ServicePayload(notificationbase.NotificationPayloadBase): - - SCHEMA = { - 'sevice_host': ('failed_service', 'host'), - 'name': ('failed_service', 'name'), - 'last_seen_up': ('failed_service', 'last_seen_up'), - } - # Version 1.0: Initial version - VERSION = '1.0' - fields = { - 'sevice_host': wfields.StringField(), - 'name': wfields.StringField(), - 'last_seen_up': wfields.DateTimeField(nullable=True), - } - - def __init__(self, failed_service, status_update, **kwargs): - super(ServicePayload, self).__init__( - failed_service=failed_service, - status_update=status_update, **kwargs) - self.populate_schema(failed_service=failed_service) - - -@base.WatcherObjectRegistry.register_notification -class ServiceStatusUpdatePayload(notificationbase.NotificationPayloadBase): - # Version 1.0: Initial version - VERSION = '1.0' - fields = { - 'old_state': wfields.StringField(nullable=True), - 'state': wfields.StringField(nullable=True), - } - - -@base.WatcherObjectRegistry.register_notification -class ServiceUpdatePayload(ServicePayload): - # Version 1.0: Initial version - VERSION = '1.0' - fields = { - 'status_update': wfields.ObjectField('ServiceStatusUpdatePayload'), - } - - def 
__init__(self, failed_service, status_update): - super(ServiceUpdatePayload, self).__init__( - failed_service=failed_service, - status_update=status_update) - - -@notificationbase.notification_sample('service-update.json') -@base.WatcherObjectRegistry.register_notification -class ServiceUpdateNotification(notificationbase.NotificationBase): - # Version 1.0: Initial version - VERSION = '1.0' - - fields = { - 'payload': wfields.ObjectField('ServiceUpdatePayload') - } - - -def send_service_update(context, failed_service, state, - service='infra-optim', - host=None): - """Emit an service failed notification.""" - if state == o_service.ServiceStatus.FAILED: - priority = wfields.NotificationPriority.WARNING - status_update = ServiceStatusUpdatePayload( - old_state=o_service.ServiceStatus.ACTIVE, - state=o_service.ServiceStatus.FAILED) - else: - priority = wfields.NotificationPriority.INFO - status_update = ServiceStatusUpdatePayload( - old_state=o_service.ServiceStatus.FAILED, - state=o_service.ServiceStatus.ACTIVE) - versioned_payload = ServiceUpdatePayload( - failed_service=failed_service, - status_update=status_update - ) - - notification = ServiceUpdateNotification( - priority=priority, - event_type=notificationbase.EventType( - object='service', - action=wfields.NotificationAction.UPDATE), - publisher=notificationbase.NotificationPublisher( - host=host or CONF.host, - binary=service), - payload=versioned_payload) - - notification.emit(context) diff --git a/watcher/notifications/strategy.py b/watcher/notifications/strategy.py deleted file mode 100644 index f7da109..0000000 --- a/watcher/notifications/strategy.py +++ /dev/null @@ -1,53 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Authors: Vincent FRANCOISE -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from watcher.notifications import base as notificationbase -from watcher.objects import base -from watcher.objects import fields as wfields - - -@base.WatcherObjectRegistry.register_notification -class StrategyPayload(notificationbase.NotificationPayloadBase): - SCHEMA = { - 'uuid': ('strategy', 'uuid'), - 'name': ('strategy', 'name'), - 'display_name': ('strategy', 'display_name'), - 'parameters_spec': ('strategy', 'parameters_spec'), - - 'created_at': ('strategy', 'created_at'), - 'updated_at': ('strategy', 'updated_at'), - 'deleted_at': ('strategy', 'deleted_at'), - } - - # Version 1.0: Initial version - VERSION = '1.0' - - fields = { - 'uuid': wfields.UUIDField(), - 'name': wfields.StringField(), - 'display_name': wfields.StringField(), - 'parameters_spec': wfields.FlexibleDictField(nullable=True), - - 'created_at': wfields.DateTimeField(nullable=True), - 'updated_at': wfields.DateTimeField(nullable=True), - 'deleted_at': wfields.DateTimeField(nullable=True), - } - - def __init__(self, strategy, **kwargs): - super(StrategyPayload, self).__init__(**kwargs) - self.populate_schema(strategy=strategy) diff --git a/watcher/objects/__init__.py b/watcher/objects/__init__.py deleted file mode 100644 index 11c8a86..0000000 --- a/watcher/objects/__init__.py +++ /dev/null @@ -1,35 +0,0 @@ -# Copyright 2013 IBM Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# NOTE(comstud): You may scratch your head as you see code that imports -# this module and then accesses attributes for objects such as Node, -# etc, yet you do not see these attributes in here. Never fear, there is -# a little bit of magic. When objects are registered, an attribute is set -# on this module automatically, pointing to the newest/latest version of -# the object. - - -def register_all(): - # NOTE(danms): You must make sure your object gets imported in this - # function in order for it to be registered by services that may - # need to receive it via RPC. - __import__('watcher.objects.goal') - __import__('watcher.objects.strategy') - __import__('watcher.objects.audit_template') - __import__('watcher.objects.audit') - __import__('watcher.objects.action_plan') - __import__('watcher.objects.action') - __import__('watcher.objects.efficacy_indicator') - __import__('watcher.objects.scoring_engine') - __import__('watcher.objects.service') diff --git a/watcher/objects/action.py b/watcher/objects/action.py deleted file mode 100644 index 95f923a..0000000 --- a/watcher/objects/action.py +++ /dev/null @@ -1,182 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright 2013 IBM Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from watcher.common import exception -from watcher.common import utils -from watcher.db import api as db_api -from watcher import notifications -from watcher import objects -from watcher.objects import base -from watcher.objects import fields as wfields - - -class State(object): - PENDING = 'PENDING' - ONGOING = 'ONGOING' - FAILED = 'FAILED' - SUCCEEDED = 'SUCCEEDED' - DELETED = 'DELETED' - CANCELLED = 'CANCELLED' - CANCELLING = 'CANCELLING' - - -@base.WatcherObjectRegistry.register -class Action(base.WatcherPersistentObject, base.WatcherObject, - base.WatcherObjectDictCompat): - - # Version 1.0: Initial version - # Version 1.1: Added 'action_plan' object field - # Version 2.0: Removed 'next' object field, Added 'parents' object field - VERSION = '2.0' - - dbapi = db_api.get_instance() - - fields = { - 'id': wfields.IntegerField(), - 'uuid': wfields.UUIDField(), - 'action_plan_id': wfields.IntegerField(), - 'action_type': wfields.StringField(nullable=True), - 'input_parameters': wfields.DictField(nullable=True), - 'state': wfields.StringField(nullable=True), - 'parents': wfields.ListOfStringsField(nullable=True), - - 'action_plan': wfields.ObjectField('ActionPlan', nullable=True), - } - object_fields = { - 'action_plan': (objects.ActionPlan, 'action_plan_id'), - } - - @base.remotable_classmethod - def get(cls, context, action_id, eager=False): - """Find a action based on its id or uuid and return a Action object. - - :param action_id: the id *or* uuid of a action. 
- :param eager: Load object fields if True (Default: False) - :returns: a :class:`Action` object. - """ - if utils.is_int_like(action_id): - return cls.get_by_id(context, action_id, eager=eager) - elif utils.is_uuid_like(action_id): - return cls.get_by_uuid(context, action_id, eager=eager) - else: - raise exception.InvalidIdentity(identity=action_id) - - @base.remotable_classmethod - def get_by_id(cls, context, action_id, eager=False): - """Find a action based on its integer id and return a Action object. - - :param action_id: the id of a action. - :param eager: Load object fields if True (Default: False) - :returns: a :class:`Action` object. - """ - db_action = cls.dbapi.get_action_by_id(context, action_id, eager=eager) - action = cls._from_db_object(cls(context), db_action, eager=eager) - return action - - @base.remotable_classmethod - def get_by_uuid(cls, context, uuid, eager=False): - """Find a action based on uuid and return a :class:`Action` object. - - :param uuid: the uuid of a action. - :param context: Security context - :param eager: Load object fields if True (Default: False) - :returns: a :class:`Action` object. - """ - db_action = cls.dbapi.get_action_by_uuid(context, uuid, eager=eager) - action = cls._from_db_object(cls(context), db_action, eager=eager) - return action - - @base.remotable_classmethod - def list(cls, context, limit=None, marker=None, filters=None, - sort_key=None, sort_dir=None, eager=False): - """Return a list of Action objects. - - :param context: Security context. - :param limit: maximum number of resources to return in a single result. - :param marker: pagination marker for large data sets. - :param filters: Filters to apply. Defaults to None. - :param sort_key: column to sort results by. - :param sort_dir: direction to sort. "asc" or "desc". - :param eager: Load object fields if True (Default: False) - :returns: a list of :class:`Action` object. 
- """ - db_actions = cls.dbapi.get_action_list(context, - limit=limit, - marker=marker, - filters=filters, - sort_key=sort_key, - sort_dir=sort_dir, - eager=eager) - - return [cls._from_db_object(cls(context), obj, eager=eager) - for obj in db_actions] - - @base.remotable - def create(self): - """Create an :class:`Action` record in the DB. - - :returns: An :class:`Action` object. - """ - values = self.obj_get_changes() - db_action = self.dbapi.create_action(values) - # Note(v-francoise): Always load eagerly upon creation so we can send - # notifications containing information about the related relationships - self._from_db_object(self, db_action, eager=True) - - notifications.action.send_create(self.obj_context, self) - - def destroy(self): - """Delete the Action from the DB""" - self.dbapi.destroy_action(self.uuid) - self.obj_reset_changes() - - @base.remotable - def save(self): - """Save updates to this Action. - - Updates will be made column by column based on the result - of self.what_changed(). - """ - updates = self.obj_get_changes() - db_obj = self.dbapi.update_action(self.uuid, updates) - obj = self._from_db_object(self, db_obj, eager=False) - self.obj_refresh(obj) - notifications.action.send_update(self.obj_context, self) - self.obj_reset_changes() - - @base.remotable - def refresh(self, eager=False): - """Loads updates for this Action. - - Loads a action with the same uuid from the database and - checks for updated attributes. Updates are applied from - the loaded action column by column, if there are any updates. 
- :param eager: Load object fields if True (Default: False) - """ - current = self.get_by_uuid(self._context, uuid=self.uuid, eager=eager) - self.obj_refresh(current) - - @base.remotable - def soft_delete(self): - """Soft Delete the Audit from the DB""" - self.state = State.DELETED - self.save() - db_obj = self.dbapi.soft_delete_action(self.uuid) - obj = self._from_db_object( - self.__class__(self._context), db_obj, eager=False) - self.obj_refresh(obj) - - notifications.action.send_delete(self.obj_context, self) diff --git a/watcher/objects/action_plan.py b/watcher/objects/action_plan.py deleted file mode 100644 index 4618ec9..0000000 --- a/watcher/objects/action_plan.py +++ /dev/null @@ -1,340 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright 2013 IBM Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -An :ref:`Action Plan ` is a flow of -:ref:`Actions ` that should be executed in order to satisfy -a given :ref:`Goal `. - -An :ref:`Action Plan ` is generated by Watcher when an -:ref:`Audit ` is successful which implies that the -:ref:`Strategy ` -which was used has found a :ref:`Solution ` to achieve the -:ref:`Goal ` of this :ref:`Audit `. - -In the default implementation of Watcher, an -:ref:`Action Plan ` -is only composed of successive :ref:`Actions ` -(i.e., a Workflow of :ref:`Actions ` belonging to a unique -branch). 
- -However, Watcher provides abstract interfaces for many of its components, -allowing other implementations to generate and handle more complex -:ref:`Action Plan(s) ` -composed of two types of Action Item(s): - -- simple :ref:`Actions `: atomic tasks, which means it - can not be split into smaller tasks or commands from an OpenStack point of - view. -- composite Actions: which are composed of several simple - :ref:`Actions ` - ordered in sequential and/or parallel flows. - -An :ref:`Action Plan ` may be described using -standard workflow model description formats such as -`Business Process Model and Notation 2.0 (BPMN 2.0) -`_ or `Unified Modeling Language (UML) -`_. - -An :ref:`Action Plan ` has a life-cycle and its current -state may be one of the following: - -- **RECOMMENDED** : the :ref:`Action Plan ` is waiting - for a validation from the :ref:`Administrator ` -- **ONGOING** : the :ref:`Action Plan ` is currently - being processed by the :ref:`Watcher Applier ` -- **SUCCEEDED** : the :ref:`Action Plan ` has been - executed successfully (i.e. all :ref:`Actions ` that it - contains have been executed successfully) -- **FAILED** : an error occurred while executing the - :ref:`Action Plan ` -- **DELETED** : the :ref:`Action Plan ` is still - stored in the :ref:`Watcher database ` but is - not returned any more through the Watcher APIs. 
-- **CANCELLED** : the :ref:`Action Plan ` was in - **PENDING** or **ONGOING** state and was cancelled by the - :ref:`Administrator ` -- **SUPERSEDED** : the :ref:`Action Plan ` was in - **RECOMMENDED** state and was superseded by the - :ref:`Administrator ` -""" -import datetime - -from watcher.common import exception -from watcher.common import utils -from watcher import conf -from watcher.db import api as db_api -from watcher import notifications -from watcher import objects -from watcher.objects import base -from watcher.objects import fields as wfields - -CONF = conf.CONF - - -class State(object): - RECOMMENDED = 'RECOMMENDED' - PENDING = 'PENDING' - ONGOING = 'ONGOING' - FAILED = 'FAILED' - SUCCEEDED = 'SUCCEEDED' - DELETED = 'DELETED' - CANCELLED = 'CANCELLED' - SUPERSEDED = 'SUPERSEDED' - CANCELLING = 'CANCELLING' - - -@base.WatcherObjectRegistry.register -class ActionPlan(base.WatcherPersistentObject, base.WatcherObject, - base.WatcherObjectDictCompat): - - # Version 1.0: Initial version - # Version 1.1: Added 'audit' and 'strategy' object field - # Version 1.2: audit_id is not nullable anymore - # Version 2.0: Removed 'first_action_id' object field - VERSION = '2.0' - - dbapi = db_api.get_instance() - - fields = { - 'id': wfields.IntegerField(), - 'uuid': wfields.UUIDField(), - 'audit_id': wfields.IntegerField(), - 'strategy_id': wfields.IntegerField(), - 'state': wfields.StringField(nullable=True), - 'global_efficacy': wfields.FlexibleDictField(nullable=True), - - 'audit': wfields.ObjectField('Audit', nullable=True), - 'strategy': wfields.ObjectField('Strategy', nullable=True), - } - - object_fields = { - 'audit': (objects.Audit, 'audit_id'), - 'strategy': (objects.Strategy, 'strategy_id'), - } - - # Proxified field so we can keep the previous value after an update - _state = None - _old_state = None - - # NOTE(v-francoise): The way oslo.versionedobjects works is by using a - # __new__ that will automatically create the attributes referenced in - # 
fields. These attributes are properties that raise an exception if no - # value has been assigned, which means that they store the actual field - # value in an "_obj_%(field)s" attribute. So because we want to proxify a - # value that is already proxified, we have to do what you see below. - @property - def _obj_state(self): - return self._state - - @property - def _obj_old_state(self): - return self._old_state - - @property - def old_state(self): - return self._old_state - - @_obj_old_state.setter - def _obj_old_state(self, value): - self._old_state = value - - @_obj_state.setter - def _obj_state(self, value): - if self._old_state is None and self._state is None: - self._state = value - else: - self._old_state, self._state = self._state, value - - @base.remotable_classmethod - def get(cls, context, action_plan_id, eager=False): - """Find a action_plan based on its id or uuid and return a Action object. - - :param action_plan_id: the id *or* uuid of a action_plan. - :param eager: Load object fields if True (Default: False) - :returns: a :class:`Action` object. - """ - if utils.is_int_like(action_plan_id): - return cls.get_by_id(context, action_plan_id, eager=eager) - elif utils.is_uuid_like(action_plan_id): - return cls.get_by_uuid(context, action_plan_id, eager=eager) - else: - raise exception.InvalidIdentity(identity=action_plan_id) - - @base.remotable_classmethod - def get_by_id(cls, context, action_plan_id, eager=False): - """Find a action_plan based on its integer id and return a ActionPlan object. - - :param action_plan_id: the id of a action_plan. - :param eager: Load object fields if True (Default: False) - :returns: a :class:`ActionPlan` object. 
- """ - db_action_plan = cls.dbapi.get_action_plan_by_id( - context, action_plan_id, eager=eager) - action_plan = cls._from_db_object( - cls(context), db_action_plan, eager=eager) - return action_plan - - @base.remotable_classmethod - def get_by_uuid(cls, context, uuid, eager=False): - """Find a action_plan based on uuid and return a :class:`ActionPlan` object. - - :param uuid: the uuid of a action_plan. - :param context: Security context - :param eager: Load object fields if True (Default: False) - :returns: a :class:`ActionPlan` object. - """ - db_action_plan = cls.dbapi.get_action_plan_by_uuid( - context, uuid, eager=eager) - action_plan = cls._from_db_object( - cls(context), db_action_plan, eager=eager) - return action_plan - - @base.remotable_classmethod - def list(cls, context, limit=None, marker=None, filters=None, - sort_key=None, sort_dir=None, eager=False): - """Return a list of ActionPlan objects. - - :param context: Security context. - :param limit: maximum number of resources to return in a single result. - :param marker: pagination marker for large data sets. - :param filters: Filters to apply. Defaults to None. - :param sort_key: column to sort results by. - :param sort_dir: direction to sort. "asc" or "desc". - :param eager: Load object fields if True (Default: False) - :returns: a list of :class:`ActionPlan` object. - """ - db_action_plans = cls.dbapi.get_action_plan_list(context, - limit=limit, - marker=marker, - filters=filters, - sort_key=sort_key, - sort_dir=sort_dir, - eager=eager) - - return [cls._from_db_object(cls(context), obj, eager=eager) - for obj in db_action_plans] - - @base.remotable - def create(self): - """Create an :class:`ActionPlan` record in the DB. - - :returns: An :class:`ActionPlan` object. 
- """ - values = self.obj_get_changes() - db_action_plan = self.dbapi.create_action_plan(values) - # Note(v-francoise): Always load eagerly upon creation so we can send - # notifications containing information about the related relationships - self._from_db_object(self, db_action_plan, eager=True) - - def _notify(): - notifications.action_plan.send_create(self._context, self) - - _notify() - - @base.remotable - def destroy(self): - """Delete the action plan from the DB""" - related_efficacy_indicators = objects.EfficacyIndicator.list( - context=self._context, - filters={"action_plan_uuid": self.uuid}) - - # Cascade soft_delete of related efficacy indicators - for related_efficacy_indicator in related_efficacy_indicators: - related_efficacy_indicator.destroy() - - self.dbapi.destroy_action_plan(self.uuid) - self.obj_reset_changes() - - @base.remotable - def save(self): - """Save updates to this Action plan. - - Updates will be made column by column based on the result - of self.what_changed(). - """ - updates = self.obj_get_changes() - db_obj = self.dbapi.update_action_plan(self.uuid, updates) - obj = self._from_db_object( - self.__class__(self._context), db_obj, eager=False) - self.obj_refresh(obj) - - def _notify(): - notifications.action_plan.send_update( - self._context, self, old_state=self.old_state) - - _notify() - - self.obj_reset_changes() - - @base.remotable - def refresh(self, eager=False): - """Loads updates for this Action plan. - - Loads a action_plan with the same uuid from the database and - checks for updated attributes. Updates are applied from - the loaded action_plan column by column, if there are any updates. 
- :param eager: Load object fields if True (Default: False) - """ - current = self.get_by_uuid(self._context, uuid=self.uuid, eager=eager) - self.obj_refresh(current) - - @base.remotable - def soft_delete(self): - """Soft Delete the Action plan from the DB""" - related_actions = objects.Action.list( - context=self._context, - filters={"action_plan_uuid": self.uuid}, - eager=True) - - # Cascade soft_delete of related actions - for related_action in related_actions: - related_action.soft_delete() - - related_efficacy_indicators = objects.EfficacyIndicator.list( - context=self._context, - filters={"action_plan_uuid": self.uuid}) - - # Cascade soft_delete of related efficacy indicators - for related_efficacy_indicator in related_efficacy_indicators: - related_efficacy_indicator.soft_delete() - - self.state = State.DELETED - self.save() - db_obj = self.dbapi.soft_delete_action_plan(self.uuid) - obj = self._from_db_object( - self.__class__(self._context), db_obj, eager=False) - self.obj_refresh(obj) - - def _notify(): - notifications.action_plan.send_delete(self._context, self) - - _notify() - - -class StateManager(object): - def check_expired(self, context): - action_plan_expiry = ( - CONF.watcher_decision_engine.action_plan_expiry) - date_created = datetime.datetime.utcnow() - datetime.timedelta( - hours=action_plan_expiry) - filters = {'state__eq': State.RECOMMENDED, - 'created_at__lt': date_created} - action_plans = objects.ActionPlan.list( - context, filters=filters, eager=True) - for action_plan in action_plans: - action_plan.state = State.SUPERSEDED - action_plan.save() diff --git a/watcher/objects/audit.py b/watcher/objects/audit.py deleted file mode 100644 index d0a1c10..0000000 --- a/watcher/objects/audit.py +++ /dev/null @@ -1,328 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright 2013 IBM Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -In the Watcher system, an :ref:`Audit ` is a request for -optimizing a :ref:`Cluster `. - -The optimization is done in order to satisfy one :ref:`Goal ` -on a given :ref:`Cluster `. - -For each :ref:`Audit `, the Watcher system generates an -:ref:`Action Plan `. - -An :ref:`Audit ` has a life-cycle and its current state may -be one of the following: - -- **PENDING** : a request for an :ref:`Audit ` has been - submitted (either manually by the - :ref:`Administrator ` or automatically via some - event handling mechanism) and is in the queue for being processed by the - :ref:`Watcher Decision Engine ` -- **ONGOING** : the :ref:`Audit ` is currently being - processed by the - :ref:`Watcher Decision Engine ` -- **SUCCEEDED** : the :ref:`Audit ` has been executed - successfully (note that it may not necessarily produce a - :ref:`Solution `). -- **FAILED** : an error occurred while executing the - :ref:`Audit ` -- **DELETED** : the :ref:`Audit ` is still stored in the - :ref:`Watcher database ` but is not returned - any more through the Watcher APIs. 
-- **CANCELLED** : the :ref:`Audit ` was in **PENDING** or - **ONGOING** state and was cancelled by the - :ref:`Administrator ` -- **SUSPENDED** : the :ref:`Audit ` was in **ONGOING** - state and was suspended by the - :ref:`Administrator ` -""" - -import enum - -from watcher.common import exception -from watcher.common import utils -from watcher.db import api as db_api -from watcher import notifications -from watcher import objects -from watcher.objects import base -from watcher.objects import fields as wfields - - -class State(object): - ONGOING = 'ONGOING' - SUCCEEDED = 'SUCCEEDED' - FAILED = 'FAILED' - CANCELLED = 'CANCELLED' - DELETED = 'DELETED' - PENDING = 'PENDING' - SUSPENDED = 'SUSPENDED' - - -class AuditType(enum.Enum): - ONESHOT = 'ONESHOT' - CONTINUOUS = 'CONTINUOUS' - - -@base.WatcherObjectRegistry.register -class Audit(base.WatcherPersistentObject, base.WatcherObject, - base.WatcherObjectDictCompat): - - # Version 1.0: Initial version - # Version 1.1: Added 'goal' and 'strategy' object field - # Version 1.2: Added 'auto_trigger' boolean field - # Version 1.3: Added 'next_run_time' DateTime field, - # 'interval' type has been changed from Integer to String - VERSION = '1.3' - - dbapi = db_api.get_instance() - - fields = { - 'id': wfields.IntegerField(), - 'uuid': wfields.UUIDField(), - 'audit_type': wfields.StringField(), - 'state': wfields.StringField(), - 'parameters': wfields.FlexibleDictField(nullable=True), - 'interval': wfields.StringField(nullable=True), - 'scope': wfields.FlexibleListOfDictField(nullable=True), - 'goal_id': wfields.IntegerField(), - 'strategy_id': wfields.IntegerField(nullable=True), - 'auto_trigger': wfields.BooleanField(), - 'next_run_time': wfields.DateTimeField(nullable=True, - tzinfo_aware=False), - - 'goal': wfields.ObjectField('Goal', nullable=True), - 'strategy': wfields.ObjectField('Strategy', nullable=True), - } - - object_fields = { - 'goal': (objects.Goal, 'goal_id'), - 'strategy': (objects.Strategy, 
'strategy_id'), - } - - # Proxified field so we can keep the previous value after an update - _state = None - _old_state = None - - # NOTE(v-francoise): The way oslo.versionedobjects works is by using a - # __new__ that will automatically create the attributes referenced in - # fields. These attributes are properties that raise an exception if no - # value has been assigned, which means that they store the actual field - # value in an "_obj_%(field)s" attribute. So because we want to proxify a - # value that is already proxified, we have to do what you see below. - @property - def _obj_state(self): - return self._state - - @property - def _obj_old_state(self): - return self._old_state - - @property - def old_state(self): - return self._old_state - - @_obj_old_state.setter - def _obj_old_state(self, value): - self._old_state = value - - @_obj_state.setter - def _obj_state(self, value): - if self._old_state is None and self._state is None: - self._state = value - else: - self._old_state, self._state = self._state, value - - @base.remotable_classmethod - def get(cls, context, audit_id, eager=False): - """Find a audit based on its id or uuid and return a Audit object. - - :param context: Security context. NOTE: This should only - be used internally by the indirection_api. - Unfortunately, RPC requires context as the first - argument, even though we don't use it. - A context should be set when instantiating the - object, e.g.: Audit(context) - :param audit_id: the id *or* uuid of a audit. - :param eager: Load object fields if True (Default: False) - :returns: a :class:`Audit` object. 
- """ - if utils.is_int_like(audit_id): - return cls.get_by_id(context, audit_id, eager=eager) - elif utils.is_uuid_like(audit_id): - return cls.get_by_uuid(context, audit_id, eager=eager) - else: - raise exception.InvalidIdentity(identity=audit_id) - - @base.remotable_classmethod - def get_by_id(cls, context, audit_id, eager=False): - """Find a audit based on its integer id and return a Audit object. - - :param context: Security context. NOTE: This should only - be used internally by the indirection_api. - Unfortunately, RPC requires context as the first - argument, even though we don't use it. - A context should be set when instantiating the - object, e.g.: Audit(context) - :param audit_id: the id of a audit. - :param eager: Load object fields if True (Default: False) - :returns: a :class:`Audit` object. - """ - db_audit = cls.dbapi.get_audit_by_id(context, audit_id, eager=eager) - audit = cls._from_db_object(cls(context), db_audit, eager=eager) - return audit - - @base.remotable_classmethod - def get_by_uuid(cls, context, uuid, eager=False): - """Find a audit based on uuid and return a :class:`Audit` object. - - :param context: Security context. NOTE: This should only - be used internally by the indirection_api. - Unfortunately, RPC requires context as the first - argument, even though we don't use it. - A context should be set when instantiating the - object, e.g.: Audit(context) - :param uuid: the uuid of a audit. - :param eager: Load object fields if True (Default: False) - :returns: a :class:`Audit` object. - """ - - db_audit = cls.dbapi.get_audit_by_uuid(context, uuid, eager=eager) - audit = cls._from_db_object(cls(context), db_audit, eager=eager) - return audit - - @base.remotable_classmethod - def list(cls, context, limit=None, marker=None, filters=None, - sort_key=None, sort_dir=None, eager=False): - """Return a list of Audit objects. - - :param context: Security context. NOTE: This should only - be used internally by the indirection_api. 
- Unfortunately, RPC requires context as the first - argument, even though we don't use it. - A context should be set when instantiating the - object, e.g.: Audit(context) - :param limit: maximum number of resources to return in a single result. - :param marker: pagination marker for large data sets. - :param filters: Filters to apply. Defaults to None. - :param sort_key: column to sort results by. - :param sort_dir: direction to sort. "asc" or "desc". - :param eager: Load object fields if True (Default: False) - :returns: a list of :class:`Audit` object. - - """ - db_audits = cls.dbapi.get_audit_list(context, - limit=limit, - marker=marker, - filters=filters, - sort_key=sort_key, - sort_dir=sort_dir, - eager=eager) - return [cls._from_db_object(cls(context), obj, eager=eager) - for obj in db_audits] - - @base.remotable - def create(self): - """Create an :class:`Audit` record in the DB. - - :returns: An :class:`Audit` object. - """ - values = self.obj_get_changes() - db_audit = self.dbapi.create_audit(values) - # Note(v-francoise): Always load eagerly upon creation so we can send - # notifications containing information about the related relationships - self._from_db_object(self, db_audit, eager=True) - - def _notify(): - notifications.audit.send_create(self._context, self) - - _notify() - - @base.remotable - def destroy(self): - """Delete the Audit from the DB.""" - self.dbapi.destroy_audit(self.uuid) - self.obj_reset_changes() - - @base.remotable - def save(self): - """Save updates to this Audit. - - Updates will be made column by column based on the result - of self.what_changed(). 
- """ - updates = self.obj_get_changes() - db_obj = self.dbapi.update_audit(self.uuid, updates) - obj = self._from_db_object( - self.__class__(self._context), db_obj, eager=False) - self.obj_refresh(obj) - - def _notify(): - notifications.audit.send_update( - self._context, self, old_state=self.old_state) - - _notify() - - self.obj_reset_changes() - - @base.remotable - def refresh(self, eager=False): - """Loads updates for this Audit. - - Loads a audit with the same uuid from the database and - checks for updated attributes. Updates are applied from - the loaded audit column by column, if there are any updates. - :param eager: Load object fields if True (Default: False) - """ - current = self.get_by_uuid(self._context, uuid=self.uuid, eager=eager) - self.obj_refresh(current) - - @base.remotable - def soft_delete(self): - """Soft Delete the Audit from the DB.""" - self.state = State.DELETED - self.save() - db_obj = self.dbapi.soft_delete_audit(self.uuid) - obj = self._from_db_object( - self.__class__(self._context), db_obj, eager=False) - self.obj_refresh(obj) - - def _notify(): - notifications.audit.send_delete(self._context, self) - - _notify() - - -class AuditStateTransitionManager(object): - - TRANSITIONS = { - State.PENDING: [State.ONGOING, State.CANCELLED], - State.ONGOING: [State.FAILED, State.SUCCEEDED, - State.CANCELLED, State.SUSPENDED], - State.FAILED: [State.DELETED], - State.SUCCEEDED: [State.DELETED], - State.CANCELLED: [State.DELETED], - State.SUSPENDED: [State.ONGOING, State.DELETED], - } - - INACTIVE_STATES = (State.CANCELLED, State.DELETED, - State.FAILED, State.SUSPENDED) - - def check_transition(self, initial, new): - return new in self.TRANSITIONS.get(initial, []) - - def is_inactive(self, audit): - return audit.state in self.INACTIVE_STATES diff --git a/watcher/objects/audit_template.py b/watcher/objects/audit_template.py deleted file mode 100644 index 11a0c2d..0000000 --- a/watcher/objects/audit_template.py +++ /dev/null @@ -1,241 +0,0 @@ -# 
-*- encoding: utf-8 -*- -# Copyright 2013 IBM Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -An :ref:`Audit ` may be launched several times with the same -settings (:ref:`Goal `, thresholds, ...). Therefore it makes -sense to save those settings in some sort of Audit preset object, which is -known as an :ref:`Audit Template `. - -An :ref:`Audit Template ` contains at least the -:ref:`Goal ` of the :ref:`Audit `. - -It may also contain some error handling settings indicating whether: - -- :ref:`Watcher Applier ` stops the - entire operation -- :ref:`Watcher Applier ` performs a rollback - -and how many retries should be attempted before failure occurs (also the latter -can be complex: for example the scenario in which there are many first-time -failures on ultimately successful :ref:`Actions `). - -Moreover, an :ref:`Audit Template ` may contain some -settings related to the level of automation for the -:ref:`Action Plan ` that will be generated by the -:ref:`Audit `. -A flag will indicate whether the :ref:`Action Plan ` -will be launched automatically or will need a manual confirmation from the -:ref:`Administrator `. - -Last but not least, an :ref:`Audit Template ` may -contain a list of extra parameters related to the -:ref:`Strategy ` configuration. These parameters can be -provided as a list of key-value pairs. 
-""" - -from watcher.common import exception -from watcher.common import utils -from watcher.db import api as db_api -from watcher import objects -from watcher.objects import base -from watcher.objects import fields as wfields - - -@base.WatcherObjectRegistry.register -class AuditTemplate(base.WatcherPersistentObject, base.WatcherObject, - base.WatcherObjectDictCompat): - - # Version 1.0: Initial version - # Version 1.1: Added 'goal' and 'strategy' object field - VERSION = '1.1' - - dbapi = db_api.get_instance() - - fields = { - 'id': wfields.IntegerField(), - 'uuid': wfields.UUIDField(), - 'name': wfields.StringField(), - 'description': wfields.StringField(nullable=True), - 'scope': wfields.FlexibleListOfDictField(nullable=True), - 'goal_id': wfields.IntegerField(), - 'strategy_id': wfields.IntegerField(nullable=True), - - 'goal': wfields.ObjectField('Goal', nullable=True), - 'strategy': wfields.ObjectField('Strategy', nullable=True), - } - - object_fields = { - 'goal': (objects.Goal, 'goal_id'), - 'strategy': (objects.Strategy, 'strategy_id'), - } - - @base.remotable_classmethod - def get(cls, context, audit_template_id, eager=False): - """Find an audit template based on its id or uuid - - :param context: Security context. NOTE: This should only - be used internally by the indirection_api. - Unfortunately, RPC requires context as the first - argument, even though we don't use it. - A context should be set when instantiating the - object, e.g.: AuditTemplate(context) - :param audit_template_id: the id *or* uuid of a audit_template. - :param eager: Load object fields if True (Default: False) - :returns: a :class:`AuditTemplate` object. 
- """ - if utils.is_int_like(audit_template_id): - return cls.get_by_id(context, audit_template_id, eager=eager) - elif utils.is_uuid_like(audit_template_id): - return cls.get_by_uuid(context, audit_template_id, eager=eager) - else: - raise exception.InvalidIdentity(identity=audit_template_id) - - @base.remotable_classmethod - def get_by_id(cls, context, audit_template_id, eager=False): - """Find an audit template based on its integer id - - :param context: Security context. NOTE: This should only - be used internally by the indirection_api. - Unfortunately, RPC requires context as the first - argument, even though we don't use it. - A context should be set when instantiating the - object, e.g.: AuditTemplate(context) - :param audit_template_id: the id of a audit_template. - :param eager: Load object fields if True (Default: False) - :returns: a :class:`AuditTemplate` object. - """ - db_audit_template = cls.dbapi.get_audit_template_by_id( - context, audit_template_id, eager=eager) - audit_template = cls._from_db_object( - cls(context), db_audit_template, eager=eager) - return audit_template - - @base.remotable_classmethod - def get_by_uuid(cls, context, uuid, eager=False): - """Find an audit template based on uuid - - :param context: Security context. NOTE: This should only - be used internally by the indirection_api. - Unfortunately, RPC requires context as the first - argument, even though we don't use it. - A context should be set when instantiating the - object, e.g.: AuditTemplate(context) - :param uuid: the uuid of a audit_template. - :param eager: Load object fields if True (Default: False) - :returns: a :class:`AuditTemplate` object. 
- """ - db_audit_template = cls.dbapi.get_audit_template_by_uuid( - context, uuid, eager=eager) - audit_template = cls._from_db_object( - cls(context), db_audit_template, eager=eager) - return audit_template - - @base.remotable_classmethod - def get_by_name(cls, context, name, eager=False): - """Find an audit template based on name - - :param name: the logical name of a audit_template. - :param context: Security context - :param eager: Load object fields if True (Default: False) - :returns: a :class:`AuditTemplate` object. - """ - db_audit_template = cls.dbapi.get_audit_template_by_name( - context, name, eager=eager) - audit_template = cls._from_db_object( - cls(context), db_audit_template, eager=eager) - return audit_template - - @base.remotable_classmethod - def list(cls, context, filters=None, limit=None, marker=None, - sort_key=None, sort_dir=None, eager=False): - """Return a list of :class:`AuditTemplate` objects. - - :param context: Security context. NOTE: This should only - be used internally by the indirection_api. - Unfortunately, RPC requires context as the first - argument, even though we don't use it. - A context should be set when instantiating the - object, e.g.: AuditTemplate(context) - :param filters: dict mapping the filter key to a value. - :param limit: maximum number of resources to return in a single result. - :param marker: pagination marker for large data sets. - :param sort_key: column to sort results by. - :param sort_dir: direction to sort. "asc" or "desc". - :param eager: Load object fields if True (Default: False) - :returns: a list of :class:`AuditTemplate` object. 
- """ - db_audit_templates = cls.dbapi.get_audit_template_list( - context, - filters=filters, - limit=limit, - marker=marker, - sort_key=sort_key, - sort_dir=sort_dir, - eager=eager) - - return [cls._from_db_object(cls(context), obj, eager=eager) - for obj in db_audit_templates] - - @base.remotable - def create(self): - """Create a :class:`AuditTemplate` record in the DB - - :returns: An :class:`AuditTemplate` object. - """ - values = self.obj_get_changes() - db_audit_template = self.dbapi.create_audit_template(values) - # Note(v-francoise): Always load eagerly upon creation so we can send - # notifications containing information about the related relationships - self._from_db_object(self, db_audit_template, eager=True) - - def destroy(self): - """Delete the :class:`AuditTemplate` from the DB""" - self.dbapi.destroy_audit_template(self.uuid) - self.obj_reset_changes() - - @base.remotable - def save(self): - """Save updates to this :class:`AuditTemplate`. - - Updates will be made column by column based on the result - of self.what_changed(). - """ - updates = self.obj_get_changes() - db_obj = self.dbapi.update_audit_template(self.uuid, updates) - obj = self._from_db_object(self, db_obj, eager=False) - self.obj_refresh(obj) - self.obj_reset_changes() - - @base.remotable - def refresh(self, eager=False): - """Loads updates for this :class:`AuditTemplate`. - - Loads a audit_template with the same uuid from the database and - checks for updated attributes. Updates are applied from - the loaded audit_template column by column, if there are any updates. 
- :param eager: Load object fields if True (Default: False) - """ - current = self.get_by_uuid(self._context, uuid=self.uuid, eager=eager) - self.obj_refresh(current) - - @base.remotable - def soft_delete(self): - """Soft Delete the :class:`AuditTemplate` from the DB""" - db_obj = self.dbapi.soft_delete_audit_template(self.uuid) - obj = self._from_db_object( - self.__class__(self._context), db_obj, eager=False) - self.obj_refresh(obj) diff --git a/watcher/objects/base.py b/watcher/objects/base.py deleted file mode 100644 index 8b93418..0000000 --- a/watcher/objects/base.py +++ /dev/null @@ -1,179 +0,0 @@ -# Copyright 2013 IBM Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Watcher common internal object model""" - -from oslo_utils import versionutils -from oslo_versionedobjects import base as ovo_base -from oslo_versionedobjects import fields as ovo_fields - -from watcher import objects - -remotable_classmethod = ovo_base.remotable_classmethod -remotable = ovo_base.remotable - - -def get_attrname(name): - """Return the mangled name of the attribute's underlying storage.""" - # FIXME(danms): This is just until we use o.vo's class properties - # and object base. 
- return '_obj_' + name - - -class WatcherObjectRegistry(ovo_base.VersionedObjectRegistry): - notification_classes = [] - - def registration_hook(self, cls, index): - # NOTE(danms): This is called when an object is registered, - # and is responsible for maintaining watcher.objects.$OBJECT - # as the highest-versioned implementation of a given object. - version = versionutils.convert_version_to_tuple(cls.VERSION) - if not hasattr(objects, cls.obj_name()): - setattr(objects, cls.obj_name(), cls) - else: - cur_version = versionutils.convert_version_to_tuple( - getattr(objects, cls.obj_name()).VERSION) - if version >= cur_version: - setattr(objects, cls.obj_name(), cls) - - @classmethod - def register_notification(cls, notification_cls): - """Register a class as notification. - - Use only to register concrete notification or payload classes, - do not register base classes intended for inheritance only. - """ - cls.register_if(False)(notification_cls) - cls.notification_classes.append(notification_cls) - return notification_cls - - @classmethod - def register_notification_objects(cls): - """Register previously decorated notification as normal ovos. - - This is not intended for production use but only for testing and - document generation purposes. - """ - for notification_cls in cls.notification_classes: - cls.register(notification_cls) - - -class WatcherObject(ovo_base.VersionedObject): - """Base class and object factory. - - This forms the base of all objects that can be remoted or instantiated - via RPC. Simply defining a class that inherits from this base class - will make it remotely instantiatable. Objects should implement the - necessary "get" classmethod routines as well as "save" object methods - as appropriate. 
- """ - - OBJ_SERIAL_NAMESPACE = 'watcher_object' - OBJ_PROJECT_NAMESPACE = 'watcher' - - def as_dict(self): - return { - k: getattr(self, k) for k in self.fields - if self.obj_attr_is_set(k)} - - -class WatcherObjectDictCompat(ovo_base.VersionedObjectDictCompat): - pass - - -class WatcherComparableObject(ovo_base.ComparableVersionedObject): - pass - - -class WatcherPersistentObject(object): - """Mixin class for Persistent objects. - - This adds the fields that we use in common for all persistent objects. - """ - fields = { - 'created_at': ovo_fields.DateTimeField(nullable=True), - 'updated_at': ovo_fields.DateTimeField(nullable=True), - 'deleted_at': ovo_fields.DateTimeField(nullable=True), - } - - # Mapping between the object field name and a 2-tuple pair composed of - # its object type (e.g. objects.RelatedObject) and the name of the - # model field related ID (or UUID) foreign key field. - # e.g.: - # - # fields = { - # # [...] - # 'related_object_id': fields.IntegerField(), # Foreign key - # 'related_object': wfields.ObjectField('RelatedObject'), - # } - # {'related_object': (objects.RelatedObject, 'related_object_id')} - object_fields = {} - - def obj_refresh(self, loaded_object): - """Applies updates for objects that inherit from base.WatcherObject. - - Checks for updated attributes in an object. Updates are applied from - the loaded object column by column in comparison with the current - object. - """ - fields = (field for field in self.fields - if field not in self.object_fields) - for field in fields: - if (self.obj_attr_is_set(field) and - self[field] != loaded_object[field]): - self[field] = loaded_object[field] - - @staticmethod - def _from_db_object(obj, db_object, eager=False): - """Converts a database entity to a formal object. - - :param obj: An object of the class. 
- :param db_object: A DB model of the object - :param eager: Enable the loading of object fields (Default: False) - :return: The object of the class with the database entity added - - """ - obj_class = type(obj) - object_fields = obj_class.object_fields - - for field in obj.fields: - if field not in object_fields: - obj[field] = db_object[field] - - if eager: - # Load object fields - context = obj._context - loadable_fields = ( - (obj_field, related_obj_cls, rel_id) - for obj_field, (related_obj_cls, rel_id) - in object_fields.items() - if obj[rel_id] - ) - for obj_field, related_obj_cls, rel_id in loadable_fields: - if getattr(db_object, obj_field, None) and obj[rel_id]: - # The object field data was eagerly loaded alongside - # the main object data - obj[obj_field] = related_obj_cls._from_db_object( - related_obj_cls(context), db_object[obj_field]) - else: - # The object field data wasn't loaded yet - obj[obj_field] = related_obj_cls.get(context, obj[rel_id]) - - obj.obj_reset_changes() - return obj - - -class WatcherObjectSerializer(ovo_base.VersionedObjectSerializer): - # Base class to use for object hydration - OBJ_BASE_CLASS = WatcherObject diff --git a/watcher/objects/efficacy_indicator.py b/watcher/objects/efficacy_indicator.py deleted file mode 100644 index 13027e1..0000000 --- a/watcher/objects/efficacy_indicator.py +++ /dev/null @@ -1,185 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright 2013 IBM Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from watcher.common import exception -from watcher.common import utils -from watcher.db import api as db_api -from watcher.objects import base -from watcher.objects import fields as wfields - - -@base.WatcherObjectRegistry.register -class EfficacyIndicator(base.WatcherPersistentObject, base.WatcherObject, - base.WatcherObjectDictCompat): - # Version 1.0: Initial version - VERSION = '1.0' - - dbapi = db_api.get_instance() - - fields = { - 'id': wfields.IntegerField(), - 'uuid': wfields.UUIDField(), - 'action_plan_id': wfields.IntegerField(), - 'name': wfields.StringField(), - 'description': wfields.StringField(nullable=True), - 'unit': wfields.StringField(nullable=True), - 'value': wfields.NumericField(), - } - - @base.remotable_classmethod - def get(cls, context, efficacy_indicator_id): - """Find an efficacy indicator object given its ID or UUID - - :param efficacy_indicator_id: the ID or UUID of an efficacy indicator. - :returns: a :class:`EfficacyIndicator` object. - """ - if utils.is_int_like(efficacy_indicator_id): - return cls.get_by_id(context, efficacy_indicator_id) - elif utils.is_uuid_like(efficacy_indicator_id): - return cls.get_by_uuid(context, efficacy_indicator_id) - else: - raise exception.InvalidIdentity(identity=efficacy_indicator_id) - - @base.remotable_classmethod - def get_by_id(cls, context, efficacy_indicator_id): - """Find an efficacy indicator given its integer ID - - :param efficacy_indicator_id: the id of an efficacy indicator. - :returns: a :class:`EfficacyIndicator` object. - """ - db_efficacy_indicator = cls.dbapi.get_efficacy_indicator_by_id( - context, efficacy_indicator_id) - efficacy_indicator = EfficacyIndicator._from_db_object( - cls(context), db_efficacy_indicator) - return efficacy_indicator - - @base.remotable_classmethod - def get_by_uuid(cls, context, uuid): - """Find an efficacy indicator given its UUID - - :param uuid: the uuid of an efficacy indicator. 
- :param context: Security context - :returns: a :class:`EfficacyIndicator` object. - """ - db_efficacy_indicator = cls.dbapi.get_efficacy_indicator_by_uuid( - context, uuid) - efficacy_indicator = EfficacyIndicator._from_db_object( - cls(context), db_efficacy_indicator) - return efficacy_indicator - - @base.remotable_classmethod - def list(cls, context, limit=None, marker=None, filters=None, - sort_key=None, sort_dir=None): - """Return a list of EfficacyIndicator objects. - - :param context: Security context. - :param limit: maximum number of resources to return in a single result. - :param marker: pagination marker for large data sets. - :param filters: Filters to apply. Defaults to None. - :param sort_key: column to sort results by. - :param sort_dir: direction to sort. "asc" or "desc". - :returns: a list of :class:`EfficacyIndicator` object. - - """ - db_efficacy_indicators = cls.dbapi.get_efficacy_indicator_list( - context, - limit=limit, - marker=marker, - filters=filters, - sort_key=sort_key, - sort_dir=sort_dir) - - return [cls._from_db_object(cls(context), obj) - for obj in db_efficacy_indicators] - - @base.remotable - def create(self, context=None): - """Create a EfficacyIndicator record in the DB. - - :param context: Security context. NOTE: This should only - be used internally by the indirection_api. - Unfortunately, RPC requires context as the first - argument, even though we don't use it. - A context should be set when instantiating the - object, e.g.: EfficacyIndicator(context) - - """ - values = self.obj_get_changes() - db_efficacy_indicator = self.dbapi.create_efficacy_indicator(values) - self._from_db_object(self, db_efficacy_indicator) - - def destroy(self, context=None): - """Delete the EfficacyIndicator from the DB. - - :param context: Security context. NOTE: This should only - be used internally by the indirection_api. - Unfortunately, RPC requires context as the first - argument, even though we don't use it. 
- A context should be set when instantiating the - object, e.g.: EfficacyIndicator(context) - """ - self.dbapi.destroy_efficacy_indicator(self.uuid) - self.obj_reset_changes() - - @base.remotable - def save(self, context=None): - """Save updates to this EfficacyIndicator. - - Updates will be made column by column based on the result - of self.what_changed(). - - :param context: Security context. NOTE: This should only - be used internally by the indirection_api. - Unfortunately, RPC requires context as the first - argument, even though we don't use it. - A context should be set when instantiating the - object, e.g.: EfficacyIndicator(context) - """ - updates = self.obj_get_changes() - self.dbapi.update_efficacy_indicator(self.uuid, updates) - - self.obj_reset_changes() - - @base.remotable - def refresh(self, context=None): - """Loads updates for this EfficacyIndicator. - - Loads an efficacy indicator with the same uuid from the database and - checks for updated attributes. Updates are applied to the loaded - efficacy indicator column by column, if there are any updates. - - :param context: Security context. NOTE: This should only - be used internally by the indirection_api. - Unfortunately, RPC requires context as the first - argument, even though we don't use it. - A context should be set when instantiating the - object, e.g.: EfficacyIndicator(context) - """ - current = self.__class__.get_by_uuid(self._context, uuid=self.uuid) - self.obj_refresh(current) - - @base.remotable - def soft_delete(self, context=None): - """Soft Delete the efficacy indicator from the DB. - - :param context: Security context. NOTE: This should only - be used internally by the indirection_api. - Unfortunately, RPC requires context as the first - argument, even though we don't use it. 
- A context should be set when instantiating the - object, e.g.: Audit(context) - """ - self.dbapi.soft_delete_efficacy_indicator(self.uuid) diff --git a/watcher/objects/fields.py b/watcher/objects/fields.py deleted file mode 100644 index d0df854..0000000 --- a/watcher/objects/fields.py +++ /dev/null @@ -1,168 +0,0 @@ -# Copyright 2013 IBM Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Utility methods for objects""" - -import ast -import six - -from oslo_serialization import jsonutils -from oslo_versionedobjects import fields - - -BaseEnumField = fields.BaseEnumField -BooleanField = fields.BooleanField -DateTimeField = fields.DateTimeField -Enum = fields.Enum -FloatField = fields.FloatField -IntegerField = fields.IntegerField -ListOfStringsField = fields.ListOfStringsField -NonNegativeFloatField = fields.NonNegativeFloatField -NonNegativeIntegerField = fields.NonNegativeIntegerField -ObjectField = fields.ObjectField -StringField = fields.StringField -UnspecifiedDefault = fields.UnspecifiedDefault -UUIDField = fields.UUIDField - - -class Numeric(fields.FieldType): - @staticmethod - def coerce(obj, attr, value): - if value is None: - return value - f_value = float(value) - return f_value if not f_value.is_integer() else value - - -class NumericField(fields.AutoTypedField): - AUTO_TYPE = Numeric() - - -class DictField(fields.AutoTypedField): - AUTO_TYPE = fields.Dict(fields.FieldType()) - - -class ListOfUUIDsField(fields.AutoTypedField): - AUTO_TYPE = 
fields.List(fields.UUID()) - - -class FlexibleDict(fields.FieldType): - @staticmethod - def coerce(obj, attr, value): - if isinstance(value, six.string_types): - value = ast.literal_eval(value) - return dict(value) - - -class FlexibleDictField(fields.AutoTypedField): - AUTO_TYPE = FlexibleDict() - - # TODO(lucasagomes): In our code we've always translated None to {}, - # this method makes this field to work like this. But probably won't - # be accepted as-is in the oslo_versionedobjects library - def _null(self, obj, attr): - if self.nullable: - return {} - super(FlexibleDictField, self)._null(obj, attr) - - -class FlexibleListOfDict(fields.FieldType): - @staticmethod - def coerce(obj, attr, value): - if isinstance(value, six.string_types): - value = ast.literal_eval(value) - return list(value) - - -class FlexibleListOfDictField(fields.AutoTypedField): - AUTO_TYPE = FlexibleListOfDict() - - # TODO(lucasagomes): In our code we've always translated None to {}, - # this method makes this field to work like this. 
But probably won't - # be accepted as-is in the oslo_versionedobjects library - def _null(self, obj, attr): - if self.nullable: - return [] - super(FlexibleListOfDictField, self)._null(obj, attr) - - -class Json(fields.FieldType): - def coerce(self, obj, attr, value): - if isinstance(value, six.string_types): - loaded = jsonutils.loads(value) - return loaded - return value - - def from_primitive(self, obj, attr, value): - return self.coerce(obj, attr, value) - - def to_primitive(self, obj, attr, value): - return jsonutils.dumps(value) - - -class JsonField(fields.AutoTypedField): - AUTO_TYPE = Json() - -# ### Notification fields ### # - - -class BaseWatcherEnum(Enum): - - ALL = () - - def __init__(self, **kwargs): - super(BaseWatcherEnum, self).__init__(valid_values=self.__class__.ALL) - - -class NotificationPriority(BaseWatcherEnum): - DEBUG = 'debug' - INFO = 'info' - WARNING = 'warning' - ERROR = 'error' - CRITICAL = 'critical' - - ALL = (DEBUG, INFO, WARNING, ERROR, CRITICAL) - - -class NotificationPhase(BaseWatcherEnum): - START = 'start' - END = 'end' - ERROR = 'error' - - ALL = (START, END, ERROR) - - -class NotificationAction(BaseWatcherEnum): - CREATE = 'create' - UPDATE = 'update' - EXCEPTION = 'exception' - DELETE = 'delete' - - STRATEGY = 'strategy' - PLANNER = 'planner' - EXECUTION = 'execution' - - ALL = (CREATE, UPDATE, EXCEPTION, DELETE, STRATEGY, PLANNER, EXECUTION) - - -class NotificationPriorityField(BaseEnumField): - AUTO_TYPE = NotificationPriority() - - -class NotificationPhaseField(BaseEnumField): - AUTO_TYPE = NotificationPhase() - - -class NotificationActionField(BaseEnumField): - AUTO_TYPE = NotificationAction() diff --git a/watcher/objects/goal.py b/watcher/objects/goal.py deleted file mode 100644 index e947ff6..0000000 --- a/watcher/objects/goal.py +++ /dev/null @@ -1,176 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright 2013 IBM Corp. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from watcher.common import exception -from watcher.common import utils -from watcher.db import api as db_api -from watcher.objects import base -from watcher.objects import fields as wfields - - -@base.WatcherObjectRegistry.register -class Goal(base.WatcherPersistentObject, base.WatcherObject, - base.WatcherObjectDictCompat): - # Version 1.0: Initial version - VERSION = '1.0' - - dbapi = db_api.get_instance() - - fields = { - 'id': wfields.IntegerField(), - 'uuid': wfields.UUIDField(), - 'name': wfields.StringField(), - 'display_name': wfields.StringField(), - 'efficacy_specification': wfields.FlexibleListOfDictField(), - } - - @base.remotable_classmethod - def get(cls, context, goal_id): - """Find a goal based on its id or uuid - - :param context: Security context. NOTE: This should only - be used internally by the indirection_api. - Unfortunately, RPC requires context as the first - argument, even though we don't use it. - A context should be set when instantiating the - object, e.g.: Goal(context) - :param goal_id: the id *or* uuid of a goal. - :returns: a :class:`Goal` object. 
- """ - if utils.is_int_like(goal_id): - return cls.get_by_id(context, goal_id) - elif utils.is_uuid_like(goal_id): - return cls.get_by_uuid(context, goal_id) - else: - raise exception.InvalidIdentity(identity=goal_id) - - @base.remotable_classmethod - def get_by_id(cls, context, goal_id): - """Find a goal based on its integer id - - :param context: Security context. NOTE: This should only - be used internally by the indirection_api. - Unfortunately, RPC requires context as the first - argument, even though we don't use it. - A context should be set when instantiating the - object, e.g.: Goal(context) - :param goal_id: the id *or* uuid of a goal. - :returns: a :class:`Goal` object. - """ - db_goal = cls.dbapi.get_goal_by_id(context, goal_id) - goal = cls._from_db_object(cls(context), db_goal) - return goal - - @base.remotable_classmethod - def get_by_uuid(cls, context, uuid): - """Find a goal based on uuid - - :param context: Security context. NOTE: This should only - be used internally by the indirection_api. - Unfortunately, RPC requires context as the first - argument, even though we don't use it. - A context should be set when instantiating the - object, e.g.: Goal(context) - :param uuid: the uuid of a goal. - :returns: a :class:`Goal` object. - """ - db_goal = cls.dbapi.get_goal_by_uuid(context, uuid) - goal = cls._from_db_object(cls(context), db_goal) - return goal - - @base.remotable_classmethod - def get_by_name(cls, context, name): - """Find a goal based on name - - :param name: the name of a goal. - :param context: Security context - :returns: a :class:`Goal` object. - """ - db_goal = cls.dbapi.get_goal_by_name(context, name) - goal = cls._from_db_object(cls(context), db_goal) - return goal - - @base.remotable_classmethod - def list(cls, context, limit=None, marker=None, filters=None, - sort_key=None, sort_dir=None): - """Return a list of :class:`Goal` objects. - - :param context: Security context. 
NOTE: This should only - be used internally by the indirection_api. - Unfortunately, RPC requires context as the first - argument, even though we don't use it. - A context should be set when instantiating the - object, e.g.: Goal(context) - :param filters: dict mapping the filter key to a value. - :param limit: maximum number of resources to return in a single result. - :param marker: pagination marker for large data sets. - :param sort_key: column to sort results by. - :param sort_dir: direction to sort. "asc" or "desc". - :returns: a list of :class:`Goal` object. - """ - db_goals = cls.dbapi.get_goal_list( - context, - filters=filters, - limit=limit, - marker=marker, - sort_key=sort_key, - sort_dir=sort_dir) - - return [cls._from_db_object(cls(context), obj) for obj in db_goals] - - @base.remotable - def create(self): - """Create a :class:`Goal` record in the DB""" - values = self.obj_get_changes() - db_goal = self.dbapi.create_goal(values) - self._from_db_object(self, db_goal) - - def destroy(self): - """Delete the :class:`Goal` from the DB""" - self.dbapi.destroy_goal(self.id) - self.obj_reset_changes() - - @base.remotable - def save(self): - """Save updates to this :class:`Goal`. - - Updates will be made column by column based on the result - of self.what_changed(). - """ - updates = self.obj_get_changes() - db_obj = self.dbapi.update_goal(self.uuid, updates) - obj = self._from_db_object(self, db_obj, eager=False) - self.obj_refresh(obj) - self.obj_reset_changes() - - @base.remotable - def refresh(self): - """Loads updates for this :class:`Goal`. - - Loads a goal with the same uuid from the database and - checks for updated attributes. Updates are applied from - the loaded goal column by column, if there are any updates. 
- """ - current = self.get_by_uuid(self._context, uuid=self.uuid) - self.obj_refresh(current) - - @base.remotable - def soft_delete(self): - """Soft Delete the :class:`Goal` from the DB""" - db_obj = self.dbapi.soft_delete_goal(self.uuid) - obj = self._from_db_object( - self.__class__(self._context), db_obj, eager=False) - self.obj_refresh(obj) diff --git a/watcher/objects/scoring_engine.py b/watcher/objects/scoring_engine.py deleted file mode 100644 index 0c95e72..0000000 --- a/watcher/objects/scoring_engine.py +++ /dev/null @@ -1,198 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright 2016 Intel -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -A :ref:`Scoring Engine ` is an instance of a data -model, to which a learning data was applied. - -Because there might be multiple algorithms used to build a particular data -model (and therefore a scoring engine), the usage of scoring engine might -vary. A metainfo field is supposed to contain any information which might -be needed by the user of a given scoring engine. 
-""" - -from watcher.common import exception -from watcher.common import utils -from watcher.db import api as db_api -from watcher.objects import base -from watcher.objects import fields as wfields - - -@base.WatcherObjectRegistry.register -class ScoringEngine(base.WatcherPersistentObject, base.WatcherObject, - base.WatcherObjectDictCompat): - # Version 1.0: Initial version - VERSION = '1.0' - - dbapi = db_api.get_instance() - - fields = { - 'id': wfields.IntegerField(), - 'uuid': wfields.UUIDField(), - 'name': wfields.StringField(), - 'description': wfields.StringField(nullable=True), - 'metainfo': wfields.StringField(nullable=True), - } - - @base.remotable_classmethod - def get(cls, context, scoring_engine_id): - """Find a scoring engine based on its id or uuid - - :param context: Security context. NOTE: This should only - be used internally by the indirection_api. - Unfortunately, RPC requires context as the first - argument, even though we don't use it. - A context should be set when instantiating the - object, e.g.: ScoringEngine(context) - :param scoring_engine_name: the name of a scoring_engine. - :returns: a :class:`ScoringEngine` object. - """ - if utils.is_int_like(scoring_engine_id): - return cls.get_by_id(context, scoring_engine_id) - elif utils.is_uuid_like(scoring_engine_id): - return cls.get_by_uuid(context, scoring_engine_id) - else: - raise exception.InvalidIdentity(identity=scoring_engine_id) - - @base.remotable_classmethod - def get_by_id(cls, context, scoring_engine_id): - """Find a scoring engine based on its id - - :param context: Security context. NOTE: This should only - be used internally by the indirection_api. - Unfortunately, RPC requires context as the first - argument, even though we don't use it. - A context should be set when instantiating the - object, e.g.: ScoringEngine(context) - :param scoring_engine_id: the id of a scoring_engine. - :returns: a :class:`ScoringEngine` object. 
- """ - db_scoring_engine = cls.dbapi.get_scoring_engine_by_id( - context, - scoring_engine_id) - scoring_engine = ScoringEngine._from_db_object(cls(context), - db_scoring_engine) - return scoring_engine - - @base.remotable_classmethod - def get_by_uuid(cls, context, scoring_engine_uuid): - """Find a scoring engine based on its uuid - - :param context: Security context. NOTE: This should only - be used internally by the indirection_api. - Unfortunately, RPC requires context as the first - argument, even though we don't use it. - A context should be set when instantiating the - object, e.g.: ScoringEngine(context) - :param scoring_engine_uuid: the uuid of a scoring_engine. - :returns: a :class:`ScoringEngine` object. - """ - db_scoring_engine = cls.dbapi.get_scoring_engine_by_uuid( - context, - scoring_engine_uuid) - scoring_engine = ScoringEngine._from_db_object(cls(context), - db_scoring_engine) - return scoring_engine - - @base.remotable_classmethod - def get_by_name(cls, context, scoring_engine_name): - """Find a scoring engine based on its name - - :param context: Security context. NOTE: This should only - be used internally by the indirection_api. - Unfortunately, RPC requires context as the first - argument, even though we don't use it. - A context should be set when instantiating the - object, e.g.: ScoringEngine(context) - :param scoring_engine_name: the name of a scoring_engine. - :returns: a :class:`ScoringEngine` object. - """ - db_scoring_engine = cls.dbapi.get_scoring_engine_by_name( - context, - scoring_engine_name) - scoring_engine = ScoringEngine._from_db_object(cls(context), - db_scoring_engine) - return scoring_engine - - @base.remotable_classmethod - def list(cls, context, filters=None, limit=None, marker=None, - sort_key=None, sort_dir=None): - """Return a list of :class:`ScoringEngine` objects. - - :param context: Security context. NOTE: This should only - be used internally by the indirection_api. 
- Unfortunately, RPC requires context as the first - argument, even though we don't use it. - A context should be set when instantiating the - object, e.g.: ScoringEngine(context) - :param filters: dict mapping the filter key to a value. - :param limit: maximum number of resources to return in a single result. - :param marker: pagination marker for large data sets. - :param sort_key: column to sort results by. - :param sort_dir: direction to sort. "asc" or "desc". - :returns: a list of :class:`ScoringEngine` objects. - """ - db_scoring_engines = cls.dbapi.get_scoring_engine_list( - context, - filters=filters, - limit=limit, - marker=marker, - sort_key=sort_key, - sort_dir=sort_dir) - return [cls._from_db_object(cls(context), obj) - for obj in db_scoring_engines] - - @base.remotable - def create(self): - """Create a :class:`ScoringEngine` record in the DB.""" - values = self.obj_get_changes() - db_scoring_engine = self.dbapi.create_scoring_engine(values) - self._from_db_object(self, db_scoring_engine) - - def destroy(self): - """Delete the :class:`ScoringEngine` from the DB""" - self.dbapi.destroy_scoring_engine(self.id) - self.obj_reset_changes() - - @base.remotable - def save(self): - """Save updates to this :class:`ScoringEngine`. - - Updates will be made column by column based on the result - of self.what_changed(). - """ - updates = self.obj_get_changes() - db_obj = self.dbapi.update_scoring_engine(self.uuid, updates) - obj = self._from_db_object(self, db_obj, eager=False) - self.obj_refresh(obj) - self.obj_reset_changes() - - def refresh(self): - """Loads updates for this :class:`ScoringEngine`. - - Loads a scoring_engine with the same id from the database and - checks for updated attributes. Updates are applied from - the loaded scoring_engine column by column, if there are any updates. 
- """ - current = self.get_by_id(self._context, scoring_engine_id=self.id) - self.obj_refresh(current) - - def soft_delete(self): - """Soft Delete the :class:`ScoringEngine` from the DB""" - db_obj = self.dbapi.soft_delete_scoring_engine(self.id) - obj = self._from_db_object( - self.__class__(self._context), db_obj, eager=False) - self.obj_refresh(obj) diff --git a/watcher/objects/service.py b/watcher/objects/service.py deleted file mode 100644 index 0b261b8..0000000 --- a/watcher/objects/service.py +++ /dev/null @@ -1,145 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 Servionica -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from watcher.common import exception -from watcher.common import utils -from watcher.db import api as db_api -from watcher.objects import base -from watcher.objects import fields as wfields - - -class ServiceStatus(object): - ACTIVE = 'ACTIVE' - FAILED = 'FAILED' - - -@base.WatcherObjectRegistry.register -class Service(base.WatcherPersistentObject, base.WatcherObject, - base.WatcherObjectDictCompat): - - # Version 1.0: Initial version - VERSION = '1.0' - - dbapi = db_api.get_instance() - - fields = { - 'id': wfields.IntegerField(), - 'name': wfields.StringField(), - 'host': wfields.StringField(), - 'last_seen_up': wfields.DateTimeField( - tzinfo_aware=False, nullable=True), - } - - @base.remotable_classmethod - def get(cls, context, service_id): - """Find a service based on its id - - :param context: Security context. 
NOTE: This should only - be used internally by the indirection_api. - Unfortunately, RPC requires context as the first - argument, even though we don't use it. - A context should be set when instantiating the - object, e.g.: Service(context) - :param service_id: the id of a service. - :returns: a :class:`Service` object. - """ - if utils.is_int_like(service_id): - db_service = cls.dbapi.get_service_by_id(context, service_id) - service = Service._from_db_object(cls(context), db_service) - return service - else: - raise exception.InvalidIdentity(identity=service_id) - - @base.remotable_classmethod - def get_by_name(cls, context, name): - """Find a service based on name - - :param name: the name of a service. - :param context: Security context - :returns: a :class:`Service` object. - """ - - db_service = cls.dbapi.get_service_by_name(context, name) - service = cls._from_db_object(cls(context), db_service) - return service - - @base.remotable_classmethod - def list(cls, context, limit=None, marker=None, filters=None, - sort_key=None, sort_dir=None): - """Return a list of :class:`Service` objects. - - :param context: Security context. NOTE: This should only - be used internally by the indirection_api. - Unfortunately, RPC requires context as the first - argument, even though we don't use it. - A context should be set when instantiating the - object, e.g.: Service(context) - :param filters: dict mapping the filter key to a value. - :param limit: maximum number of resources to return in a single result. - :param marker: pagination marker for large data sets. - :param sort_key: column to sort results by. - :param sort_dir: direction to sort. "asc" or "desc". - :returns: a list of :class:`Service` object. 
- """ - db_services = cls.dbapi.get_service_list( - context, - filters=filters, - limit=limit, - marker=marker, - sort_key=sort_key, - sort_dir=sort_dir) - - return [cls._from_db_object(cls(context), obj) for obj in db_services] - - @base.remotable - def create(self): - """Create a :class:`Service` record in the DB.""" - values = self.obj_get_changes() - db_service = self.dbapi.create_service(values) - self._from_db_object(self, db_service) - - @base.remotable - def save(self): - """Save updates to this :class:`Service`. - - Updates will be made column by column based on the result - of self.what_changed(). - """ - updates = self.obj_get_changes() - db_obj = self.dbapi.update_service(self.id, updates) - obj = self._from_db_object(self, db_obj, eager=False) - self.obj_refresh(obj) - self.obj_reset_changes() - - def refresh(self): - """Loads updates for this :class:`Service`. - - Loads a service with the same id from the database and - checks for updated attributes. Updates are applied from - the loaded service column by column, if there are any updates. - """ - current = self.get(self._context, service_id=self.id) - for field in self.fields: - if (hasattr(self, base.get_attrname(field)) and - self[field] != current[field]): - self[field] = current[field] - - def soft_delete(self): - """Soft Delete the :class:`Service` from the DB.""" - db_obj = self.dbapi.soft_delete_service(self.id) - obj = self._from_db_object( - self.__class__(self._context), db_obj, eager=False) - self.obj_refresh(obj) diff --git a/watcher/objects/strategy.py b/watcher/objects/strategy.py deleted file mode 100644 index 584c8ff..0000000 --- a/watcher/objects/strategy.py +++ /dev/null @@ -1,237 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from watcher.common import exception -from watcher.common import utils -from watcher.db import api as db_api -from watcher import objects -from watcher.objects import base -from watcher.objects import fields as wfields - - -@base.WatcherObjectRegistry.register -class Strategy(base.WatcherPersistentObject, base.WatcherObject, - base.WatcherObjectDictCompat): - - # Version 1.0: Initial version - # Version 1.1: Added Goal object field - VERSION = '1.1' - - dbapi = db_api.get_instance() - - fields = { - 'id': wfields.IntegerField(), - 'uuid': wfields.UUIDField(), - 'name': wfields.StringField(), - 'display_name': wfields.StringField(), - 'goal_id': wfields.IntegerField(), - 'parameters_spec': wfields.FlexibleDictField(nullable=True), - 'goal': wfields.ObjectField('Goal', nullable=True), - } - - object_fields = {'goal': (objects.Goal, 'goal_id')} - - @base.remotable_classmethod - def get(cls, context, strategy_id, eager=False): - """Find a strategy based on its id or uuid - - :param context: Security context. NOTE: This should only - be used internally by the indirection_api. - Unfortunately, RPC requires context as the first - argument, even though we don't use it. - A context should be set when instantiating the - object, e.g.: Strategy(context) - :param strategy_id: the id *or* uuid of a strategy. - :param eager: Load object fields if True (Default: False) - :returns: A :class:`Strategy` object. 
- """ - if utils.is_int_like(strategy_id): - return cls.get_by_id(context, strategy_id, eager=eager) - elif utils.is_uuid_like(strategy_id): - return cls.get_by_uuid(context, strategy_id, eager=eager) - else: - raise exception.InvalidIdentity(identity=strategy_id) - - @base.remotable_classmethod - def get_by_id(cls, context, strategy_id, eager=False): - """Find a strategy based on its integer id - - :param context: Security context. NOTE: This should only - be used internally by the indirection_api. - Unfortunately, RPC requires context as the first - argument, even though we don't use it. - A context should be set when instantiating the - object, e.g.: Strategy(context) - :param strategy_id: the id of a strategy. - :param eager: Load object fields if True (Default: False) - :returns: A :class:`Strategy` object. - """ - db_strategy = cls.dbapi.get_strategy_by_id( - context, strategy_id, eager=eager) - strategy = cls._from_db_object(cls(context), db_strategy, eager=eager) - return strategy - - @base.remotable_classmethod - def get_by_uuid(cls, context, uuid, eager=False): - """Find a strategy based on uuid - - :param context: Security context. NOTE: This should only - be used internally by the indirection_api. - Unfortunately, RPC requires context as the first - argument, even though we don't use it. - A context should be set when instantiating the - object, e.g.: Strategy(context) - :param uuid: the uuid of a strategy. - :param eager: Load object fields if True (Default: False) - :returns: A :class:`Strategy` object. - """ - - db_strategy = cls.dbapi.get_strategy_by_uuid( - context, uuid, eager=eager) - strategy = cls._from_db_object(cls(context), db_strategy, eager=eager) - return strategy - - @base.remotable_classmethod - def get_by_name(cls, context, name, eager=False): - """Find a strategy based on name - - :param context: Security context - :param name: the name of a strategy. 
- :param eager: Load object fields if True (Default: False) - :returns: A :class:`Strategy` object. - """ - - db_strategy = cls.dbapi.get_strategy_by_name( - context, name, eager=eager) - strategy = cls._from_db_object(cls(context), db_strategy, eager=eager) - return strategy - - @base.remotable_classmethod - def list(cls, context, limit=None, marker=None, filters=None, - sort_key=None, sort_dir=None, eager=False): - """Return a list of :class:`Strategy` objects. - - :param context: Security context. NOTE: This should only - be used internally by the indirection_api. - Unfortunately, RPC requires context as the first - argument, even though we don't use it. - A context should be set when instantiating the - object, e.g.: Strategy(context) - :param limit: maximum number of resources to return in a single result. - :param marker: pagination marker for large data sets. - :param filters: dict mapping the filter key to a value. - :param sort_key: column to sort results by. - :param sort_dir: direction to sort. "asc" or "desc`". - :param eager: Load object fields if True (Default: False) - :returns: a list of :class:`Strategy` object. - """ - db_strategies = cls.dbapi.get_strategy_list( - context, - filters=filters, - limit=limit, - marker=marker, - sort_key=sort_key, - sort_dir=sort_dir) - - return [cls._from_db_object(cls(context), obj, eager=eager) - for obj in db_strategies] - - @base.remotable - def create(self, context=None): - """Create a :class:`Strategy` record in the DB. - - :param context: Security context. NOTE: This should only - be used internally by the indirection_api. - Unfortunately, RPC requires context as the first - argument, even though we don't use it. - A context should be set when instantiating the - object, e.g.: Strategy(context) - :returns: A :class:`Strategy` object. 
- """ - - values = self.obj_get_changes() - db_strategy = self.dbapi.create_strategy(values) - # Note(v-francoise): Always load eagerly upon creation so we can send - # notifications containing information about the related relationships - self._from_db_object(self, db_strategy, eager=True) - - def destroy(self, context=None): - """Delete the :class:`Strategy` from the DB. - - :param context: Security context. NOTE: This should only - be used internally by the indirection_api. - Unfortunately, RPC requires context as the first - argument, even though we don't use it. - A context should be set when instantiating the - object, e.g.: Strategy(context) - """ - self.dbapi.destroy_strategy(self.id) - self.obj_reset_changes() - - @base.remotable - def save(self, context=None): - """Save updates to this :class:`Strategy`. - - Updates will be made column by column based on the result - of self.what_changed(). - - :param context: Security context. NOTE: This should only - be used internally by the indirection_api. - Unfortunately, RPC requires context as the first - argument, even though we don't use it. - A context should be set when instantiating the - object, e.g.: Strategy(context) - """ - updates = self.obj_get_changes() - self.dbapi.update_strategy(self.id, updates) - - self.obj_reset_changes() - - @base.remotable - def refresh(self, context=None, eager=False): - """Loads updates for this :class:`Strategy`. - - Loads a strategy with the same uuid from the database and - checks for updated attributes. Updates are applied from - the loaded strategy column by column, if there are any updates. - - :param context: Security context. NOTE: This should only - be used internally by the indirection_api. - Unfortunately, RPC requires context as the first - argument, even though we don't use it. 
- A context should be set when instantiating the - object, e.g.: Strategy(context) - :param eager: Load object fields if True (Default: False) - """ - current = self.__class__.get_by_id( - self._context, strategy_id=self.id, eager=eager) - for field in self.fields: - if (hasattr(self, base.get_attrname(field)) and - self[field] != current[field]): - self[field] = current[field] - - @base.remotable - def soft_delete(self, context=None): - """Soft Delete the :class:`Strategy` from the DB. - - :param context: Security context. NOTE: This should only - be used internally by the indirection_api. - Unfortunately, RPC requires context as the first - argument, even though we don't use it. - A context should be set when instantiating the - object, e.g.: Strategy(context) - """ - self.dbapi.soft_delete_strategy(self.id) diff --git a/watcher/objects/utils.py b/watcher/objects/utils.py deleted file mode 100644 index 1146832..0000000 --- a/watcher/objects/utils.py +++ /dev/null @@ -1,151 +0,0 @@ -# Copyright 2013 IBM Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""Utility methods for objects""" - -import ast -import datetime - -import iso8601 -import netaddr -from oslo_utils import timeutils -import six - -from watcher._i18n import _ - - -def datetime_or_none(value, tzinfo_aware=False): - """Validate a datetime or None value.""" - if value is None: - return None - if isinstance(value, six.string_types): - # NOTE(danms): Being tolerant of isotime strings here will help us - # during our objects transition - value = timeutils.parse_isotime(value) - elif not isinstance(value, datetime.datetime): - raise ValueError( - _("A datetime.datetime is required here. Got %s"), value) - - if value.utcoffset() is None and tzinfo_aware: - # NOTE(danms): Legacy objects from sqlalchemy are stored in UTC, - # but are returned without a timezone attached. - # As a transitional aid, assume a tz-naive object is in UTC. - value = value.replace(tzinfo=iso8601.iso8601.Utc()) - elif not tzinfo_aware: - value = value.replace(tzinfo=None) - - return value - - -def datetime_or_str_or_none(val, tzinfo_aware=False): - if isinstance(val, six.string_types): - return timeutils.parse_isotime(val) - return datetime_or_none(val, tzinfo_aware=tzinfo_aware) - - -def numeric_or_none(val): - """Attempt to parse an integer value, or None.""" - if val is None: - return val - else: - f_val = float(val) - return f_val if not f_val.is_integer() else val - - -def int_or_none(val): - """Attempt to parse an integer value, or None.""" - if val is None: - return val - else: - return int(val) - - -def str_or_none(val): - """Attempt to stringify a value to unicode, or None.""" - if val is None: - return val - else: - return six.text_type(val) - - -def dict_or_none(val): - """Attempt to dictify a value, or None.""" - if val is None: - return {} - elif isinstance(val, six.string_types): - return dict(ast.literal_eval(val)) - else: - try: - return dict(val) - except ValueError: - return {} - - -def list_or_none(val): - """Attempt to listify a value, or None.""" - if val is 
None: - return [] - elif isinstance(val, six.string_types): - return list(ast.literal_eval(val)) - else: - try: - return list(val) - except ValueError: - return [] - - -def ip_or_none(version): - """Return a version-specific IP address validator.""" - def validator(val, version=version): - if val is None: - return val - else: - return netaddr.IPAddress(val, version=version) - return validator - - -def nested_object_or_none(objclass): - def validator(val, objclass=objclass): - if val is None or isinstance(val, objclass): - return val - raise ValueError(_("An object of class %s is required here") - % objclass) - return validator - - -def dt_serializer(name): - """Return a datetime serializer for a named attribute.""" - def serializer(self, name=name): - if getattr(self, name) is not None: - return datetime.datetime.isoformat(getattr(self, name)) - else: - return None - return serializer - - -def dt_deserializer(val): - """A deserializer method for datetime attributes.""" - if val is None: - return None - else: - return timeutils.parse_isotime(val) - - -def obj_serializer(name): - def serializer(self, name=name): - if getattr(self, name) is not None: - return getattr(self, name).obj_to_primitive() - else: - return None - return serializer diff --git a/watcher/tests/__init__.py b/watcher/tests/__init__.py deleted file mode 100644 index cdc336c..0000000 --- a/watcher/tests/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from watcher import objects - -# NOTE(comstud): Make sure we have all of the objects loaded. We do this -# at module import time, because we may be using mock decorators in our -# tests that run at import time. -objects.register_all() diff --git a/watcher/tests/api/__init__.py b/watcher/tests/api/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/tests/api/base.py b/watcher/tests/api/base.py deleted file mode 100644 index 6979347..0000000 --- a/watcher/tests/api/base.py +++ /dev/null @@ -1,291 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Base classes for API tests.""" - -# NOTE: Ported from ceilometer/tests/api.py (subsequently moved to -# ceilometer/tests/api/__init__.py). This should be oslo'ified: -# https://bugs.launchpad.net/watcher/+bug/1255115. 
- -# NOTE(deva): import auth_token so we can override a config option - -import copy -import mock - -from oslo_config import cfg -import pecan -import pecan.testing -from six.moves.urllib import parse as urlparse - -from watcher.api import hooks -from watcher.common import context as watcher_context -from watcher.notifications import service as n_service -from watcher.tests.db import base - -PATH_PREFIX = '/v1' - - -class FunctionalTest(base.DbTestCase): - """Pecan controller functional testing class. - - Used for functional tests of Pecan controllers where you need to - test your literal application and its integration with the - framework. - """ - - SOURCE_DATA = {'test_source': {'somekey': '666'}} - - def setUp(self): - super(FunctionalTest, self).setUp() - cfg.CONF.set_override("auth_version", "v2.0", - group='keystone_authtoken') - cfg.CONF.set_override("admin_user", "admin", - group='keystone_authtoken') - - p_services = mock.patch.object(n_service, "send_service_update", - new_callable=mock.PropertyMock) - self.m_services = p_services.start() - self.addCleanup(p_services.stop) - - self.app = self._make_app() - - def reset_pecan(): - pecan.set_config({}, overwrite=True) - - self.addCleanup(reset_pecan) - - def _make_app(self, enable_acl=False): - # Determine where we are so we can set up paths in the config - root_dir = self.get_path() - - self.config = { - 'app': { - 'root': 'watcher.api.controllers.root.RootController', - 'modules': ['watcher.api'], - 'hooks': [ - hooks.ContextHook(), - hooks.NoExceptionTracebackHook() - ], - 'static_root': '%s/public' % root_dir, - 'template_path': '%s/api/templates' % root_dir, - 'enable_acl': enable_acl, - 'acl_public_routes': ['/', '/v1'], - }, - } - - return pecan.testing.load_test_app(self.config) - - def _request_json(self, path, params, expect_errors=False, headers=None, - method="post", extra_environ=None, status=None, - path_prefix=PATH_PREFIX): - """Sends simulated HTTP request to Pecan test app. 
- - :param path: url path of target service - :param params: content for wsgi.input of request - :param expect_errors: Boolean value; whether an error is expected based - on request - :param headers: a dictionary of headers to send along with the request - :param method: Request method type. Appropriate method function call - should be used rather than passing attribute in. - :param extra_environ: a dictionary of environ variables to send along - with the request - :param status: expected status code of response - :param path_prefix: prefix of the url path - """ - full_path = path_prefix + path - print('%s: %s %s' % (method.upper(), full_path, params)) - - response = getattr(self.app, "%s_json" % method)( - str(full_path), - params=params, - headers=headers, - status=status, - extra_environ=extra_environ, - expect_errors=expect_errors - ) - print('GOT:%s' % response) - return response - - def put_json(self, path, params, expect_errors=False, headers=None, - extra_environ=None, status=None): - """Sends simulated HTTP PUT request to Pecan test app. - - :param path: url path of target service - :param params: content for wsgi.input of request - :param expect_errors: Boolean value; whether an error is expected based - on request - :param headers: a dictionary of headers to send along with the request - :param extra_environ: a dictionary of environ variables to send along - with the request - :param status: expected status code of response - """ - return self._request_json(path=path, params=params, - expect_errors=expect_errors, - headers=headers, extra_environ=extra_environ, - status=status, method="put") - - def post_json(self, path, params, expect_errors=False, headers=None, - extra_environ=None, status=None): - """Sends simulated HTTP POST request to Pecan test app. 
- - :param path: url path of target service - :param params: content for wsgi.input of request - :param expect_errors: Boolean value; whether an error is expected based - on request - :param headers: a dictionary of headers to send along with the request - :param extra_environ: a dictionary of environ variables to send along - with the request - :param status: expected status code of response - """ - return self._request_json(path=path, params=params, - expect_errors=expect_errors, - headers=headers, extra_environ=extra_environ, - status=status, method="post") - - def patch_json(self, path, params, expect_errors=False, headers=None, - extra_environ=None, status=None): - """Sends simulated HTTP PATCH request to Pecan test app. - - :param path: url path of target service - :param params: content for wsgi.input of request - :param expect_errors: Boolean value; whether an error is expected based - on request - :param headers: a dictionary of headers to send along with the request - :param extra_environ: a dictionary of environ variables to send along - with the request - :param status: expected status code of response - """ - return self._request_json(path=path, params=params, - expect_errors=expect_errors, - headers=headers, extra_environ=extra_environ, - status=status, method="patch") - - def delete(self, path, expect_errors=False, headers=None, - extra_environ=None, status=None, path_prefix=PATH_PREFIX): - """Sends simulated HTTP DELETE request to Pecan test app. 
- - :param path: url path of target service - :param expect_errors: Boolean value; whether an error is expected based - on request - :param headers: a dictionary of headers to send along with the request - :param extra_environ: a dictionary of environ variables to send along - with the request - :param status: expected status code of response - :param path_prefix: prefix of the url path - """ - full_path = path_prefix + path - print('DELETE: %s' % (full_path)) - response = self.app.delete(str(full_path), - headers=headers, - status=status, - extra_environ=extra_environ, - expect_errors=expect_errors) - print('GOT:%s' % response) - return response - - def get_json(self, path, expect_errors=False, headers=None, - extra_environ=None, q=[], path_prefix=PATH_PREFIX, **params): - """Sends simulated HTTP GET request to Pecan test app. - - :param path: url path of target service - :param expect_errors: Boolean value;whether an error is expected based - on request - :param headers: a dictionary of headers to send along with the request - :param extra_environ: a dictionary of environ variables to send along - with the request - :param q: list of queries consisting of: field, value, op, and type - keys - :param path_prefix: prefix of the url path - :param params: content for wsgi.input of request - """ - full_path = path_prefix + path - query_params = {'q.field': [], - 'q.value': [], - 'q.op': [], - } - for query in q: - for name in ['field', 'op', 'value']: - query_params['q.%s' % name].append(query.get(name, '')) - all_params = {} - all_params.update(params) - if q: - all_params.update(query_params) - print('GET: %s %r' % (full_path, all_params)) - - response = self.app.get(full_path, - params=all_params, - headers=headers, - extra_environ=extra_environ, - expect_errors=expect_errors) - if not expect_errors: - response = response.json - print('GOT:%s' % response) - return response - - def validate_link(self, link, bookmark=False): - """Checks if the given link can get 
correct data.""" - # removes the scheme and net location parts of the link - url_parts = list(urlparse.urlparse(link)) - url_parts[0] = url_parts[1] = '' - - # bookmark link should not have the version in the URL - if bookmark and url_parts[2].startswith(PATH_PREFIX): - return False - - full_path = urlparse.urlunparse(url_parts) - try: - self.get_json(full_path, path_prefix='') - return True - except Exception: - return False - - -class AdminRoleTest(base.DbTestCase): - def setUp(self): - super(AdminRoleTest, self).setUp() - token_info = { - 'token': { - 'project': { - 'id': 'admin' - }, - 'user': { - 'id': 'admin' - } - } - } - self.context = watcher_context.RequestContext( - auth_token_info=token_info, - project_id='admin', - user_id='admin') - - def make_context(*args, **kwargs): - # If context hasn't been constructed with token_info - if not kwargs.get('auth_token_info'): - kwargs['auth_token_info'] = copy.deepcopy(token_info) - if not kwargs.get('project_id'): - kwargs['project_id'] = 'admin' - if not kwargs.get('user_id'): - kwargs['user_id'] = 'admin' - if not kwargs.get('roles'): - kwargs['roles'] = ['admin'] - - context = watcher_context.RequestContext(*args, **kwargs) - return watcher_context.RequestContext.from_dict(context.to_dict()) - - p = mock.patch.object(watcher_context, 'make_context', - side_effect=make_context) - self.mock_make_context = p.start() - self.addCleanup(p.stop) diff --git a/watcher/tests/api/test_base.py b/watcher/tests/api/test_base.py deleted file mode 100644 index 8e5860c..0000000 --- a/watcher/tests/api/test_base.py +++ /dev/null @@ -1,30 +0,0 @@ -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from watcher.tests.api import base - - -class TestBase(base.FunctionalTest): - - def test_api_setup(self): - pass - - def test_bad_uri(self): - response = self.get_json('/bad/path', - expect_errors=True, - headers={"Accept": "application/json"}) - self.assertEqual(404, response.status_int) - self.assertEqual("application/json", response.content_type) - self.assertTrue(response.json['error_message']) diff --git a/watcher/tests/api/test_hooks.py b/watcher/tests/api/test_hooks.py deleted file mode 100644 index 34df0cb..0000000 --- a/watcher/tests/api/test_hooks.py +++ /dev/null @@ -1,273 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""Tests for the Pecan API hooks.""" - -from __future__ import unicode_literals - -import mock -from oslo_config import cfg -import oslo_messaging as messaging -from oslo_serialization import jsonutils -import six -from six.moves import http_client - -from watcher.api.controllers import root -from watcher.api import hooks -from watcher.common import context -from watcher.tests.api import base - - -class FakeRequest(object): - def __init__(self, headers, context, environ): - self.headers = headers - self.context = context - self.environ = environ or {} - self.version = (1, 0) - self.host_url = 'http://127.0.0.1:6385' - - -class FakeRequestState(object): - def __init__(self, headers=None, context=None, environ=None): - self.request = FakeRequest(headers, context, environ) - self.response = FakeRequest(headers, context, environ) - - def set_context(self): - headers = self.request.headers - creds = { - 'user': headers.get('X-User') or headers.get('X-User-Id'), - 'domain_id': headers.get('X-User-Domain-Id'), - 'domain_name': headers.get('X-User-Domain-Name'), - 'auth_token': headers.get('X-Auth-Token'), - 'roles': headers.get('X-Roles', '').split(','), - } - is_admin = ('admin' in creds['roles'] or - 'administrator' in creds['roles']) - is_public_api = self.request.environ.get('is_public_api', False) - - self.request.context = context.RequestContext( - is_admin=is_admin, is_public_api=is_public_api, **creds) - - -def fake_headers(admin=False): - headers = { - 'X-Auth-Token': '8d9f235ca7464dd7ba46f81515797ea0', - 'X-Domain-Id': 'None', - 'X-Domain-Name': 'None', - 'X-Project-Domain-Id': 'default', - 'X-Project-Domain-Name': 'Default', - 'X-Role': '_member_,admin', - 'X-Roles': '_member_,admin', - # 'X-Tenant': 'foo', - # 'X-Tenant-Id': 'b4efa69d4ffa4973863f2eefc094f7f8', - # 'X-Tenant-Name': 'foo', - 'X-User': 'foo', - 'X-User-Domain-Id': 'default', - 'X-User-Domain-Name': 'Default', - 'X-User-Id': '604ab2a197c442c2a84aba66708a9e1e', - 'X-User-Name': 'foo', - } - if 
admin: - headers.update({ - 'X-Project-Name': 'admin', - 'X-Role': '_member_,admin', - 'X-Roles': '_member_,admin', - 'X-Tenant': 'admin', - # 'X-Tenant-Name': 'admin', - # 'X-Tenant': 'admin' - 'X-Tenant-Name': 'admin', - 'X-Tenant-Id': 'c2a3a69d456a412376efdd9dac38', - 'X-Project-Name': 'admin', - 'X-Project-Id': 'c2a3a69d456a412376efdd9dac38', - }) - else: - headers.update({ - 'X-Role': '_member_', - 'X-Roles': '_member_', - 'X-Tenant': 'foo', - 'X-Tenant-Name': 'foo', - 'X-Tenant-Id': 'b4efa69d,4ffa4973863f2eefc094f7f8', - 'X-Project-Name': 'foo', - 'X-Project-Id': 'b4efa69d4ffa4973863f2eefc094f7f8', - }) - return headers - - -class TestNoExceptionTracebackHook(base.FunctionalTest): - - TRACE = ['Traceback (most recent call last):', - ' File "/opt/stack/watcher/watcher/common/rpc/amqp.py",' - ' line 434, in _process_data\\n **args)', - ' File "/opt/stack/watcher/watcher/common/rpc/' - 'dispatcher.py", line 172, in dispatch\\n result =' - ' getattr(proxyobj, method)(ctxt, **kwargs)'] - MSG_WITHOUT_TRACE = "Test exception message." 
- MSG_WITH_TRACE = MSG_WITHOUT_TRACE + "\n" + "\n".join(TRACE) - - def setUp(self): - super(TestNoExceptionTracebackHook, self).setUp() - p = mock.patch.object(root.Root, 'convert') - self.root_convert_mock = p.start() - self.addCleanup(p.stop) - cfg.CONF.set_override('debug', False) - - def test_hook_exception_success(self): - self.root_convert_mock.side_effect = Exception(self.MSG_WITH_TRACE) - - response = self.get_json('/', path_prefix='', expect_errors=True) - - actual_msg = jsonutils.loads( - response.json['error_message'])['faultstring'] - self.assertEqual(self.MSG_WITHOUT_TRACE, actual_msg) - - def test_hook_remote_error_success(self): - test_exc_type = 'TestException' - self.root_convert_mock.side_effect = messaging.rpc.RemoteError( - test_exc_type, self.MSG_WITHOUT_TRACE, self.TRACE) - - response = self.get_json('/', path_prefix='', expect_errors=True) - - # NOTE(max_lobur): For RemoteError the client message will still have - # some garbage because in RemoteError traceback is serialized as a list - # instead of'\n'.join(trace). But since RemoteError is kind of very - # rare thing (happens due to wrong deserialization settings etc.) - # we don't care about this garbage. 
- expected_msg = ("Remote error: %s %s" - % (test_exc_type, self.MSG_WITHOUT_TRACE) - + ("\n[u'" if six.PY2 else "\n['")) - actual_msg = jsonutils.loads( - response.json['error_message'])['faultstring'] - self.assertEqual(expected_msg, actual_msg) - - def _test_hook_without_traceback(self): - msg = "Error message without traceback \n but \n multiline" - self.root_convert_mock.side_effect = Exception(msg) - - response = self.get_json('/', path_prefix='', expect_errors=True) - - actual_msg = jsonutils.loads( - response.json['error_message'])['faultstring'] - self.assertEqual(msg, actual_msg) - - def test_hook_without_traceback(self): - self._test_hook_without_traceback() - - def test_hook_without_traceback_debug(self): - cfg.CONF.set_override('debug', True) - self._test_hook_without_traceback() - - def _test_hook_on_serverfault(self): - self.root_convert_mock.side_effect = Exception(self.MSG_WITH_TRACE) - - response = self.get_json('/', path_prefix='', expect_errors=True) - - actual_msg = jsonutils.loads( - response.json['error_message'])['faultstring'] - return actual_msg - - def test_hook_on_serverfault(self): - cfg.CONF.set_override('debug', False) - msg = self._test_hook_on_serverfault() - self.assertEqual(self.MSG_WITHOUT_TRACE, msg) - - def test_hook_on_serverfault_debug(self): - cfg.CONF.set_override('debug', True) - msg = self._test_hook_on_serverfault() - self.assertEqual(self.MSG_WITH_TRACE, msg) - - def _test_hook_on_clientfault(self): - client_error = Exception(self.MSG_WITH_TRACE) - client_error.code = http_client.BAD_REQUEST - self.root_convert_mock.side_effect = client_error - - response = self.get_json('/', path_prefix='', expect_errors=True) - - actual_msg = jsonutils.loads( - response.json['error_message'])['faultstring'] - return actual_msg - - def test_hook_on_clientfault(self): - msg = self._test_hook_on_clientfault() - self.assertEqual(self.MSG_WITHOUT_TRACE, msg) - - def test_hook_on_clientfault_debug_tracebacks(self): - 
cfg.CONF.set_override('debug', True) - msg = self._test_hook_on_clientfault() - self.assertEqual(self.MSG_WITH_TRACE, msg) - - -class TestContextHook(base.FunctionalTest): - @mock.patch.object(context, 'RequestContext') - def test_context_hook_not_admin(self, mock_ctx): - cfg.CONF.set_override( - 'auth_type', 'password', group='watcher_clients_auth') - headers = fake_headers(admin=False) - reqstate = FakeRequestState(headers=headers) - context_hook = hooks.ContextHook() - context_hook.before(reqstate) - mock_ctx.assert_called_with( - auth_token=headers['X-Auth-Token'], - user=headers['X-User'], - user_id=headers['X-User-Id'], - domain_id=headers['X-User-Domain-Id'], - domain_name=headers['X-User-Domain-Name'], - auth_url=cfg.CONF.keystone_authtoken.auth_uri, - project=headers['X-Project-Name'], - project_id=headers['X-Project-Id'], - show_deleted=None, - auth_token_info=self.token_info, - roles=headers['X-Roles'].split(',')) - - @mock.patch.object(context, 'RequestContext') - def test_context_hook_admin(self, mock_ctx): - cfg.CONF.set_override( - 'auth_type', 'password', group='watcher_clients_auth') - headers = fake_headers(admin=True) - reqstate = FakeRequestState(headers=headers) - context_hook = hooks.ContextHook() - context_hook.before(reqstate) - mock_ctx.assert_called_with( - auth_token=headers['X-Auth-Token'], - user=headers['X-User'], - user_id=headers['X-User-Id'], - domain_id=headers['X-User-Domain-Id'], - domain_name=headers['X-User-Domain-Name'], - auth_url=cfg.CONF.keystone_authtoken.auth_uri, - project=headers['X-Project-Name'], - project_id=headers['X-Project-Id'], - show_deleted=None, - auth_token_info=self.token_info, - roles=headers['X-Roles'].split(',')) - - @mock.patch.object(context, 'RequestContext') - def test_context_hook_public_api(self, mock_ctx): - cfg.CONF.set_override( - 'auth_type', 'password', group='watcher_clients_auth') - headers = fake_headers(admin=True) - env = {'is_public_api': True} - reqstate = 
FakeRequestState(headers=headers, environ=env) - context_hook = hooks.ContextHook() - context_hook.before(reqstate) - mock_ctx.assert_called_with( - auth_token=headers['X-Auth-Token'], - user=headers['X-User'], - user_id=headers['X-User-Id'], - domain_id=headers['X-User-Domain-Id'], - domain_name=headers['X-User-Domain-Name'], - auth_url=cfg.CONF.keystone_authtoken.auth_uri, - project=headers['X-Project-Name'], - project_id=headers['X-Project-Id'], - show_deleted=None, - auth_token_info=self.token_info, - roles=headers['X-Roles'].split(',')) diff --git a/watcher/tests/api/test_root.py b/watcher/tests/api/test_root.py deleted file mode 100644 index 7d93c30..0000000 --- a/watcher/tests/api/test_root.py +++ /dev/null @@ -1,45 +0,0 @@ -# Copyright 2013 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from watcher.tests.api import base - - -class TestRoot(base.FunctionalTest): - - def test_get_root(self): - data = self.get_json('/', path_prefix='') - self.assertEqual('v1', data['default_version']['id']) - # Check fields are not empty - [self.assertNotIn(f, ['', []]) for f in data.keys()] - - -class TestV1Root(base.FunctionalTest): - - def test_get_v1_root(self): - data = self.get_json('/') - self.assertEqual('v1', data['id']) - # Check fields are not empty - for f in data.keys(): - self.assertNotIn(f, ['', []]) - # Check if all known resources are present and there are no extra ones. 
- not_resources = ('id', 'links', 'media_types') - actual_resources = tuple(set(data.keys()) - set(not_resources)) - expected_resources = ('audit_templates', 'audits', 'actions', - 'action_plans', 'scoring_engines', - 'services') - self.assertEqual(sorted(expected_resources), sorted(actual_resources)) - - self.assertIn({'type': 'application/vnd.openstack.watcher.v1+json', - 'base': 'application/json'}, data['media_types']) diff --git a/watcher/tests/api/test_scheduling.py b/watcher/tests/api/test_scheduling.py deleted file mode 100644 index b857afa..0000000 --- a/watcher/tests/api/test_scheduling.py +++ /dev/null @@ -1,114 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from apscheduler.schedulers import background -import datetime -import freezegun -import mock - -from watcher.api import scheduling -from watcher.notifications import service -from watcher import objects -from watcher.tests import base -from watcher.tests.db import base as db_base -from watcher.tests.db import utils - - -class TestSchedulingService(base.TestCase): - - @mock.patch.object(background.BackgroundScheduler, 'start') - def test_start_scheduling_service(self, m_start): - scheduler = scheduling.APISchedulingService() - scheduler.start() - m_start.assert_called_once_with(scheduler) - jobs = scheduler.get_jobs() - self.assertEqual(1, len(jobs)) - - -class TestSchedulingServiceFunctions(db_base.DbTestCase): - - def setUp(self): - super(TestSchedulingServiceFunctions, self).setUp() - fake_service = utils.get_test_service( - created_at=datetime.datetime.utcnow()) - self.fake_service = objects.Service(**fake_service) - - @mock.patch.object(scheduling.APISchedulingService, 'get_service_status') - @mock.patch.object(objects.Service, 'list') - @mock.patch.object(service, 'send_service_update') - def test_get_services_status_without_services_in_list( - self, mock_service_update, mock_get_list, mock_service_status): - scheduler = scheduling.APISchedulingService() - mock_get_list.return_value = [self.fake_service] - mock_service_status.return_value = 'ACTIVE' - scheduler.get_services_status(mock.ANY) - mock_service_status.assert_called_once_with(mock.ANY, - self.fake_service.id) - - mock_service_update.assert_not_called() - - @mock.patch.object(scheduling.APISchedulingService, 'get_service_status') - @mock.patch.object(objects.Service, 'list') - @mock.patch.object(service, 'send_service_update') - def test_get_services_status_with_services_in_list_same_status( - self, mock_service_update, mock_get_list, mock_service_status): - scheduler = scheduling.APISchedulingService() - mock_get_list.return_value = [self.fake_service] - scheduler.services_status = {1: 'ACTIVE'} 
- mock_service_status.return_value = 'ACTIVE' - scheduler.get_services_status(mock.ANY) - mock_service_status.assert_called_once_with(mock.ANY, - self.fake_service.id) - - mock_service_update.assert_not_called() - - @mock.patch.object(scheduling.APISchedulingService, 'get_service_status') - @mock.patch.object(objects.Service, 'list') - @mock.patch.object(service, 'send_service_update') - def test_get_services_status_with_services_in_list_diff_status( - self, mock_service_update, mock_get_list, mock_service_status): - scheduler = scheduling.APISchedulingService() - mock_get_list.return_value = [self.fake_service] - scheduler.services_status = {1: 'FAILED'} - mock_service_status.return_value = 'ACTIVE' - scheduler.get_services_status(mock.ANY) - mock_service_status.assert_called_once_with(mock.ANY, - self.fake_service.id) - - mock_service_update.assert_called_once_with(mock.ANY, - self.fake_service, - state='ACTIVE') - - @mock.patch.object(objects.Service, 'get') - def test_get_service_status_failed_service( - self, mock_get): - scheduler = scheduling.APISchedulingService() - mock_get.return_value = self.fake_service - service_status = scheduler.get_service_status(mock.ANY, - self.fake_service.id) - mock_get.assert_called_once_with(mock.ANY, - self.fake_service.id) - self.assertEqual('FAILED', service_status) - - @freezegun.freeze_time('2016-09-22T08:32:26.219414') - @mock.patch.object(objects.Service, 'get') - def test_get_service_status_failed_active( - self, mock_get): - scheduler = scheduling.APISchedulingService() - mock_get.return_value = self.fake_service - service_status = scheduler.get_service_status(mock.ANY, - self.fake_service.id) - mock_get.assert_called_once_with(mock.ANY, - self.fake_service.id) - self.assertEqual('ACTIVE', service_status) diff --git a/watcher/tests/api/test_utils.py b/watcher/tests/api/test_utils.py deleted file mode 100644 index 9c6d0ce..0000000 --- a/watcher/tests/api/test_utils.py +++ /dev/null @@ -1,59 +0,0 @@ -# -*- encoding: 
utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import unicode_literals - -from oslo_config import cfg -import wsme - -from watcher.api.controllers.v1 import utils as v1_utils -from watcher.tests import base - - -class TestApiUtilsValidScenarios(base.TestCase): - - scenarios = [ - ("limit=None + max_limit=None", - {"limit": None, "max_limit": None, "expected": None}), - ("limit=None + max_limit=1", - {"limit": None, "max_limit": 1, "expected": 1}), - # ("limit=0 + max_limit=None", - # {"limit": 0, "max_limit": None, "expected": 0}), - ("limit=1 + max_limit=None", - {"limit": 1, "max_limit": None, "expected": 1}), - ("limit=1 + max_limit=1", - {"limit": 1, "max_limit": 1, "expected": 1}), - ("limit=2 + max_limit=1", - {"limit": 2, "max_limit": 1, "expected": 1}), - ] - - def test_validate_limit(self): - cfg.CONF.set_override("max_limit", self.max_limit, group="api") - actual_limit = v1_utils.validate_limit(self.limit) - self.assertEqual(self.expected, actual_limit) - - -class TestApiUtilsInvalidScenarios(base.TestCase): - - scenarios = [ - ("limit=0 + max_limit=None", {"limit": 0, "max_limit": None}), - ] - - def test_validate_limit_invalid_cases(self): - cfg.CONF.set_override("max_limit", self.max_limit, group="api") - self.assertRaises( - wsme.exc.ClientSideError, v1_utils.validate_limit, self.limit - ) diff --git a/watcher/tests/api/utils.py b/watcher/tests/api/utils.py deleted file mode 100644 index 
221eb76..0000000 --- a/watcher/tests/api/utils.py +++ /dev/null @@ -1,108 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Utils for testing the API service. -""" - -import datetime -from oslo_serialization import jsonutils - -from watcher.api.controllers.v1 import action as action_ctrl -from watcher.api.controllers.v1 import action_plan as action_plan_ctrl -from watcher.api.controllers.v1 import audit as audit_ctrl -from watcher.api.controllers.v1 import audit_template as audit_template_ctrl -from watcher.tests.db import utils as db_utils - - -ADMIN_TOKEN = '4562138218392831' -MEMBER_TOKEN = '4562138218392832' - - -class FakeMemcache(object): - """Fake cache that is used for keystone tokens lookup.""" - - _cache = { - 'tokens/%s' % ADMIN_TOKEN: { - 'access': { - 'token': {'id': ADMIN_TOKEN, - 'expires': '2100-09-11T00:00:00'}, - 'user': {'id': 'user_id1', - 'name': 'user_name1', - 'tenantId': '123i2910', - 'tenantName': 'mytenant', - 'roles': [{'name': 'admin'}] - }, - } - }, - 'tokens/%s' % MEMBER_TOKEN: { - 'access': { - 'token': {'id': MEMBER_TOKEN, - 'expires': '2100-09-11T00:00:00'}, - 'user': {'id': 'user_id2', - 'name': 'user-good', - 'tenantId': 'project-good', - 'tenantName': 'goodies', - 'roles': [{'name': 'Member'}] - } - } - } - } - - def __init__(self): - self.set_key = None - self.set_value = None - self.token_expiration = None - - def get(self, key): - dt = datetime.datetime.utcnow() + 
datetime.timedelta(minutes=5) - return jsonutils.dumps((self._cache.get(key), dt.isoformat())) - - def set(self, key, value, time=0, min_compress_len=0): - self.set_value = value - self.set_key = key - - -def remove_internal(values, internal): - # NOTE(yuriyz): internal attributes should not be posted, except uuid - int_attr = [attr.lstrip('/') for attr in internal if attr != '/uuid'] - return dict( - (k, v) for (k, v) in values.items() if k not in int_attr - ) - - -def audit_post_data(**kw): - audit = db_utils.get_test_audit(**kw) - internal = audit_ctrl.AuditPatchType.internal_attrs() - return remove_internal(audit, internal) - - -def audit_template_post_data(**kw): - attrs = audit_template_ctrl.AuditTemplatePostType._wsme_attributes - audit_template = db_utils.get_test_audit_template() - fields = [field.key for field in attrs] - post_data = {k: v for k, v in audit_template.items() if k in fields} - post_data.update({k: v for k, v in kw.items() if k in fields}) - return post_data - - -def action_post_data(**kw): - action = db_utils.get_test_action(**kw) - internal = action_ctrl.ActionPatchType.internal_attrs() - return remove_internal(action, internal) - - -def action_plan_post_data(**kw): - act_plan = db_utils.get_test_action_plan(**kw) - internal = action_plan_ctrl.ActionPlanPatchType.internal_attrs() - return remove_internal(act_plan, internal) diff --git a/watcher/tests/api/v1/__init__.py b/watcher/tests/api/v1/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/tests/api/v1/test_actions.py b/watcher/tests/api/v1/test_actions.py deleted file mode 100644 index ca5881f..0000000 --- a/watcher/tests/api/v1/test_actions.py +++ /dev/null @@ -1,509 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import datetime -import mock - -from oslo_config import cfg -from oslo_serialization import jsonutils -from wsme import types as wtypes - -from watcher.api.controllers.v1 import action as api_action -from watcher.common import utils -from watcher.db import api as db_api -from watcher import objects -from watcher.tests.api import base as api_base -from watcher.tests.api import utils as api_utils -from watcher.tests import base -from watcher.tests.db import utils as db_utils -from watcher.tests.objects import utils as obj_utils - - -def post_get_test_action(**kw): - action = api_utils.action_post_data(**kw) - action_plan = db_utils.get_test_action_plan() - del action['action_plan_id'] - action['action_plan_uuid'] = kw.get('action_plan_uuid', - action_plan['uuid']) - action['parents'] = None - return action - - -class TestActionObject(base.TestCase): - - def test_action_init(self): - action_dict = api_utils.action_post_data(action_plan_id=None, - parents=None) - del action_dict['state'] - action = api_action.Action(**action_dict) - self.assertEqual(wtypes.Unset, action.state) - - -class TestListAction(api_base.FunctionalTest): - - def setUp(self): - super(TestListAction, self).setUp() - self.goal = obj_utils.create_test_goal(self.context) - self.strategy = obj_utils.create_test_strategy(self.context) - self.audit = obj_utils.create_test_audit(self.context) - self.action_plan = obj_utils.create_test_action_plan(self.context) - - def test_empty(self): - response = self.get_json('/actions') - self.assertEqual([], response['actions']) - - def 
_assert_action_fields(self, action): - action_fields = ['uuid', 'state', 'action_plan_uuid', 'action_type'] - for field in action_fields: - self.assertIn(field, action) - - def test_one(self): - action = obj_utils.create_test_action(self.context, parents=None) - response = self.get_json('/actions') - self.assertEqual(action.uuid, response['actions'][0]["uuid"]) - self._assert_action_fields(response['actions'][0]) - - def test_one_soft_deleted(self): - action = obj_utils.create_test_action(self.context, parents=None) - action.soft_delete() - response = self.get_json('/actions', - headers={'X-Show-Deleted': 'True'}) - self.assertEqual(action.uuid, response['actions'][0]["uuid"]) - self._assert_action_fields(response['actions'][0]) - - response = self.get_json('/actions') - self.assertEqual([], response['actions']) - - def test_get_one(self): - action = obj_utils.create_test_action(self.context, parents=None) - response = self.get_json('/actions/%s' % action['uuid']) - self.assertEqual(action.uuid, response['uuid']) - self.assertEqual(action.action_type, response['action_type']) - self.assertEqual(action.input_parameters, response['input_parameters']) - self._assert_action_fields(response) - - def test_get_one_soft_deleted(self): - action = obj_utils.create_test_action(self.context, parents=None) - action.soft_delete() - response = self.get_json('/actions/%s' % action['uuid'], - headers={'X-Show-Deleted': 'True'}) - self.assertEqual(action.uuid, response['uuid']) - self._assert_action_fields(response) - - response = self.get_json('/actions/%s' % action['uuid'], - expect_errors=True) - self.assertEqual(404, response.status_int) - - def test_detail(self): - action = obj_utils.create_test_action(self.context, parents=None) - response = self.get_json('/actions/detail') - self.assertEqual(action.uuid, response['actions'][0]["uuid"]) - self._assert_action_fields(response['actions'][0]) - - def test_detail_soft_deleted(self): - action = 
obj_utils.create_test_action(self.context, parents=None) - action.soft_delete() - response = self.get_json('/actions/detail', - headers={'X-Show-Deleted': 'True'}) - self.assertEqual(action.uuid, response['actions'][0]["uuid"]) - self._assert_action_fields(response['actions'][0]) - - response = self.get_json('/actions/detail') - self.assertEqual([], response['actions']) - - def test_detail_against_single(self): - action = obj_utils.create_test_action(self.context, parents=None) - response = self.get_json('/actions/%s/detail' % action['uuid'], - expect_errors=True) - self.assertEqual(404, response.status_int) - - def test_many(self): - action_list = [] - for id_ in range(5): - action = obj_utils.create_test_action(self.context, id=id_, - uuid=utils.generate_uuid()) - action_list.append(action.uuid) - response = self.get_json('/actions') - self.assertEqual(len(action_list), len(response['actions'])) - uuids = [s['uuid'] for s in response['actions']] - self.assertEqual(sorted(action_list), sorted(uuids)) - - def test_many_with_action_plan_uuid(self): - action_plan = obj_utils.create_test_action_plan( - self.context, - id=2, - uuid=utils.generate_uuid(), - audit_id=1) - action_list = [] - for id_ in range(5): - action = obj_utils.create_test_action( - self.context, id=id_, - action_plan_id=2, - uuid=utils.generate_uuid()) - action_list.append(action.uuid) - response = self.get_json('/actions') - self.assertEqual(len(action_list), len(response['actions'])) - for action in response['actions']: - self.assertEqual(action_plan.uuid, action['action_plan_uuid']) - - def test_filter_by_audit_uuid(self): - action_plan_1 = obj_utils.create_test_action_plan( - self.context, - uuid=utils.generate_uuid()) - action_list = [] - - for id_ in range(3): - action = obj_utils.create_test_action( - self.context, id=id_, - action_plan_id=action_plan_1.id, - uuid=utils.generate_uuid()) - action_list.append(action.uuid) - - audit2 = obj_utils.create_test_audit( - self.context, id=2, 
uuid=utils.generate_uuid()) - action_plan_2 = obj_utils.create_test_action_plan( - self.context, - uuid=utils.generate_uuid(), - audit_id=audit2.id) - - for id_ in range(4, 5, 6): - obj_utils.create_test_action( - self.context, id=id_, - action_plan_id=action_plan_2.id, - uuid=utils.generate_uuid()) - - response = self.get_json('/actions?audit_uuid=%s' % self.audit.uuid) - self.assertEqual(len(action_list), len(response['actions'])) - for action in response['actions']: - self.assertEqual(action_plan_1.uuid, action['action_plan_uuid']) - - def test_filter_by_action_plan_uuid(self): - action_plan_1 = obj_utils.create_test_action_plan( - self.context, - uuid=utils.generate_uuid(), - audit_id=self.audit.id) - action_list = [] - - for id_ in range(3): - action = obj_utils.create_test_action( - self.context, id=id_, - action_plan_id=action_plan_1.id, - uuid=utils.generate_uuid()) - action_list.append(action.uuid) - - action_plan_2 = obj_utils.create_test_action_plan( - self.context, - uuid=utils.generate_uuid(), - audit_id=self.audit.id) - - for id_ in range(4, 5, 6): - obj_utils.create_test_action( - self.context, id=id_, - action_plan_id=action_plan_2.id, - uuid=utils.generate_uuid()) - - response = self.get_json( - '/actions?action_plan_uuid=%s' % action_plan_1.uuid) - self.assertEqual(len(action_list), len(response['actions'])) - for action in response['actions']: - self.assertEqual(action_plan_1.uuid, action['action_plan_uuid']) - - response = self.get_json( - '/actions?action_plan_uuid=%s' % action_plan_2.uuid) - for action in response['actions']: - self.assertEqual(action_plan_2.uuid, action['action_plan_uuid']) - - def test_details_and_filter_by_action_plan_uuid(self): - action_plan = obj_utils.create_test_action_plan( - self.context, - uuid=utils.generate_uuid(), - audit_id=self.audit.id) - - for id_ in range(1, 3): - action = obj_utils.create_test_action( - self.context, id=id_, - action_plan_id=action_plan.id, - uuid=utils.generate_uuid()) - - response = 
self.get_json( - '/actions/detail?action_plan_uuid=%s' % action_plan.uuid) - for action in response['actions']: - self.assertEqual(action_plan.uuid, action['action_plan_uuid']) - - def test_details_and_filter_by_audit_uuid(self): - action_plan = obj_utils.create_test_action_plan( - self.context, - uuid=utils.generate_uuid(), - audit_id=self.audit.id) - - for id_ in range(1, 3): - action = obj_utils.create_test_action( - self.context, id=id_, - action_plan_id=action_plan.id, - uuid=utils.generate_uuid()) - - response = self.get_json( - '/actions/detail?audit_uuid=%s' % self.audit.uuid) - for action in response['actions']: - self.assertEqual(action_plan.uuid, action['action_plan_uuid']) - - def test_filter_by_action_plan_and_audit_uuids(self): - action_plan = obj_utils.create_test_action_plan( - self.context, - uuid=utils.generate_uuid(), - audit_id=self.audit.id) - url = '/actions?action_plan_uuid=%s&audit_uuid=%s' % ( - action_plan.uuid, self.audit.uuid) - response = self.get_json(url, expect_errors=True) - self.assertEqual(400, response.status_int) - - def test_many_with_soft_deleted_action_plan_uuid(self): - action_plan1 = obj_utils.create_test_action_plan( - self.context, - id=2, - uuid=utils.generate_uuid(), - audit_id=1) - action_plan2 = obj_utils.create_test_action_plan( - self.context, - id=3, - uuid=utils.generate_uuid(), - audit_id=1) - - ap1_action_list = [] - ap2_action_list = [] - - for id_ in range(0, 2): - action = obj_utils.create_test_action( - self.context, id=id_, - action_plan_id=action_plan1.id, - uuid=utils.generate_uuid()) - ap1_action_list.append(action) - - for id_ in range(2, 4): - action = obj_utils.create_test_action( - self.context, id=id_, - action_plan_id=action_plan2.id, - uuid=utils.generate_uuid()) - ap2_action_list.append(action) - - self.delete('/action_plans/%s' % action_plan1.uuid) - - response = self.get_json('/actions') - # We deleted the actions from the 1st action plan so we've got 2 left - 
self.assertEqual(len(ap2_action_list), len(response['actions'])) - - # We deleted them so that's normal - self.assertEqual([], - [act for act in response['actions'] - if act['action_plan_uuid'] == action_plan1.uuid]) - - # Here are the 2 actions left - self.assertEqual( - set([act.as_dict()['uuid'] for act in ap2_action_list]), - set([act['uuid'] for act in response['actions'] - if act['action_plan_uuid'] == action_plan2.uuid])) - - def test_many_with_parents(self): - action_list = [] - for id_ in range(5): - if id_ > 0: - action = obj_utils.create_test_action( - self.context, id=id_, uuid=utils.generate_uuid(), - parents=[action_list[id_ - 1]]) - else: - action = obj_utils.create_test_action( - self.context, id=id_, uuid=utils.generate_uuid(), - parents=[]) - action_list.append(action.uuid) - response = self.get_json('/actions') - response_actions = response['actions'] - for id_ in range(4): - self.assertEqual(response_actions[id_]['uuid'], - response_actions[id_ + 1]['parents'][0]) - - def test_many_without_soft_deleted(self): - action_list = [] - for id_ in [1, 2, 3]: - action = obj_utils.create_test_action(self.context, id=id_, - uuid=utils.generate_uuid()) - action_list.append(action.uuid) - for id_ in [4, 5]: - action = obj_utils.create_test_action(self.context, id=id_, - uuid=utils.generate_uuid()) - action.soft_delete() - response = self.get_json('/actions') - self.assertEqual(3, len(response['actions'])) - uuids = [s['uuid'] for s in response['actions']] - self.assertEqual(sorted(action_list), sorted(uuids)) - - def test_many_with_soft_deleted(self): - action_list = [] - for id_ in [1, 2, 3]: - action = obj_utils.create_test_action(self.context, id=id_, - uuid=utils.generate_uuid()) - action_list.append(action.uuid) - for id_ in [4, 5]: - action = obj_utils.create_test_action(self.context, id=id_, - uuid=utils.generate_uuid()) - action.soft_delete() - action_list.append(action.uuid) - response = self.get_json('/actions', - headers={'X-Show-Deleted': 
'True'}) - self.assertEqual(5, len(response['actions'])) - uuids = [s['uuid'] for s in response['actions']] - self.assertEqual(sorted(action_list), sorted(uuids)) - - def test_links(self): - uuid = utils.generate_uuid() - obj_utils.create_test_action(self.context, id=1, uuid=uuid) - response = self.get_json('/actions/%s' % uuid) - self.assertIn('links', response.keys()) - self.assertEqual(2, len(response['links'])) - self.assertIn(uuid, response['links'][0]['href']) - for l in response['links']: - bookmark = l['rel'] == 'bookmark' - self.assertTrue(self.validate_link(l['href'], bookmark=bookmark)) - - def test_collection_links(self): - parents = None - for id_ in range(5): - action = obj_utils.create_test_action(self.context, id=id_, - uuid=utils.generate_uuid(), - parents=parents) - parents = [action.id] - response = self.get_json('/actions/?limit=3') - self.assertEqual(3, len(response['actions'])) - - def test_collection_links_default_limit(self): - cfg.CONF.set_override('max_limit', 3, 'api') - for id_ in range(5): - obj_utils.create_test_action(self.context, id=id_, - uuid=utils.generate_uuid()) - response = self.get_json('/actions') - self.assertEqual(3, len(response['actions'])) - - -class TestPatch(api_base.FunctionalTest): - - def setUp(self): - super(TestPatch, self).setUp() - obj_utils.create_test_goal(self.context) - obj_utils.create_test_strategy(self.context) - obj_utils.create_test_audit(self.context) - obj_utils.create_test_action_plan(self.context) - self.action = obj_utils.create_test_action(self.context, parents=None) - p = mock.patch.object(db_api.BaseConnection, 'update_action') - self.mock_action_update = p.start() - self.mock_action_update.side_effect = self._simulate_rpc_action_update - self.addCleanup(p.stop) - - def _simulate_rpc_action_update(self, action): - action.save() - return action - - @mock.patch('oslo_utils.timeutils.utcnow') - def test_patch_not_allowed(self, mock_utcnow): - test_time = datetime.datetime(2000, 1, 1, 0, 0) - 
mock_utcnow.return_value = test_time - new_state = objects.audit.State.SUCCEEDED - response = self.get_json('/actions/%s' % self.action.uuid) - self.assertNotEqual(new_state, response['state']) - - response = self.patch_json( - '/actions/%s' % self.action.uuid, - [{'path': '/state', 'value': new_state, 'op': 'replace'}], - expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(403, response.status_int) - self.assertTrue(response.json['error_message']) - - -class TestDelete(api_base.FunctionalTest): - - def setUp(self): - super(TestDelete, self).setUp() - self.goal = obj_utils.create_test_goal(self.context) - self.strategy = obj_utils.create_test_strategy(self.context) - self.audit = obj_utils.create_test_audit(self.context) - self.action_plan = obj_utils.create_test_action_plan(self.context) - self.action = obj_utils.create_test_action(self.context, parents=None) - p = mock.patch.object(db_api.BaseConnection, 'update_action') - self.mock_action_update = p.start() - self.mock_action_update.side_effect = self._simulate_rpc_action_update - self.addCleanup(p.stop) - - def _simulate_rpc_action_update(self, action): - action.save() - return action - - @mock.patch('oslo_utils.timeutils.utcnow') - def test_delete_action_not_allowed(self, mock_utcnow): - test_time = datetime.datetime(2000, 1, 1, 0, 0) - mock_utcnow.return_value = test_time - response = self.delete('/actions/%s' % self.action.uuid, - expect_errors=True) - self.assertEqual(403, response.status_int) - self.assertEqual('application/json', response.content_type) - self.assertTrue(response.json['error_message']) - - -class TestActionPolicyEnforcement(api_base.FunctionalTest): - - def setUp(self): - super(TestActionPolicyEnforcement, self).setUp() - obj_utils.create_test_goal(self.context) - obj_utils.create_test_strategy(self.context) - obj_utils.create_test_audit(self.context) - obj_utils.create_test_action_plan(self.context) - - def _common_policy_check(self, rule, 
func, *arg, **kwarg): - self.policy.set_rules({ - "admin_api": "(role:admin or role:administrator)", - "default": "rule:admin_api", - rule: "rule:defaut"}) - response = func(*arg, **kwarg) - self.assertEqual(403, response.status_int) - self.assertEqual('application/json', response.content_type) - self.assertTrue( - "Policy doesn't allow %s to be performed." % rule, - jsonutils.loads(response.json['error_message'])['faultstring']) - - def test_policy_disallow_get_all(self): - self._common_policy_check( - "action:get_all", self.get_json, '/actions', - expect_errors=True) - - def test_policy_disallow_get_one(self): - action = obj_utils.create_test_action(self.context) - self._common_policy_check( - "action:get", self.get_json, - '/actions/%s' % action.uuid, - expect_errors=True) - - def test_policy_disallow_detail(self): - self._common_policy_check( - "action:detail", self.get_json, - '/actions/detail', - expect_errors=True) - - -class TestActionPolicyEnforcementWithAdminContext(TestListAction, - api_base.AdminRoleTest): - - def setUp(self): - super(TestActionPolicyEnforcementWithAdminContext, self).setUp() - self.policy.set_rules({ - "admin_api": "(role:admin or role:administrator)", - "default": "rule:admin_api", - "action:detail": "rule:default", - "action:get": "rule:default", - "action:get_all": "rule:default"}) diff --git a/watcher/tests/api/v1/test_actions_plans.py b/watcher/tests/api/v1/test_actions_plans.py deleted file mode 100644 index b417437..0000000 --- a/watcher/tests/api/v1/test_actions_plans.py +++ /dev/null @@ -1,625 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import datetime -import itertools -import mock - -from oslo_config import cfg -from oslo_serialization import jsonutils - -from watcher.applier import rpcapi as aapi -from watcher.common import utils -from watcher.db import api as db_api -from watcher import objects -from watcher.tests.api import base as api_base -from watcher.tests.objects import utils as obj_utils - - -class TestListActionPlan(api_base.FunctionalTest): - - def setUp(self): - super(TestListActionPlan, self).setUp() - obj_utils.create_test_goal(self.context) - obj_utils.create_test_strategy(self.context) - obj_utils.create_test_audit(self.context) - - def test_empty(self): - response = self.get_json('/action_plans') - self.assertEqual([], response['action_plans']) - - def _assert_action_plans_fields(self, action_plan): - action_plan_fields = [ - 'uuid', 'audit_uuid', 'strategy_uuid', 'strategy_name', - 'state', 'global_efficacy', 'efficacy_indicators'] - for field in action_plan_fields: - self.assertIn(field, action_plan) - - def test_one(self): - action_plan = obj_utils.create_test_action_plan(self.context) - response = self.get_json('/action_plans') - self.assertEqual(action_plan.uuid, - response['action_plans'][0]["uuid"]) - self._assert_action_plans_fields(response['action_plans'][0]) - - def test_one_soft_deleted(self): - action_plan = obj_utils.create_test_action_plan(self.context) - action_plan.soft_delete() - response = self.get_json('/action_plans', - headers={'X-Show-Deleted': 'True'}) - self.assertEqual(action_plan.uuid, - response['action_plans'][0]["uuid"]) - 
self._assert_action_plans_fields(response['action_plans'][0]) - - response = self.get_json('/action_plans') - self.assertEqual([], response['action_plans']) - - def test_get_one_ok(self): - action_plan = obj_utils.create_test_action_plan(self.context) - obj_utils.create_test_efficacy_indicator( - self.context, action_plan_id=action_plan['id']) - response = self.get_json('/action_plans/%s' % action_plan['uuid']) - self.assertEqual(action_plan.uuid, response['uuid']) - self._assert_action_plans_fields(response) - self.assertEqual( - [{'description': 'Test indicator', - 'name': 'test_indicator', - 'value': 0.0, - 'unit': '%'}], - response['efficacy_indicators']) - - def test_get_one_soft_deleted(self): - action_plan = obj_utils.create_test_action_plan(self.context) - action_plan.soft_delete() - response = self.get_json('/action_plans/%s' % action_plan['uuid'], - headers={'X-Show-Deleted': 'True'}) - self.assertEqual(action_plan.uuid, response['uuid']) - self._assert_action_plans_fields(response) - - response = self.get_json('/action_plans/%s' % action_plan['uuid'], - expect_errors=True) - self.assertEqual(404, response.status_int) - - def test_detail(self): - action_plan = obj_utils.create_test_action_plan(self.context) - response = self.get_json('/action_plans/detail') - self.assertEqual(action_plan.uuid, - response['action_plans'][0]["uuid"]) - self._assert_action_plans_fields(response['action_plans'][0]) - - def test_detail_soft_deleted(self): - action_plan = obj_utils.create_test_action_plan(self.context) - action_plan.soft_delete() - response = self.get_json('/action_plans/detail', - headers={'X-Show-Deleted': 'True'}) - self.assertEqual(action_plan.uuid, - response['action_plans'][0]["uuid"]) - self._assert_action_plans_fields(response['action_plans'][0]) - - response = self.get_json('/action_plans/detail') - self.assertEqual([], response['action_plans']) - - def test_detail_against_single(self): - action_plan = obj_utils.create_test_action_plan(self.context) - 
response = self.get_json( - '/action_plan/%s/detail' % action_plan['uuid'], - expect_errors=True) - self.assertEqual(404, response.status_int) - - def test_many(self): - action_plan_list = [] - for id_ in range(5): - action_plan = obj_utils.create_test_action_plan( - self.context, id=id_, uuid=utils.generate_uuid()) - action_plan_list.append(action_plan.uuid) - response = self.get_json('/action_plans') - self.assertEqual(len(action_plan_list), len(response['action_plans'])) - uuids = [s['uuid'] for s in response['action_plans']] - self.assertEqual(sorted(action_plan_list), sorted(uuids)) - - def test_many_with_soft_deleted_audit_uuid(self): - action_plan_list = [] - audit1 = obj_utils.create_test_audit(self.context, - id=2, - uuid=utils.generate_uuid()) - audit2 = obj_utils.create_test_audit(self.context, - id=3, - uuid=utils.generate_uuid()) - - for id_ in range(0, 2): - action_plan = obj_utils.create_test_action_plan( - self.context, id=id_, uuid=utils.generate_uuid(), - audit_id=audit1.id) - action_plan_list.append(action_plan.uuid) - - for id_ in range(2, 4): - action_plan = obj_utils.create_test_action_plan( - self.context, id=id_, uuid=utils.generate_uuid(), - audit_id=audit2.id) - action_plan_list.append(action_plan.uuid) - - self.delete('/audits/%s' % audit1.uuid) - - response = self.get_json('/action_plans') - - self.assertEqual(len(action_plan_list), len(response['action_plans'])) - - for id_ in range(0, 2): - action_plan = response['action_plans'][id_] - self.assertIsNone(action_plan['audit_uuid']) - - for id_ in range(2, 4): - action_plan = response['action_plans'][id_] - self.assertEqual(audit2.uuid, action_plan['audit_uuid']) - - def test_many_with_audit_uuid(self): - action_plan_list = [] - audit = obj_utils.create_test_audit(self.context, - id=2, - uuid=utils.generate_uuid()) - for id_ in range(2, 5): - action_plan = obj_utils.create_test_action_plan( - self.context, id=id_, uuid=utils.generate_uuid(), - audit_id=audit.id) - 
action_plan_list.append(action_plan.uuid) - response = self.get_json('/action_plans') - self.assertEqual(len(action_plan_list), len(response['action_plans'])) - for action in response['action_plans']: - self.assertEqual(audit.uuid, action['audit_uuid']) - - def test_many_with_audit_uuid_filter(self): - action_plan_list1 = [] - audit1 = obj_utils.create_test_audit(self.context, - id=2, - uuid=utils.generate_uuid()) - for id_ in range(2, 5): - action_plan = obj_utils.create_test_action_plan( - self.context, id=id_, uuid=utils.generate_uuid(), - audit_id=audit1.id) - action_plan_list1.append(action_plan.uuid) - - audit2 = obj_utils.create_test_audit(self.context, - id=3, - uuid=utils.generate_uuid()) - action_plan_list2 = [] - for id_ in [5, 6, 7]: - action_plan = obj_utils.create_test_action_plan( - self.context, id=id_, uuid=utils.generate_uuid(), - audit_id=audit2.id) - action_plan_list2.append(action_plan.uuid) - - response = self.get_json('/action_plans?audit_uuid=%s' % audit2.uuid) - self.assertEqual(len(action_plan_list2), len(response['action_plans'])) - for action in response['action_plans']: - self.assertEqual(audit2.uuid, action['audit_uuid']) - - def test_many_without_soft_deleted(self): - action_plan_list = [] - for id_ in [1, 2, 3]: - action_plan = obj_utils.create_test_action_plan( - self.context, id=id_, uuid=utils.generate_uuid()) - action_plan_list.append(action_plan.uuid) - for id_ in [4, 5]: - action_plan = obj_utils.create_test_action_plan( - self.context, id=id_, uuid=utils.generate_uuid()) - action_plan.soft_delete() - response = self.get_json('/action_plans') - self.assertEqual(3, len(response['action_plans'])) - uuids = [s['uuid'] for s in response['action_plans']] - self.assertEqual(sorted(action_plan_list), sorted(uuids)) - - def test_many_with_soft_deleted(self): - action_plan_list = [] - for id_ in [1, 2, 3]: - action_plan = obj_utils.create_test_action_plan( - self.context, id=id_, uuid=utils.generate_uuid()) - 
action_plan_list.append(action_plan.uuid) - for id_ in [4, 5]: - action_plan = obj_utils.create_test_action_plan( - self.context, id=id_, uuid=utils.generate_uuid()) - action_plan.soft_delete() - action_plan_list.append(action_plan.uuid) - response = self.get_json('/action_plans', - headers={'X-Show-Deleted': 'True'}) - self.assertEqual(5, len(response['action_plans'])) - uuids = [s['uuid'] for s in response['action_plans']] - self.assertEqual(sorted(action_plan_list), sorted(uuids)) - - def test_many_with_sort_key_audit_uuid(self): - audit_list = [] - for id_ in range(2, 5): - audit = obj_utils.create_test_audit(self.context, - id=id_, - uuid=utils.generate_uuid()) - obj_utils.create_test_action_plan( - self.context, id=id_, uuid=utils.generate_uuid(), - audit_id=audit.id) - audit_list.append(audit.uuid) - - response = self.get_json('/action_plans/?sort_key=audit_uuid') - - self.assertEqual(3, len(response['action_plans'])) - uuids = [s['audit_uuid'] for s in response['action_plans']] - self.assertEqual(sorted(audit_list), uuids) - - def test_links(self): - uuid = utils.generate_uuid() - obj_utils.create_test_action_plan(self.context, id=1, uuid=uuid) - response = self.get_json('/action_plans/%s' % uuid) - self.assertIn('links', response.keys()) - self.assertEqual(2, len(response['links'])) - self.assertIn(uuid, response['links'][0]['href']) - for l in response['links']: - bookmark = l['rel'] == 'bookmark' - self.assertTrue(self.validate_link(l['href'], bookmark=bookmark)) - - def test_collection_links(self): - for id_ in range(5): - obj_utils.create_test_action_plan( - self.context, id=id_, uuid=utils.generate_uuid()) - response = self.get_json('/action_plans/?limit=3') - self.assertEqual(3, len(response['action_plans'])) - - next_marker = response['action_plans'][-1]['uuid'] - self.assertIn(next_marker, response['next']) - - def test_collection_links_default_limit(self): - cfg.CONF.set_override('max_limit', 3, 'api') - for id_ in range(5): - 
obj_utils.create_test_action_plan( - self.context, id=id_, uuid=utils.generate_uuid()) - response = self.get_json('/action_plans') - self.assertEqual(3, len(response['action_plans'])) - - next_marker = response['action_plans'][-1]['uuid'] - self.assertIn(next_marker, response['next']) - - -class TestDelete(api_base.FunctionalTest): - - def setUp(self): - super(TestDelete, self).setUp() - obj_utils.create_test_goal(self.context) - obj_utils.create_test_strategy(self.context) - obj_utils.create_test_audit(self.context) - self.action_plan = obj_utils.create_test_action_plan( - self.context) - p = mock.patch.object(db_api.BaseConnection, 'destroy_action_plan') - self.mock_action_plan_delete = p.start() - self.mock_action_plan_delete.side_effect = \ - self._simulate_rpc_action_plan_delete - self.addCleanup(p.stop) - - def _simulate_rpc_action_plan_delete(self, audit_uuid): - action_plan = objects.ActionPlan.get_by_uuid(self.context, audit_uuid) - action_plan.destroy() - - def test_delete_action_plan_without_action(self): - self.delete('/action_plans/%s' % self.action_plan.uuid) - response = self.get_json('/action_plans/%s' % self.action_plan.uuid, - expect_errors=True) - self.assertEqual(404, response.status_int) - self.assertEqual('application/json', response.content_type) - self.assertTrue(response.json['error_message']) - - def test_delete_action_plan_with_action(self): - action = obj_utils.create_test_action( - self.context, id=1) - - self.delete('/action_plans/%s' % self.action_plan.uuid) - ap_response = self.get_json('/action_plans/%s' % self.action_plan.uuid, - expect_errors=True) - acts_response = self.get_json( - '/actions/?action_plan_uuid=%s' % self.action_plan.uuid) - act_response = self.get_json( - '/actions/%s' % action.uuid, - expect_errors=True) - - # The action plan does not exist anymore - self.assertEqual(404, ap_response.status_int) - self.assertEqual('application/json', ap_response.content_type) - self.assertTrue(ap_response.json['error_message']) - 
- # Nor does the action - self.assertEqual(0, len(acts_response['actions'])) - self.assertEqual(404, act_response.status_int) - self.assertEqual('application/json', act_response.content_type) - self.assertTrue(act_response.json['error_message']) - - def test_delete_action_plan_not_found(self): - uuid = utils.generate_uuid() - response = self.delete('/action_plans/%s' % uuid, expect_errors=True) - self.assertEqual(404, response.status_int) - self.assertEqual('application/json', response.content_type) - self.assertTrue(response.json['error_message']) - - -class TestPatch(api_base.FunctionalTest): - - def setUp(self): - super(TestPatch, self).setUp() - obj_utils.create_test_goal(self.context) - obj_utils.create_test_strategy(self.context) - obj_utils.create_test_audit(self.context) - self.action_plan = obj_utils.create_test_action_plan( - self.context, state=objects.action_plan.State.RECOMMENDED) - p = mock.patch.object(db_api.BaseConnection, 'update_action_plan') - self.mock_action_plan_update = p.start() - self.mock_action_plan_update.side_effect = \ - self._simulate_rpc_action_plan_update - self.addCleanup(p.stop) - - def _simulate_rpc_action_plan_update(self, action_plan): - action_plan.save() - return action_plan - - @mock.patch('oslo_utils.timeutils.utcnow') - def test_replace_denied(self, mock_utcnow): - test_time = datetime.datetime(2000, 1, 1, 0, 0) - mock_utcnow.return_value = test_time - - new_state = objects.action_plan.State.DELETED - response = self.get_json( - '/action_plans/%s' % self.action_plan.uuid) - self.assertNotEqual(new_state, response['state']) - - response = self.patch_json( - '/action_plans/%s' % self.action_plan.uuid, - [{'path': '/state', 'value': new_state, 'op': 'replace'}], - expect_errors=True) - - self.assertEqual('application/json', response.content_type) - self.assertEqual(400, response.status_int) - self.assertTrue(response.json['error_message']) - - def test_replace_non_existent_action_plan_denied(self): - response = 
self.patch_json( - '/action_plans/%s' % utils.generate_uuid(), - [{'path': '/state', - 'value': objects.action_plan.State.PENDING, - 'op': 'replace'}], - expect_errors=True) - self.assertEqual(404, response.status_int) - self.assertEqual('application/json', response.content_type) - self.assertTrue(response.json['error_message']) - - def test_add_non_existent_property_denied(self): - response = self.patch_json( - '/action_plans/%s' % self.action_plan.uuid, - [{'path': '/foo', 'value': 'bar', 'op': 'add'}], - expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(400, response.status_int) - self.assertTrue(response.json['error_message']) - - def test_remove_denied(self): - # We should not be able to remove the state of an action plan - response = self.get_json( - '/action_plans/%s' % self.action_plan.uuid) - self.assertIsNotNone(response['state']) - - response = self.patch_json( - '/action_plans/%s' % self.action_plan.uuid, - [{'path': '/state', 'op': 'remove'}], - expect_errors=True) - - self.assertEqual('application/json', response.content_type) - self.assertEqual(400, response.status_int) - self.assertTrue(response.json['error_message']) - - def test_remove_uuid_denied(self): - response = self.patch_json( - '/action_plans/%s' % self.action_plan.uuid, - [{'path': '/uuid', 'op': 'remove'}], - expect_errors=True) - self.assertEqual(400, response.status_int) - self.assertEqual('application/json', response.content_type) - self.assertTrue(response.json['error_message']) - - def test_remove_non_existent_property_denied(self): - response = self.patch_json( - '/action_plans/%s' % self.action_plan.uuid, - [{'path': '/non-existent', 'op': 'remove'}], - expect_errors=True) - self.assertEqual(400, response.status_code) - self.assertEqual('application/json', response.content_type) - self.assertTrue(response.json['error_message']) - - @mock.patch.object(aapi.ApplierAPI, 'launch_action_plan') - def test_replace_state_pending_ok(self, 
applier_mock): - new_state = objects.action_plan.State.PENDING - response = self.get_json( - '/action_plans/%s' % self.action_plan.uuid) - self.assertNotEqual(new_state, response['state']) - response = self.patch_json( - '/action_plans/%s' % self.action_plan.uuid, - [{'path': '/state', 'value': new_state, - 'op': 'replace'}]) - self.assertEqual('application/json', response.content_type) - self.assertEqual(200, response.status_code) - applier_mock.assert_called_once_with(mock.ANY, - self.action_plan.uuid) - - -ALLOWED_TRANSITIONS = [ - {"original_state": objects.action_plan.State.RECOMMENDED, - "new_state": objects.action_plan.State.PENDING}, - {"original_state": objects.action_plan.State.RECOMMENDED, - "new_state": objects.action_plan.State.CANCELLED}, - {"original_state": objects.action_plan.State.ONGOING, - "new_state": objects.action_plan.State.CANCELLING}, - {"original_state": objects.action_plan.State.PENDING, - "new_state": objects.action_plan.State.CANCELLED}, -] - - -class TestPatchStateTransitionDenied(api_base.FunctionalTest): - - STATES = [ - ap_state for ap_state in objects.action_plan.State.__dict__ - if not ap_state.startswith("_") - ] - - scenarios = [ - ( - "%s -> %s" % (original_state, new_state), - {"original_state": original_state, - "new_state": new_state}, - ) - for original_state, new_state - in list(itertools.product(STATES, STATES)) - # from DELETED to ... - # NOTE: Any state transition from DELETED (To RECOMMENDED, PENDING, - # ONGOING, CANCELLED, SUCCEEDED and FAILED) will cause a 404 Not Found - # because we cannot retrieve them with a GET (soft_deleted state). 
- # This is the reason why they are not listed here but they have a - # special test to cover it - if original_state != objects.action_plan.State.DELETED - and original_state != new_state - and {"original_state": original_state, - "new_state": new_state} not in ALLOWED_TRANSITIONS - ] - - def setUp(self): - super(TestPatchStateTransitionDenied, self).setUp() - obj_utils.create_test_goal(self.context) - obj_utils.create_test_strategy(self.context) - obj_utils.create_test_audit(self.context) - - @mock.patch.object( - db_api.BaseConnection, 'update_action_plan', - mock.Mock(side_effect=lambda ap: ap.save() or ap)) - def test_replace_state_pending_denied(self): - action_plan = obj_utils.create_test_action_plan( - self.context, state=self.original_state) - - initial_ap = self.get_json('/action_plans/%s' % action_plan.uuid) - response = self.patch_json( - '/action_plans/%s' % action_plan.uuid, - [{'path': '/state', 'value': self.new_state, - 'op': 'replace'}], - expect_errors=True) - updated_ap = self.get_json('/action_plans/%s' % action_plan.uuid) - - self.assertNotEqual(self.new_state, initial_ap['state']) - self.assertEqual(self.original_state, updated_ap['state']) - self.assertEqual(400, response.status_code) - self.assertEqual('application/json', response.content_type) - self.assertTrue(response.json['error_message']) - - -class TestPatchStateTransitionOk(api_base.FunctionalTest): - - scenarios = [ - ( - "%s -> %s" % (transition["original_state"], - transition["new_state"]), - transition - ) - for transition in ALLOWED_TRANSITIONS - ] - - def setUp(self): - super(TestPatchStateTransitionOk, self).setUp() - obj_utils.create_test_goal(self.context) - obj_utils.create_test_strategy(self.context) - obj_utils.create_test_audit(self.context) - - @mock.patch.object( - db_api.BaseConnection, 'update_action_plan', - mock.Mock(side_effect=lambda ap: ap.save() or ap)) - @mock.patch.object(aapi.ApplierAPI, 'launch_action_plan', mock.Mock()) - def 
test_replace_state_pending_ok(self): - action_plan = obj_utils.create_test_action_plan( - self.context, state=self.original_state) - - initial_ap = self.get_json('/action_plans/%s' % action_plan.uuid) - - response = self.patch_json( - '/action_plans/%s' % action_plan.uuid, - [{'path': '/state', 'value': self.new_state, 'op': 'replace'}]) - updated_ap = self.get_json('/action_plans/%s' % action_plan.uuid) - - self.assertNotEqual(self.new_state, initial_ap['state']) - self.assertEqual(self.new_state, updated_ap['state']) - self.assertEqual('application/json', response.content_type) - self.assertEqual(200, response.status_code) - - -class TestActionPlanPolicyEnforcement(api_base.FunctionalTest): - - def setUp(self): - super(TestActionPlanPolicyEnforcement, self).setUp() - obj_utils.create_test_goal(self.context) - obj_utils.create_test_strategy(self.context) - obj_utils.create_test_audit(self.context) - - def _common_policy_check(self, rule, func, *arg, **kwarg): - self.policy.set_rules({ - "admin_api": "(role:admin or role:administrator)", - "default": "rule:admin_api", - rule: "rule:defaut"}) - response = func(*arg, **kwarg) - self.assertEqual(403, response.status_int) - self.assertEqual('application/json', response.content_type) - self.assertTrue( - "Policy doesn't allow %s to be performed." 
% rule, - jsonutils.loads(response.json['error_message'])['faultstring']) - - def test_policy_disallow_get_all(self): - self._common_policy_check( - "action_plan:get_all", self.get_json, '/action_plans', - expect_errors=True) - - def test_policy_disallow_get_one(self): - action_plan = obj_utils.create_test_action_plan(self.context) - self._common_policy_check( - "action_plan:get", self.get_json, - '/action_plans/%s' % action_plan.uuid, - expect_errors=True) - - def test_policy_disallow_detail(self): - self._common_policy_check( - "action_plan:detail", self.get_json, - '/action_plans/detail', - expect_errors=True) - - def test_policy_disallow_update(self): - action_plan = obj_utils.create_test_action_plan(self.context) - self._common_policy_check( - "action_plan:update", self.patch_json, - '/action_plans/%s' % action_plan.uuid, - [{'path': '/state', - 'value': objects.action_plan.State.DELETED, - 'op': 'replace'}], - expect_errors=True) - - def test_policy_disallow_delete(self): - action_plan = obj_utils.create_test_action_plan(self.context) - self._common_policy_check( - "action_plan:delete", self.delete, - '/action_plans/%s' % action_plan.uuid, expect_errors=True) - - -class TestActionPlanPolicyEnforcementWithAdminContext(TestListActionPlan, - api_base.AdminRoleTest): - - def setUp(self): - super(TestActionPlanPolicyEnforcementWithAdminContext, self).setUp() - self.policy.set_rules({ - "admin_api": "(role:admin or role:administrator)", - "default": "rule:admin_api", - "action_plan:delete": "rule:default", - "action_plan:detail": "rule:default", - "action_plan:get": "rule:default", - "action_plan:get_all": "rule:default", - "action_plan:update": "rule:default"}) diff --git a/watcher/tests/api/v1/test_audit_templates.py b/watcher/tests/api/v1/test_audit_templates.py deleted file mode 100644 index ace32a8..0000000 --- a/watcher/tests/api/v1/test_audit_templates.py +++ /dev/null @@ -1,754 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# 
you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import datetime -import itertools -import mock -from webtest.app import AppError - -from oslo_config import cfg -from oslo_serialization import jsonutils -from oslo_utils import timeutils -from six.moves.urllib import parse as urlparse -from wsme import types as wtypes - -from watcher.api.controllers.v1 import audit_template as api_audit_template -from watcher.common import exception -from watcher.common import utils -from watcher import objects -from watcher.tests.api import base as api_base -from watcher.tests.api import utils as api_utils -from watcher.tests import base -from watcher.tests.db import utils as db_utils -from watcher.tests.objects import utils as obj_utils - - -def post_get_test_audit_template(**kw): - goal = db_utils.get_test_goal() - strategy = db_utils.get_test_strategy(goal_id=goal['id']) - kw['goal'] = kw.get('goal', goal['uuid']) - kw['strategy'] = kw.get('strategy', strategy['uuid']) - kw['scope'] = kw.get('scope', []) - audit_template = api_utils.audit_template_post_data(**kw) - return audit_template - - -class TestAuditTemplateObject(base.TestCase): - - def test_audit_template_init(self): - audit_template_dict = post_get_test_audit_template() - del audit_template_dict['name'] - audit_template = api_audit_template.AuditTemplate( - **audit_template_dict) - self.assertEqual(wtypes.Unset, audit_template.name) - - -class FunctionalTestWithSetup(api_base.FunctionalTest): - - def setUp(self): - super(FunctionalTestWithSetup, self).setUp() - self.fake_goal1 = 
obj_utils.create_test_goal( - self.context, id=1, uuid=utils.generate_uuid(), name="dummy_1") - self.fake_goal2 = obj_utils.create_test_goal( - self.context, id=2, uuid=utils.generate_uuid(), name="dummy_2") - self.fake_strategy1 = obj_utils.create_test_strategy( - self.context, id=1, uuid=utils.generate_uuid(), name="strategy_1", - goal_id=self.fake_goal1.id) - self.fake_strategy2 = obj_utils.create_test_strategy( - self.context, id=2, uuid=utils.generate_uuid(), name="strategy_2", - goal_id=self.fake_goal2.id) - - -class TestListAuditTemplate(FunctionalTestWithSetup): - - def test_empty(self): - response = self.get_json('/audit_templates') - self.assertEqual([], response['audit_templates']) - - def _assert_audit_template_fields(self, audit_template): - audit_template_fields = ['name', 'goal_uuid', 'goal_name', - 'strategy_uuid', 'strategy_name'] - for field in audit_template_fields: - self.assertIn(field, audit_template) - - def test_one(self): - audit_template = obj_utils.create_test_audit_template( - self.context, strategy_id=self.fake_strategy1.id) - response = self.get_json('/audit_templates') - self.assertEqual(audit_template.uuid, - response['audit_templates'][0]["uuid"]) - self._assert_audit_template_fields(response['audit_templates'][0]) - - def test_get_one_soft_deleted_ok(self): - audit_template = obj_utils.create_test_audit_template(self.context) - audit_template.soft_delete() - response = self.get_json('/audit_templates', - headers={'X-Show-Deleted': 'True'}) - self.assertEqual(audit_template.uuid, - response['audit_templates'][0]["uuid"]) - self._assert_audit_template_fields(response['audit_templates'][0]) - - response = self.get_json('/audit_templates') - self.assertEqual([], response['audit_templates']) - - def test_get_one_by_uuid(self): - audit_template = obj_utils.create_test_audit_template(self.context) - response = self.get_json( - '/audit_templates/%s' % audit_template['uuid']) - self.assertEqual(audit_template.uuid, response['uuid']) - 
self._assert_audit_template_fields(response) - - def test_get_one_by_name(self): - audit_template = obj_utils.create_test_audit_template(self.context) - response = self.get_json(urlparse.quote( - '/audit_templates/%s' % audit_template['name'])) - self.assertEqual(audit_template.uuid, response['uuid']) - self._assert_audit_template_fields(response) - - def test_get_one_soft_deleted(self): - audit_template = obj_utils.create_test_audit_template(self.context) - audit_template.soft_delete() - response = self.get_json( - '/audit_templates/%s' % audit_template['uuid'], - headers={'X-Show-Deleted': 'True'}) - self.assertEqual(audit_template.uuid, response['uuid']) - self._assert_audit_template_fields(response) - - response = self.get_json( - '/audit_templates/%s' % audit_template['uuid'], - expect_errors=True) - self.assertEqual(404, response.status_int) - - def test_detail(self): - audit_template = obj_utils.create_test_audit_template(self.context) - response = self.get_json('/audit_templates/detail') - self.assertEqual(audit_template.uuid, - response['audit_templates'][0]["uuid"]) - self._assert_audit_template_fields(response['audit_templates'][0]) - - def test_detail_soft_deleted(self): - audit_template = obj_utils.create_test_audit_template(self.context) - audit_template.soft_delete() - response = self.get_json('/audit_templates/detail', - headers={'X-Show-Deleted': 'True'}) - self.assertEqual(audit_template.uuid, - response['audit_templates'][0]["uuid"]) - self._assert_audit_template_fields(response['audit_templates'][0]) - - response = self.get_json('/audit_templates/detail') - self.assertEqual([], response['audit_templates']) - - def test_detail_against_single(self): - audit_template = obj_utils.create_test_audit_template(self.context) - response = self.get_json( - '/audit_templates/%s/detail' % audit_template['uuid'], - expect_errors=True) - self.assertEqual(404, response.status_int) - - def test_many(self): - audit_template_list = [] - for id_ in range(1, 6): - 
audit_template = obj_utils.create_test_audit_template( - self.context, id=id_, - uuid=utils.generate_uuid(), - name='My Audit Template {0}'.format(id_)) - audit_template_list.append(audit_template) - - response = self.get_json('/audit_templates') - self.assertEqual(len(audit_template_list), - len(response['audit_templates'])) - uuids = [s['uuid'] for s in response['audit_templates']] - self.assertEqual( - sorted([at.uuid for at in audit_template_list]), - sorted(uuids)) - - def test_many_without_soft_deleted(self): - audit_template_list = [] - for id_ in range(1, 6): - audit_template = obj_utils.create_test_audit_template( - self.context, id=id_, uuid=utils.generate_uuid(), - name='My Audit Template {0}'.format(id_)) - audit_template_list.append(audit_template) - - # We soft delete the ones with ID 4 and 5 - [at.soft_delete() for at in audit_template_list[3:]] - - response = self.get_json('/audit_templates') - self.assertEqual(3, len(response['audit_templates'])) - uuids = [s['uuid'] for s in response['audit_templates']] - self.assertEqual( - sorted([at.uuid for at in audit_template_list[:3]]), - sorted(uuids)) - - def test_many_with_soft_deleted(self): - audit_template_list = [] - for id_ in range(1, 6): - audit_template = obj_utils.create_test_audit_template( - self.context, id=id_, uuid=utils.generate_uuid(), - name='My Audit Template {0}'.format(id_)) - audit_template_list.append(audit_template) - - # We soft delete the ones with ID 4 and 5 - [at.soft_delete() for at in audit_template_list[3:]] - - response = self.get_json('/audit_templates', - headers={'X-Show-Deleted': 'True'}) - self.assertEqual(5, len(response['audit_templates'])) - uuids = [s['uuid'] for s in response['audit_templates']] - self.assertEqual( - sorted([at.uuid for at in audit_template_list]), - sorted(uuids)) - - def test_links(self): - uuid = utils.generate_uuid() - obj_utils.create_test_audit_template(self.context, id=1, uuid=uuid) - response = self.get_json('/audit_templates/%s' % uuid) - 
self.assertIn('links', response.keys()) - self.assertEqual(2, len(response['links'])) - self.assertIn(uuid, response['links'][0]['href']) - for l in response['links']: - bookmark = l['rel'] == 'bookmark' - self.assertTrue(self.validate_link(l['href'], bookmark=bookmark)) - - def test_collection_links(self): - for id_ in range(5): - obj_utils.create_test_audit_template( - self.context, id=id_, uuid=utils.generate_uuid(), - name='My Audit Template {0}'.format(id_)) - response = self.get_json('/audit_templates/?limit=3') - self.assertEqual(3, len(response['audit_templates'])) - - next_marker = response['audit_templates'][-1]['uuid'] - self.assertIn(next_marker, response['next']) - - def test_collection_links_default_limit(self): - cfg.CONF.set_override('max_limit', 3, 'api') - for id_ in range(5): - obj_utils.create_test_audit_template( - self.context, id=id_, uuid=utils.generate_uuid(), - name='My Audit Template {0}'.format(id_)) - response = self.get_json('/audit_templates') - self.assertEqual(3, len(response['audit_templates'])) - - next_marker = response['audit_templates'][-1]['uuid'] - self.assertIn(next_marker, response['next']) - - def test_filter_by_goal_uuid(self): - for id_, goal_id in enumerate(itertools.chain.from_iterable([ - itertools.repeat(self.fake_goal1.id, 3), - itertools.repeat(self.fake_goal2.id, 2)]), 1): - obj_utils.create_test_audit_template( - self.context, id=id_, uuid=utils.generate_uuid(), - name='My Audit Template {0}'.format(id_), - goal_id=goal_id) - - response = self.get_json( - '/audit_templates?goal=%s' % self.fake_goal2.uuid) - self.assertEqual(2, len(response['audit_templates'])) - - def test_filter_by_goal_name(self): - for id_, goal_id in enumerate(itertools.chain.from_iterable([ - itertools.repeat(self.fake_goal1.id, 3), - itertools.repeat(self.fake_goal2.id, 2)]), 1): - obj_utils.create_test_audit_template( - self.context, id=id_, uuid=utils.generate_uuid(), - name='My Audit Template {0}'.format(id_), - goal_id=goal_id) - - 
response = self.get_json( - '/audit_templates?goal=%s' % self.fake_goal2.name) - self.assertEqual(2, len(response['audit_templates'])) - - def test_filter_by_strategy_uuid(self): - for id_, strategy_id in enumerate(itertools.chain.from_iterable([ - itertools.repeat(self.fake_strategy1.id, 3), - itertools.repeat(self.fake_strategy2.id, 2)]), 1): - obj_utils.create_test_audit_template( - self.context, id=id_, uuid=utils.generate_uuid(), - name='My Audit Template {0}'.format(id_), - strategy_id=strategy_id) - - response = self.get_json( - '/audit_templates?strategy=%s' % self.fake_strategy2.uuid) - self.assertEqual(2, len(response['audit_templates'])) - - def test_filter_by_strategy_name(self): - for id_, strategy_id in enumerate(itertools.chain.from_iterable([ - itertools.repeat(self.fake_strategy1.id, 3), - itertools.repeat(self.fake_strategy2.id, 2)]), 1): - obj_utils.create_test_audit_template( - self.context, id=id_, uuid=utils.generate_uuid(), - name='My Audit Template {0}'.format(id_), - strategy_id=strategy_id) - - response = self.get_json( - '/audit_templates?strategy=%s' % self.fake_strategy2.name) - self.assertEqual(2, len(response['audit_templates'])) - - -class TestPatch(FunctionalTestWithSetup): - - def setUp(self): - super(TestPatch, self).setUp() - obj_utils.create_test_goal(self.context) - self.audit_template = obj_utils.create_test_audit_template( - self.context, strategy_id=None) - - @mock.patch.object(timeutils, 'utcnow') - def test_replace_goal_uuid(self, mock_utcnow): - test_time = datetime.datetime(2000, 1, 1, 0, 0) - mock_utcnow.return_value = test_time - - new_goal_uuid = self.fake_goal2.uuid - response = self.get_json( - '/audit_templates/%s' % self.audit_template.uuid) - self.assertNotEqual(new_goal_uuid, response['goal_uuid']) - - response = self.patch_json( - '/audit_templates/%s' % self.audit_template.uuid, - [{'path': '/goal', 'value': new_goal_uuid, - 'op': 'replace'}]) - self.assertEqual('application/json', response.content_type) - 
self.assertEqual(200, response.status_code) - - response = self.get_json( - '/audit_templates/%s' % self.audit_template.uuid) - self.assertEqual(new_goal_uuid, response['goal_uuid']) - return_updated_at = timeutils.parse_isotime( - response['updated_at']).replace(tzinfo=None) - self.assertEqual(test_time, return_updated_at) - - @mock.patch.object(timeutils, 'utcnow') - def test_replace_goal_uuid_by_name(self, mock_utcnow): - test_time = datetime.datetime(2000, 1, 1, 0, 0) - mock_utcnow.return_value = test_time - - new_goal_uuid = self.fake_goal2.uuid - response = self.get_json(urlparse.quote( - '/audit_templates/%s' % self.audit_template.name)) - self.assertNotEqual(new_goal_uuid, response['goal_uuid']) - - response = self.patch_json( - '/audit_templates/%s' % self.audit_template.name, - [{'path': '/goal', 'value': new_goal_uuid, - 'op': 'replace'}]) - self.assertEqual('application/json', response.content_type) - self.assertEqual(200, response.status_code) - - response = self.get_json( - '/audit_templates/%s' % self.audit_template.name) - self.assertEqual(new_goal_uuid, response['goal_uuid']) - return_updated_at = timeutils.parse_isotime( - response['updated_at']).replace(tzinfo=None) - self.assertEqual(test_time, return_updated_at) - - def test_replace_non_existent_audit_template(self): - response = self.patch_json( - '/audit_templates/%s' % utils.generate_uuid(), - [{'path': '/goal', 'value': self.fake_goal1.uuid, - 'op': 'replace'}], - expect_errors=True) - self.assertEqual(404, response.status_int) - self.assertEqual('application/json', response.content_type) - self.assertTrue(response.json['error_message']) - - def test_replace_invalid_goal(self): - with mock.patch.object( - self.dbapi, - 'update_audit_template', - wraps=self.dbapi.update_audit_template - ) as cn_mock: - response = self.patch_json( - '/audit_templates/%s' % self.audit_template.uuid, - [{'path': '/goal', 'value': utils.generate_uuid(), - 'op': 'replace'}], - expect_errors=True) - 
self.assertEqual(400, response.status_int) - assert not cn_mock.called - - def test_add_goal_uuid(self): - response = self.patch_json( - '/audit_templates/%s' % self.audit_template.uuid, - [{'path': '/goal', - 'value': self.fake_goal2.uuid, - 'op': 'add'}]) - self.assertEqual('application/json', response.content_type) - self.assertEqual(200, response.status_int) - - response = self.get_json( - '/audit_templates/%s' % self.audit_template.uuid) - self.assertEqual(self.fake_goal2.uuid, response['goal_uuid']) - - def test_add_strategy_uuid(self): - response = self.patch_json( - '/audit_templates/%s' % self.audit_template.uuid, - [{'path': '/strategy', - 'value': self.fake_strategy1.uuid, - 'op': 'add'}]) - self.assertEqual('application/json', response.content_type) - self.assertEqual(200, response.status_int) - - response = self.get_json( - '/audit_templates/%s' % self.audit_template.uuid) - self.assertEqual(self.fake_strategy1.uuid, response['strategy_uuid']) - - def test_replace_strategy_uuid(self): - response = self.patch_json( - '/audit_templates/%s' % self.audit_template.uuid, - [{'path': '/strategy', - 'value': self.fake_strategy2['uuid'], - 'op': 'replace'}]) - self.assertEqual('application/json', response.content_type) - self.assertEqual(200, response.status_int) - - response = self.get_json( - '/audit_templates/%s' % self.audit_template.uuid) - self.assertEqual( - self.fake_strategy2['uuid'], response['strategy_uuid']) - - def test_replace_invalid_strategy(self): - response = self.patch_json( - '/audit_templates/%s' % self.audit_template.uuid, - [{'path': '/strategy', - 'value': utils.generate_uuid(), # Does not exist - 'op': 'replace'}], expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(400, response.status_int) - self.assertTrue(response.json['error_message']) - - def test_add_non_existent_property(self): - response = self.patch_json( - '/audit_templates/%s' % self.audit_template.uuid, - [{'path': '/foo', 
'value': 'bar', 'op': 'add'}], - expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(400, response.status_int) - self.assertTrue(response.json['error_message']) - - def test_remove_strategy(self): - audit_template = obj_utils.create_test_audit_template( - self.context, uuid=utils.generate_uuid(), - name="AT_%s" % utils.generate_uuid(), - goal_id=self.fake_goal1.id, - strategy_id=self.fake_strategy1.id) - response = self.get_json( - '/audit_templates/%s' % audit_template.uuid) - self.assertIsNotNone(response['strategy_uuid']) - - response = self.patch_json( - '/audit_templates/%s' % self.audit_template.uuid, - [{'path': '/strategy', 'op': 'remove'}]) - self.assertEqual('application/json', response.content_type) - self.assertEqual(200, response.status_code) - - def test_remove_goal(self): - response = self.get_json( - '/audit_templates/%s' % self.audit_template.uuid) - self.assertIsNotNone(response['goal_uuid']) - - response = self.patch_json( - '/audit_templates/%s' % self.audit_template.uuid, - [{'path': '/goal', 'op': 'remove'}], - expect_errors=True) - self.assertEqual(403, response.status_code) - self.assertEqual('application/json', response.content_type) - self.assertTrue(response.json['error_message']) - - def test_remove_uuid(self): - response = self.patch_json( - '/audit_templates/%s' % self.audit_template.uuid, - [{'path': '/uuid', 'op': 'remove'}], - expect_errors=True) - self.assertEqual(400, response.status_int) - self.assertEqual('application/json', response.content_type) - self.assertTrue(response.json['error_message']) - - def test_remove_non_existent_property(self): - response = self.patch_json( - '/audit_templates/%s' % self.audit_template.uuid, - [{'path': '/non-existent', 'op': 'remove'}], - expect_errors=True) - self.assertEqual(400, response.status_code) - self.assertEqual('application/json', response.content_type) - self.assertTrue(response.json['error_message']) - - -class 
TestPost(FunctionalTestWithSetup): - - @mock.patch.object(timeutils, 'utcnow') - def test_create_audit_template(self, mock_utcnow): - audit_template_dict = post_get_test_audit_template( - goal=self.fake_goal1.uuid, - strategy=self.fake_strategy1.uuid) - test_time = datetime.datetime(2000, 1, 1, 0, 0) - mock_utcnow.return_value = test_time - - response = self.post_json('/audit_templates', audit_template_dict) - self.assertEqual('application/json', response.content_type) - self.assertEqual(201, response.status_int) - # Check location header - self.assertIsNotNone(response.location) - expected_location = \ - '/v1/audit_templates/%s' % response.json['uuid'] - self.assertEqual(urlparse.urlparse(response.location).path, - expected_location) - self.assertTrue(utils.is_uuid_like(response.json['uuid'])) - self.assertNotIn('updated_at', response.json.keys) - self.assertNotIn('deleted_at', response.json.keys) - self.assertEqual(self.fake_goal1.uuid, response.json['goal_uuid']) - self.assertEqual(self.fake_strategy1.uuid, - response.json['strategy_uuid']) - return_created_at = timeutils.parse_isotime( - response.json['created_at']).replace(tzinfo=None) - self.assertEqual(test_time, return_created_at) - - def test_create_audit_template_vlidation_with_aggregates(self): - scope = [{'host_aggregates': [{'id': '*'}]}, - {'availability_zones': [{'name': 'AZ1'}, - {'name': 'AZ2'}]}, - {'exclude': [ - {'instances': [ - {'uuid': 'INSTANCE_1'}, - {'uuid': 'INSTANCE_2'}]}, - {'compute_nodes': [ - {'name': 'Node_1'}, - {'name': 'Node_2'}]}, - {'host_aggregates': [{'id': '*'}]} - ]} - ] - audit_template_dict = post_get_test_audit_template( - goal=self.fake_goal1.uuid, - strategy=self.fake_strategy1.uuid, scope=scope) - with self.assertRaisesRegex(AppError, - "be included and excluded together"): - self.post_json('/audit_templates', audit_template_dict) - - def test_create_audit_template_does_autogenerate_id(self): - audit_template_dict = post_get_test_audit_template( - 
goal=self.fake_goal1.uuid, strategy=None) - with mock.patch.object( - self.dbapi, - 'create_audit_template', - wraps=self.dbapi.create_audit_template - ) as cn_mock: - response = self.post_json('/audit_templates', audit_template_dict) - self.assertEqual(audit_template_dict['goal'], - response.json['goal_uuid']) - # Check that 'id' is not in first arg of positional args - self.assertNotIn('id', cn_mock.call_args[0][0]) - - def test_create_audit_template_generate_uuid(self): - audit_template_dict = post_get_test_audit_template( - goal=self.fake_goal1.uuid, strategy=None) - - response = self.post_json('/audit_templates', audit_template_dict) - self.assertEqual('application/json', response.content_type) - self.assertEqual(201, response.status_int) - self.assertTrue(utils.is_uuid_like(response.json['uuid'])) - - def test_create_audit_template_with_invalid_goal(self): - with mock.patch.object( - self.dbapi, - 'create_audit_template', - wraps=self.dbapi.create_audit_template - ) as cn_mock: - audit_template_dict = post_get_test_audit_template( - goal_uuid=utils.generate_uuid()) - response = self.post_json('/audit_templates', - audit_template_dict, expect_errors=True) - self.assertEqual(400, response.status_int) - assert not cn_mock.called - - def test_create_audit_template_with_invalid_strategy(self): - with mock.patch.object( - self.dbapi, - 'create_audit_template', - wraps=self.dbapi.create_audit_template - ) as cn_mock: - audit_template_dict = post_get_test_audit_template( - goal_uuid=self.fake_goal1['uuid'], - strategy_uuid=utils.generate_uuid()) - response = self.post_json('/audit_templates', - audit_template_dict, expect_errors=True) - self.assertEqual(400, response.status_int) - assert not cn_mock.called - - def test_create_audit_template_with_unrelated_strategy(self): - with mock.patch.object( - self.dbapi, - 'create_audit_template', - wraps=self.dbapi.create_audit_template - ) as cn_mock: - audit_template_dict = post_get_test_audit_template( - 
goal_uuid=self.fake_goal1['uuid'], - strategy=self.fake_strategy2['uuid']) - response = self.post_json('/audit_templates', - audit_template_dict, expect_errors=True) - self.assertEqual(400, response.status_int) - assert not cn_mock.called - - def test_create_audit_template_with_uuid(self): - with mock.patch.object( - self.dbapi, - 'create_audit_template', - wraps=self.dbapi.create_audit_template - ) as cn_mock: - audit_template_dict = post_get_test_audit_template() - response = self.post_json('/audit_templates', audit_template_dict, - expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(400, response.status_int) - assert not cn_mock.called - - -class TestDelete(api_base.FunctionalTest): - - def setUp(self): - super(TestDelete, self).setUp() - obj_utils.create_test_goal(self.context) - self.audit_template = obj_utils.create_test_audit_template( - self.context) - - @mock.patch.object(timeutils, 'utcnow') - def test_delete_audit_template_by_uuid(self, mock_utcnow): - test_time = datetime.datetime(2000, 1, 1, 0, 0) - mock_utcnow.return_value = test_time - self.delete(urlparse.quote('/audit_templates/%s' % - self.audit_template.uuid)) - response = self.get_json( - urlparse.quote('/audit_templates/%s' % self.audit_template.uuid), - expect_errors=True) - self.assertEqual(404, response.status_int) - self.assertEqual('application/json', response.content_type) - self.assertTrue(response.json['error_message']) - - self.assertRaises(exception.AuditTemplateNotFound, - objects.AuditTemplate.get_by_uuid, - self.context, - self.audit_template.uuid) - - self.context.show_deleted = True - at = objects.AuditTemplate.get_by_uuid(self.context, - self.audit_template.uuid) - self.assertEqual(self.audit_template.name, at.name) - - @mock.patch.object(timeutils, 'utcnow') - def test_delete_audit_template_by_name(self, mock_utcnow): - test_time = datetime.datetime(2000, 1, 1, 0, 0) - mock_utcnow.return_value = test_time - 
self.delete(urlparse.quote('/audit_templates/%s' % - self.audit_template.name)) - response = self.get_json( - urlparse.quote('/audit_templates/%s' % self.audit_template.name), - expect_errors=True) - self.assertEqual(404, response.status_int) - self.assertEqual('application/json', response.content_type) - self.assertTrue(response.json['error_message']) - - self.assertRaises(exception.AuditTemplateNotFound, - objects.AuditTemplate.get_by_name, - self.context, - self.audit_template.name) - - self.context.show_deleted = True - at = objects.AuditTemplate.get_by_name(self.context, - self.audit_template.name) - self.assertEqual(self.audit_template.uuid, at.uuid) - - def test_delete_audit_template_not_found(self): - uuid = utils.generate_uuid() - response = self.delete( - '/audit_templates/%s' % uuid, expect_errors=True) - self.assertEqual(404, response.status_int) - self.assertEqual('application/json', response.content_type) - self.assertTrue(response.json['error_message']) - - -class TestAuditTemplatePolicyEnforcement(api_base.FunctionalTest): - - def _common_policy_check(self, rule, func, *arg, **kwarg): - self.policy.set_rules({ - "admin_api": "(role:admin or role:administrator)", - "default": "rule:admin_api", - rule: "rule:defaut"}) - response = func(*arg, **kwarg) - self.assertEqual(403, response.status_int) - self.assertEqual('application/json', response.content_type) - self.assertTrue( - "Policy doesn't allow %s to be performed." 
% rule, - jsonutils.loads(response.json['error_message'])['faultstring']) - - def test_policy_disallow_get_all(self): - self._common_policy_check( - "audit_template:get_all", self.get_json, '/audit_templates', - expect_errors=True) - - def test_policy_disallow_get_one(self): - obj_utils.create_test_goal(self.context) - audit_template = obj_utils.create_test_audit_template(self.context) - self._common_policy_check( - "audit_template:get", self.get_json, - '/audit_templates/%s' % audit_template.uuid, - expect_errors=True) - - def test_policy_disallow_detail(self): - self._common_policy_check( - "audit_template:detail", self.get_json, - '/audit_templates/detail', - expect_errors=True) - - def test_policy_disallow_update(self): - obj_utils.create_test_goal(self.context) - audit_template = obj_utils.create_test_audit_template(self.context) - self._common_policy_check( - "audit_template:update", self.patch_json, - '/audit_templates/%s' % audit_template.uuid, - [{'path': '/state', 'value': objects.audit.State.SUCCEEDED, - 'op': 'replace'}], expect_errors=True) - - def test_policy_disallow_create(self): - fake_goal1 = obj_utils.get_test_goal( - self.context, id=1, uuid=utils.generate_uuid(), name="dummy_1") - fake_goal1.create() - fake_strategy1 = obj_utils.get_test_strategy( - self.context, id=1, uuid=utils.generate_uuid(), name="strategy_1", - goal_id=fake_goal1.id) - fake_strategy1.create() - - audit_template_dict = post_get_test_audit_template( - goal=fake_goal1.uuid, - strategy=fake_strategy1.uuid) - self._common_policy_check( - "audit_template:create", self.post_json, '/audit_templates', - audit_template_dict, expect_errors=True) - - def test_policy_disallow_delete(self): - obj_utils.create_test_goal(self.context) - audit_template = obj_utils.create_test_audit_template(self.context) - self._common_policy_check( - "audit_template:delete", self.delete, - '/audit_templates/%s' % audit_template.uuid, expect_errors=True) - - -class 
TestAuditTemplatePolicyWithAdminContext(TestListAuditTemplate, - api_base.AdminRoleTest): - def setUp(self): - super(TestAuditTemplatePolicyWithAdminContext, self).setUp() - self.policy.set_rules({ - "admin_api": "(role:admin or role:administrator)", - "default": "rule:admin_api", - "audit_template:create": "rule:default", - "audit_template:delete": "rule:default", - "audit_template:detail": "rule:default", - "audit_template:get": "rule:default", - "audit_template:get_all": "rule:default", - "audit_template:update": "rule:default"}) diff --git a/watcher/tests/api/v1/test_audits.py b/watcher/tests/api/v1/test_audits.py deleted file mode 100644 index b26f62c..0000000 --- a/watcher/tests/api/v1/test_audits.py +++ /dev/null @@ -1,918 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import datetime -import itertools -import mock - -from oslo_config import cfg -from oslo_serialization import jsonutils -from oslo_utils import timeutils -from wsme import types as wtypes - -from six.moves.urllib import parse as urlparse -from watcher.api.controllers.v1 import audit as api_audit -from watcher.common import utils -from watcher.db import api as db_api -from watcher.decision_engine import rpcapi as deapi -from watcher import objects -from watcher.tests.api import base as api_base -from watcher.tests.api import utils as api_utils -from watcher.tests import base -from watcher.tests.db import utils as db_utils -from watcher.tests.objects import utils as obj_utils - - -def post_get_test_audit(**kw): - audit = api_utils.audit_post_data(**kw) - audit_template = db_utils.get_test_audit_template() - goal = db_utils.get_test_goal() - del_keys = ['goal_id', 'strategy_id'] - add_keys = {'audit_template_uuid': audit_template['uuid'], - 'goal': goal['uuid'], - } - for k in del_keys: - del audit[k] - for k in add_keys: - audit[k] = kw.get(k, add_keys[k]) - return audit - - -def post_get_test_audit_with_predefined_strategy(**kw): - spec = kw.pop('strategy_parameters_spec', {}) - strategy_id = 2 - strategy = db_utils.get_test_strategy(parameters_spec=spec, id=strategy_id) - audit = api_utils.audit_post_data(**kw) - audit_template = db_utils.get_test_audit_template( - strategy_id=strategy['id']) - del_keys = ['goal_id', 'strategy_id'] - add_keys = {'audit_template_uuid': audit_template['uuid'], - } - for k in del_keys: - del audit[k] - for k in add_keys: - audit[k] = kw.get(k, add_keys[k]) - return audit - - -class TestAuditObject(base.TestCase): - - def test_audit_init(self): - audit_dict = api_utils.audit_post_data(audit_template_id=None, - goal_id=None, - strategy_id=None) - del audit_dict['state'] - audit = api_audit.Audit(**audit_dict) - self.assertEqual(wtypes.Unset, audit.state) - - -class TestListAudit(api_base.FunctionalTest): - - def setUp(self): - 
super(TestListAudit, self).setUp() - obj_utils.create_test_goal(self.context) - obj_utils.create_test_strategy(self.context) - obj_utils.create_test_audit_template(self.context) - - def test_empty(self): - response = self.get_json('/audits') - self.assertEqual([], response['audits']) - - def _assert_audit_fields(self, audit): - audit_fields = ['audit_type', 'scope', 'state', 'goal_uuid', - 'strategy_uuid'] - for field in audit_fields: - self.assertIn(field, audit) - - def test_one(self): - audit = obj_utils.create_test_audit(self.context) - response = self.get_json('/audits') - self.assertEqual(audit.uuid, response['audits'][0]["uuid"]) - self._assert_audit_fields(response['audits'][0]) - - def test_one_soft_deleted(self): - audit = obj_utils.create_test_audit(self.context) - audit.soft_delete() - response = self.get_json('/audits', - headers={'X-Show-Deleted': 'True'}) - self.assertEqual(audit.uuid, response['audits'][0]["uuid"]) - self._assert_audit_fields(response['audits'][0]) - - response = self.get_json('/audits') - self.assertEqual([], response['audits']) - - def test_get_one(self): - audit = obj_utils.create_test_audit(self.context) - response = self.get_json('/audits/%s' % audit['uuid']) - self.assertEqual(audit.uuid, response['uuid']) - self._assert_audit_fields(response) - - def test_get_one_soft_deleted(self): - audit = obj_utils.create_test_audit(self.context) - audit.soft_delete() - response = self.get_json('/audits/%s' % audit['uuid'], - headers={'X-Show-Deleted': 'True'}) - self.assertEqual(audit.uuid, response['uuid']) - self._assert_audit_fields(response) - - response = self.get_json('/audits/%s' % audit['uuid'], - expect_errors=True) - self.assertEqual(404, response.status_int) - - def test_detail(self): - audit = obj_utils.create_test_audit(self.context) - response = self.get_json('/audits/detail') - self.assertEqual(audit.uuid, response['audits'][0]["uuid"]) - self._assert_audit_fields(response['audits'][0]) - - def 
test_detail_soft_deleted(self): - audit = obj_utils.create_test_audit(self.context) - audit.soft_delete() - response = self.get_json('/audits/detail', - headers={'X-Show-Deleted': 'True'}) - self.assertEqual(audit.uuid, response['audits'][0]["uuid"]) - self._assert_audit_fields(response['audits'][0]) - - response = self.get_json('/audits/detail') - self.assertEqual([], response['audits']) - - def test_detail_against_single(self): - audit = obj_utils.create_test_audit(self.context) - response = self.get_json('/audits/%s/detail' % audit['uuid'], - expect_errors=True) - self.assertEqual(404, response.status_int) - - def test_many(self): - audit_list = [] - for id_ in range(5): - audit = obj_utils.create_test_audit(self.context, id=id_, - uuid=utils.generate_uuid()) - audit_list.append(audit.uuid) - response = self.get_json('/audits') - self.assertEqual(len(audit_list), len(response['audits'])) - uuids = [s['uuid'] for s in response['audits']] - self.assertEqual(sorted(audit_list), sorted(uuids)) - - def test_many_without_soft_deleted(self): - audit_list = [] - for id_ in [1, 2, 3]: - audit = obj_utils.create_test_audit(self.context, id=id_, - uuid=utils.generate_uuid()) - audit_list.append(audit.uuid) - for id_ in [4, 5]: - audit = obj_utils.create_test_audit(self.context, id=id_, - uuid=utils.generate_uuid()) - audit.soft_delete() - response = self.get_json('/audits') - self.assertEqual(3, len(response['audits'])) - uuids = [s['uuid'] for s in response['audits']] - self.assertEqual(sorted(audit_list), sorted(uuids)) - - def test_many_with_soft_deleted(self): - audit_list = [] - for id_ in [1, 2, 3]: - audit = obj_utils.create_test_audit(self.context, id=id_, - uuid=utils.generate_uuid()) - audit_list.append(audit.uuid) - for id_ in [4, 5]: - audit = obj_utils.create_test_audit(self.context, id=id_, - uuid=utils.generate_uuid()) - audit.soft_delete() - audit_list.append(audit.uuid) - response = self.get_json('/audits', - headers={'X-Show-Deleted': 'True'}) - 
self.assertEqual(5, len(response['audits'])) - uuids = [s['uuid'] for s in response['audits']] - self.assertEqual(sorted(audit_list), sorted(uuids)) - - def test_many_with_sort_key_goal_uuid(self): - goal_list = [] - for id_ in range(5): - goal = obj_utils.create_test_goal( - self.context, - name='gl{0}'.format(id_), - uuid=utils.generate_uuid()) - obj_utils.create_test_audit( - self.context, id=id_, uuid=utils.generate_uuid(), - goal_id=goal.id) - goal_list.append(goal.uuid) - - response = self.get_json('/audits/?sort_key=goal_uuid') - - self.assertEqual(5, len(response['audits'])) - uuids = [s['goal_uuid'] for s in response['audits']] - self.assertEqual(sorted(goal_list), uuids) - - def test_links(self): - uuid = utils.generate_uuid() - obj_utils.create_test_audit(self.context, id=1, uuid=uuid) - response = self.get_json('/audits/%s' % uuid) - self.assertIn('links', response.keys()) - self.assertEqual(2, len(response['links'])) - self.assertIn(uuid, response['links'][0]['href']) - for l in response['links']: - bookmark = l['rel'] == 'bookmark' - self.assertTrue(self.validate_link(l['href'], bookmark=bookmark)) - - def test_collection_links(self): - for id_ in range(5): - obj_utils.create_test_audit(self.context, id=id_, - uuid=utils.generate_uuid()) - response = self.get_json('/audits/?limit=3') - self.assertEqual(3, len(response['audits'])) - - next_marker = response['audits'][-1]['uuid'] - self.assertIn(next_marker, response['next']) - - def test_collection_links_default_limit(self): - cfg.CONF.set_override('max_limit', 3, 'api') - for id_ in range(5): - obj_utils.create_test_audit(self.context, id=id_, - uuid=utils.generate_uuid()) - response = self.get_json('/audits') - self.assertEqual(3, len(response['audits'])) - - next_marker = response['audits'][-1]['uuid'] - self.assertIn(next_marker, response['next']) - - -class TestPatch(api_base.FunctionalTest): - - def setUp(self): - super(TestPatch, self).setUp() - obj_utils.create_test_goal(self.context) - 
obj_utils.create_test_strategy(self.context) - obj_utils.create_test_audit_template(self.context) - self.audit = obj_utils.create_test_audit(self.context) - p = mock.patch.object(db_api.BaseConnection, 'update_audit') - self.mock_audit_update = p.start() - self.mock_audit_update.side_effect = self._simulate_rpc_audit_update - self.addCleanup(p.stop) - - def _simulate_rpc_audit_update(self, audit): - audit.save() - return audit - - @mock.patch('oslo_utils.timeutils.utcnow') - def test_replace_ok(self, mock_utcnow): - test_time = datetime.datetime(2000, 1, 1, 0, 0) - mock_utcnow.return_value = test_time - - new_state = objects.audit.State.CANCELLED - response = self.get_json('/audits/%s' % self.audit.uuid) - self.assertNotEqual(new_state, response['state']) - - response = self.patch_json( - '/audits/%s' % self.audit.uuid, - [{'path': '/state', 'value': new_state, - 'op': 'replace'}]) - self.assertEqual('application/json', response.content_type) - self.assertEqual(200, response.status_code) - - response = self.get_json('/audits/%s' % self.audit.uuid) - self.assertEqual(new_state, response['state']) - return_updated_at = timeutils.parse_isotime( - response['updated_at']).replace(tzinfo=None) - self.assertEqual(test_time, return_updated_at) - - def test_replace_non_existent_audit(self): - response = self.patch_json( - '/audits/%s' % utils.generate_uuid(), - [{'path': '/state', 'value': objects.audit.State.SUCCEEDED, - 'op': 'replace'}], expect_errors=True) - self.assertEqual(404, response.status_int) - self.assertEqual('application/json', response.content_type) - self.assertTrue(response.json['error_message']) - - def test_add_ok(self): - new_state = objects.audit.State.SUCCEEDED - response = self.patch_json( - '/audits/%s' % self.audit.uuid, - [{'path': '/state', 'value': new_state, 'op': 'add'}]) - self.assertEqual('application/json', response.content_type) - self.assertEqual(200, response.status_int) - - response = self.get_json('/audits/%s' % self.audit.uuid) - 
self.assertEqual(new_state, response['state']) - - def test_add_non_existent_property(self): - response = self.patch_json( - '/audits/%s' % self.audit.uuid, - [{'path': '/foo', 'value': 'bar', 'op': 'add'}], - expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(400, response.status_int) - self.assertTrue(response.json['error_message']) - - def test_remove_ok(self): - response = self.get_json('/audits/%s' % self.audit.uuid) - self.assertIsNotNone(response['interval']) - - response = self.patch_json('/audits/%s' % self.audit.uuid, - [{'path': '/interval', 'op': 'remove'}]) - self.assertEqual('application/json', response.content_type) - self.assertEqual(200, response.status_code) - - response = self.get_json('/audits/%s' % self.audit.uuid) - self.assertIsNone(response['interval']) - - def test_remove_uuid(self): - response = self.patch_json('/audits/%s' % self.audit.uuid, - [{'path': '/uuid', 'op': 'remove'}], - expect_errors=True) - self.assertEqual(400, response.status_int) - self.assertEqual('application/json', response.content_type) - self.assertTrue(response.json['error_message']) - - def test_remove_non_existent_property(self): - response = self.patch_json( - '/audits/%s' % self.audit.uuid, - [{'path': '/non-existent', 'op': 'remove'}], - expect_errors=True) - self.assertEqual(400, response.status_code) - self.assertEqual('application/json', response.content_type) - self.assertTrue(response.json['error_message']) - - -ALLOWED_TRANSITIONS = [ - {"original_state": key, "new_state": value} - for key, values in ( - objects.audit.AuditStateTransitionManager.TRANSITIONS.items()) - for value in values] - - -class TestPatchStateTransitionDenied(api_base.FunctionalTest): - - STATES = [ - ap_state for ap_state in objects.audit.State.__dict__ - if not ap_state.startswith("_") - ] - - scenarios = [ - ( - "%s -> %s" % (original_state, new_state), - {"original_state": original_state, - "new_state": new_state}, - ) - for 
original_state, new_state - in list(itertools.product(STATES, STATES)) - if original_state != new_state - and {"original_state": original_state, - "new_state": new_state} not in ALLOWED_TRANSITIONS - ] - - def setUp(self): - super(TestPatchStateTransitionDenied, self).setUp() - obj_utils.create_test_goal(self.context) - obj_utils.create_test_strategy(self.context) - obj_utils.create_test_audit_template(self.context) - self.audit = obj_utils.create_test_audit(self.context, - state=self.original_state) - p = mock.patch.object(db_api.BaseConnection, 'update_audit') - self.mock_audit_update = p.start() - self.mock_audit_update.side_effect = self._simulate_rpc_audit_update - self.addCleanup(p.stop) - - def _simulate_rpc_audit_update(self, audit): - audit.save() - return audit - - def test_replace_denied(self): - response = self.get_json('/audits/%s' % self.audit.uuid) - self.assertNotEqual(self.new_state, response['state']) - - response = self.patch_json( - '/audits/%s' % self.audit.uuid, - [{'path': '/state', 'value': self.new_state, - 'op': 'replace'}], - expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(400, response.status_code) - self.assertTrue(response.json['error_message']) - - response = self.get_json('/audits/%s' % self.audit.uuid) - self.assertEqual(self.original_state, response['state']) - - -class TestPatchStateTransitionOk(api_base.FunctionalTest): - - scenarios = [ - ( - "%s -> %s" % (transition["original_state"], - transition["new_state"]), - transition - ) - for transition in ALLOWED_TRANSITIONS - ] - - def setUp(self): - super(TestPatchStateTransitionOk, self).setUp() - obj_utils.create_test_goal(self.context) - obj_utils.create_test_strategy(self.context) - obj_utils.create_test_audit_template(self.context) - self.audit = obj_utils.create_test_audit(self.context, - state=self.original_state) - p = mock.patch.object(db_api.BaseConnection, 'update_audit') - self.mock_audit_update = p.start() - 
self.mock_audit_update.side_effect = self._simulate_rpc_audit_update - self.addCleanup(p.stop) - - def _simulate_rpc_audit_update(self, audit): - audit.save() - return audit - - @mock.patch('oslo_utils.timeutils.utcnow') - def test_replace_ok(self, mock_utcnow): - test_time = datetime.datetime(2000, 1, 1, 0, 0) - mock_utcnow.return_value = test_time - - response = self.get_json('/audits/%s' % self.audit.uuid) - self.assertNotEqual(self.new_state, response['state']) - - response = self.patch_json( - '/audits/%s' % self.audit.uuid, - [{'path': '/state', 'value': self.new_state, - 'op': 'replace'}]) - self.assertEqual('application/json', response.content_type) - self.assertEqual(200, response.status_code) - - response = self.get_json('/audits/%s' % self.audit.uuid) - self.assertEqual(self.new_state, response['state']) - return_updated_at = timeutils.parse_isotime( - response['updated_at']).replace(tzinfo=None) - self.assertEqual(test_time, return_updated_at) - - -class TestPost(api_base.FunctionalTest): - - def setUp(self): - super(TestPost, self).setUp() - obj_utils.create_test_goal(self.context) - obj_utils.create_test_strategy(self.context) - obj_utils.create_test_audit_template(self.context) - p = mock.patch.object(db_api.BaseConnection, 'create_audit') - self.mock_create_audit = p.start() - self.mock_create_audit.side_effect = ( - self._simulate_rpc_audit_create) - self.addCleanup(p.stop) - - def _simulate_rpc_audit_create(self, audit): - audit.create() - return audit - - @mock.patch.object(deapi.DecisionEngineAPI, 'trigger_audit') - @mock.patch('oslo_utils.timeutils.utcnow') - def test_create_audit(self, mock_utcnow, mock_trigger_audit): - mock_trigger_audit.return_value = mock.ANY - test_time = datetime.datetime(2000, 1, 1, 0, 0) - mock_utcnow.return_value = test_time - - audit_dict = post_get_test_audit(state=objects.audit.State.PENDING) - del audit_dict['uuid'] - del audit_dict['state'] - del audit_dict['interval'] - del audit_dict['scope'] - del 
audit_dict['next_run_time'] - - response = self.post_json('/audits', audit_dict) - self.assertEqual('application/json', response.content_type) - self.assertEqual(201, response.status_int) - # Check location header - self.assertIsNotNone(response.location) - expected_location = '/v1/audits/%s' % response.json['uuid'] - self.assertEqual(urlparse.urlparse(response.location).path, - expected_location) - self.assertEqual(objects.audit.State.PENDING, - response.json['state']) - self.assertNotIn('updated_at', response.json.keys) - self.assertNotIn('deleted_at', response.json.keys) - return_created_at = timeutils.parse_isotime( - response.json['created_at']).replace(tzinfo=None) - self.assertEqual(test_time, return_created_at) - - @mock.patch.object(deapi.DecisionEngineAPI, 'trigger_audit') - @mock.patch('oslo_utils.timeutils.utcnow') - def test_create_audit_with_state_not_allowed(self, mock_utcnow, - mock_trigger_audit): - mock_trigger_audit.return_value = mock.ANY - test_time = datetime.datetime(2000, 1, 1, 0, 0) - mock_utcnow.return_value = test_time - - audit_dict = post_get_test_audit(state=objects.audit.State.SUCCEEDED) - - response = self.post_json('/audits', audit_dict, expect_errors=True) - self.assertEqual(400, response.status_int) - self.assertEqual('application/json', response.content_type) - self.assertTrue(response.json['error_message']) - - @mock.patch('oslo_utils.timeutils.utcnow') - def test_create_audit_invalid_audit_template_uuid(self, mock_utcnow): - test_time = datetime.datetime(2000, 1, 1, 0, 0) - mock_utcnow.return_value = test_time - - audit_dict = post_get_test_audit() - del audit_dict['uuid'] - del audit_dict['state'] - del audit_dict['interval'] - del audit_dict['scope'] - del audit_dict['next_run_time'] - # Make the audit template UUID some garbage value - audit_dict['audit_template_uuid'] = ( - '01234567-8910-1112-1314-151617181920') - - response = self.post_json('/audits', audit_dict, expect_errors=True) - self.assertEqual(400, 
response.status_int) - self.assertEqual("application/json", response.content_type) - expected_error_msg = ('The audit template UUID or name specified is ' - 'invalid') - self.assertTrue(response.json['error_message']) - self.assertIn(expected_error_msg, response.json['error_message']) - - @mock.patch.object(deapi.DecisionEngineAPI, 'trigger_audit') - def test_create_audit_doesnt_contain_id(self, mock_trigger_audit): - mock_trigger_audit.return_value = mock.ANY - - audit_dict = post_get_test_audit(state=objects.audit.State.PENDING) - state = audit_dict['state'] - del audit_dict['uuid'] - del audit_dict['state'] - del audit_dict['interval'] - del audit_dict['scope'] - del audit_dict['next_run_time'] - with mock.patch.object(self.dbapi, 'create_audit', - wraps=self.dbapi.create_audit) as cn_mock: - response = self.post_json('/audits', audit_dict) - self.assertEqual(state, response.json['state']) - cn_mock.assert_called_once_with(mock.ANY) - # Check that 'id' is not in first arg of positional args - self.assertNotIn('id', cn_mock.call_args[0][0]) - - @mock.patch.object(deapi.DecisionEngineAPI, 'trigger_audit') - def test_create_audit_generate_uuid(self, mock_trigger_audit): - mock_trigger_audit.return_value = mock.ANY - - audit_dict = post_get_test_audit() - del audit_dict['uuid'] - del audit_dict['state'] - del audit_dict['interval'] - del audit_dict['scope'] - del audit_dict['next_run_time'] - - response = self.post_json('/audits', audit_dict) - self.assertEqual('application/json', response.content_type) - self.assertEqual(201, response.status_int) - self.assertEqual(objects.audit.State.PENDING, - response.json['state']) - self.assertTrue(utils.is_uuid_like(response.json['uuid'])) - - @mock.patch.object(deapi.DecisionEngineAPI, 'trigger_audit') - def test_create_continuous_audit_with_interval(self, mock_trigger_audit): - mock_trigger_audit.return_value = mock.ANY - - audit_dict = post_get_test_audit() - del audit_dict['uuid'] - del audit_dict['state'] - del 
audit_dict['scope'] - del audit_dict['next_run_time'] - audit_dict['audit_type'] = objects.audit.AuditType.CONTINUOUS.value - audit_dict['interval'] = '1200' - - response = self.post_json('/audits', audit_dict) - self.assertEqual('application/json', response.content_type) - self.assertEqual(201, response.status_int) - self.assertEqual(objects.audit.State.PENDING, - response.json['state']) - self.assertEqual(audit_dict['interval'], response.json['interval']) - self.assertTrue(utils.is_uuid_like(response.json['uuid'])) - - @mock.patch.object(deapi.DecisionEngineAPI, 'trigger_audit') - def test_create_continuous_audit_with_cron_interval(self, - mock_trigger_audit): - mock_trigger_audit.return_value = mock.ANY - - audit_dict = post_get_test_audit() - del audit_dict['uuid'] - del audit_dict['state'] - del audit_dict['scope'] - del audit_dict['next_run_time'] - audit_dict['audit_type'] = objects.audit.AuditType.CONTINUOUS.value - audit_dict['interval'] = '* * * * *' - - response = self.post_json('/audits', audit_dict) - self.assertEqual('application/json', response.content_type) - self.assertEqual(201, response.status_int) - self.assertEqual(objects.audit.State.PENDING, - response.json['state']) - self.assertEqual(audit_dict['interval'], response.json['interval']) - self.assertTrue(utils.is_uuid_like(response.json['uuid'])) - - @mock.patch.object(deapi.DecisionEngineAPI, 'trigger_audit') - def test_create_continuous_audit_with_wrong_interval(self, - mock_trigger_audit): - mock_trigger_audit.return_value = mock.ANY - - audit_dict = post_get_test_audit() - del audit_dict['uuid'] - del audit_dict['state'] - del audit_dict['scope'] - del audit_dict['next_run_time'] - audit_dict['audit_type'] = objects.audit.AuditType.CONTINUOUS.value - audit_dict['interval'] = 'zxc' - - response = self.post_json('/audits', audit_dict, expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(500, response.status_int) - expected_error_msg = ('Exactly 
5 or 6 columns has to be ' - 'specified for iteratorexpression.') - self.assertTrue(response.json['error_message']) - self.assertIn(expected_error_msg, response.json['error_message']) - - @mock.patch.object(deapi.DecisionEngineAPI, 'trigger_audit') - def test_create_continuous_audit_without_period(self, mock_trigger_audit): - mock_trigger_audit.return_value = mock.ANY - - audit_dict = post_get_test_audit() - del audit_dict['uuid'] - del audit_dict['state'] - audit_dict['audit_type'] = objects.audit.AuditType.CONTINUOUS.value - del audit_dict['interval'] - del audit_dict['scope'] - del audit_dict['next_run_time'] - - response = self.post_json('/audits', audit_dict, expect_errors=True) - self.assertEqual(400, response.status_int) - self.assertEqual('application/json', response.content_type) - expected_error_msg = ('Interval of audit must be specified ' - 'for CONTINUOUS.') - self.assertTrue(response.json['error_message']) - self.assertIn(expected_error_msg, response.json['error_message']) - - @mock.patch.object(deapi.DecisionEngineAPI, 'trigger_audit') - def test_create_oneshot_audit_with_period(self, mock_trigger_audit): - mock_trigger_audit.return_value = mock.ANY - - audit_dict = post_get_test_audit() - del audit_dict['uuid'] - del audit_dict['state'] - audit_dict['audit_type'] = objects.audit.AuditType.ONESHOT.value - del audit_dict['scope'] - del audit_dict['next_run_time'] - - response = self.post_json('/audits', audit_dict, expect_errors=True) - self.assertEqual(400, response.status_int) - self.assertEqual('application/json', response.content_type) - expected_error_msg = 'Interval of audit must not be set for ONESHOT.' 
- self.assertTrue(response.json['error_message']) - self.assertIn(expected_error_msg, response.json['error_message']) - - def test_create_audit_trigger_decision_engine(self): - with mock.patch.object(deapi.DecisionEngineAPI, - 'trigger_audit') as de_mock: - audit_dict = post_get_test_audit(state=objects.audit.State.PENDING) - del audit_dict['uuid'] - del audit_dict['state'] - del audit_dict['interval'] - del audit_dict['scope'] - del audit_dict['next_run_time'] - response = self.post_json('/audits', audit_dict) - de_mock.assert_called_once_with(mock.ANY, response.json['uuid']) - - @mock.patch.object(deapi.DecisionEngineAPI, 'trigger_audit') - def test_create_audit_with_uuid(self, mock_trigger_audit): - mock_trigger_audit.return_value = mock.ANY - - audit_dict = post_get_test_audit(state=objects.audit.State.PENDING) - del audit_dict['scope'] - response = self.post_json('/audits', audit_dict, expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(400, response.status_int) - assert not mock_trigger_audit.called - - @mock.patch.object(deapi.DecisionEngineAPI, 'trigger_audit') - def test_create_audit_parameters_no_predefined_strategy( - self, mock_trigger_audit): - mock_trigger_audit.return_value = mock.ANY - audit_dict = post_get_test_audit(parameters={'name': 'Tom'}) - del audit_dict['uuid'] - del audit_dict['state'] - del audit_dict['interval'] - del audit_dict['scope'] - del audit_dict['next_run_time'] - - response = self.post_json('/audits', audit_dict, expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(400, response.status_int) - expected_error_msg = ('Specify parameters but no predefined ' - 'strategy for audit template, or no ' - 'parameter spec in predefined strategy') - self.assertTrue(response.json['error_message']) - self.assertIn(expected_error_msg, response.json['error_message']) - assert not mock_trigger_audit.called - - 
@mock.patch.object(deapi.DecisionEngineAPI, 'trigger_audit') - def test_create_audit_parameters_no_schema( - self, mock_trigger_audit): - mock_trigger_audit.return_value = mock.ANY - audit_dict = post_get_test_audit_with_predefined_strategy( - parameters={'name': 'Tom'}) - del audit_dict['uuid'] - del audit_dict['state'] - del audit_dict['interval'] - del audit_dict['scope'] - del audit_dict['next_run_time'] - - response = self.post_json('/audits', audit_dict, expect_errors=True) - self.assertEqual('application/json', response.content_type) - self.assertEqual(400, response.status_int) - expected_error_msg = ('Specify parameters but no predefined ' - 'strategy for audit template, or no ' - 'parameter spec in predefined strategy') - self.assertTrue(response.json['error_message']) - self.assertIn(expected_error_msg, response.json['error_message']) - assert not mock_trigger_audit.called - - @mock.patch.object(deapi.DecisionEngineAPI, 'trigger_audit') - def test_create_audit_with_parameter_not_allowed( - self, mock_trigger_audit): - mock_trigger_audit.return_value = mock.ANY - audit_template = self.prepare_audit_template_strategy_with_parameter() - - audit_dict = api_utils.audit_post_data( - parameters={'fake1': 1, 'fake2': "hello"}) - - audit_dict['audit_template_uuid'] = audit_template['uuid'] - del_keys = ['uuid', 'goal_id', 'strategy_id', 'state', 'interval', - 'scope', 'next_run_time'] - for k in del_keys: - del audit_dict[k] - - response = self.post_json('/audits', audit_dict, expect_errors=True) - self.assertEqual(400, response.status_int) - self.assertEqual("application/json", response.content_type) - expected_error_msg = 'Audit parameter fake2 are not allowed' - self.assertTrue(response.json['error_message']) - self.assertIn(expected_error_msg, response.json['error_message']) - assert not mock_trigger_audit.called - - def prepare_audit_template_strategy_with_parameter(self): - fake_spec = { - "properties": { - "fake1": { - "description": "number parameter 
example", - "type": "number", - "default": 3.2, - "minimum": 1.0, - "maximum": 10.2, - } - } - } - template_uuid = 'e74c40e0-d825-11e2-a28f-0800200c9a67' - strategy_uuid = 'e74c40e0-d825-11e2-a28f-0800200c9a68' - template_name = 'my template' - strategy_name = 'my strategy' - strategy_id = 3 - strategy = db_utils.get_test_strategy(parameters_spec=fake_spec, - id=strategy_id, - uuid=strategy_uuid, - name=strategy_name) - obj_utils.create_test_strategy(self.context, - parameters_spec=fake_spec, - id=strategy_id, - uuid=strategy_uuid, - name=strategy_name) - obj_utils.create_test_audit_template(self.context, - strategy_id=strategy_id, - uuid=template_uuid, - name='name') - audit_template = db_utils.get_test_audit_template( - strategy_id=strategy['id'], uuid=template_uuid, name=template_name) - return audit_template - - -class TestDelete(api_base.FunctionalTest): - - def setUp(self): - super(TestDelete, self).setUp() - obj_utils.create_test_goal(self.context) - obj_utils.create_test_strategy(self.context) - obj_utils.create_test_audit_template(self.context) - self.audit = obj_utils.create_test_audit(self.context) - p = mock.patch.object(db_api.BaseConnection, 'update_audit') - self.mock_audit_update = p.start() - self.mock_audit_update.side_effect = self._simulate_rpc_audit_update - self.addCleanup(p.stop) - - def _simulate_rpc_audit_update(self, audit): - audit.save() - return audit - - @mock.patch('oslo_utils.timeutils.utcnow') - def test_delete_audit(self, mock_utcnow): - test_time = datetime.datetime(2000, 1, 1, 0, 0) - mock_utcnow.return_value = test_time - self.delete('/audits/%s' % self.audit.uuid) - response = self.get_json('/audits/%s' % self.audit.uuid, - expect_errors=True) - self.assertEqual(404, response.status_int) - self.assertEqual('application/json', response.content_type) - self.assertTrue(response.json['error_message']) - - self.context.show_deleted = True - audit = objects.Audit.get_by_uuid(self.context, self.audit.uuid) - - return_deleted_at = 
timeutils.strtime(audit['deleted_at']) - self.assertEqual(timeutils.strtime(test_time), return_deleted_at) - self.assertEqual(objects.audit.State.DELETED, audit['state']) - - def test_delete_audit_not_found(self): - uuid = utils.generate_uuid() - response = self.delete('/audits/%s' % uuid, expect_errors=True) - self.assertEqual(404, response.status_int) - self.assertEqual('application/json', response.content_type) - self.assertTrue(response.json['error_message']) - - -class TestAuditPolicyEnforcement(api_base.FunctionalTest): - - def setUp(self): - super(TestAuditPolicyEnforcement, self).setUp() - obj_utils.create_test_goal(self.context) - - def _common_policy_check(self, rule, func, *arg, **kwarg): - self.policy.set_rules({ - "admin_api": "(role:admin or role:administrator)", - "default": "rule:admin_api", - rule: "rule:defaut"}) - response = func(*arg, **kwarg) - self.assertEqual(403, response.status_int) - self.assertEqual('application/json', response.content_type) - self.assertTrue( - "Policy doesn't allow %s to be performed." 
% rule, - jsonutils.loads(response.json['error_message'])['faultstring']) - - def test_policy_disallow_get_all(self): - self._common_policy_check( - "audit:get_all", self.get_json, '/audits', - expect_errors=True) - - def test_policy_disallow_get_one(self): - audit = obj_utils.create_test_audit(self.context) - self._common_policy_check( - "audit:get", self.get_json, - '/audits/%s' % audit.uuid, - expect_errors=True) - - def test_policy_disallow_detail(self): - self._common_policy_check( - "audit:detail", self.get_json, - '/audits/detail', - expect_errors=True) - - def test_policy_disallow_update(self): - audit = obj_utils.create_test_audit(self.context) - self._common_policy_check( - "audit:update", self.patch_json, - '/audits/%s' % audit.uuid, - [{'path': '/state', 'value': objects.audit.State.SUCCEEDED, - 'op': 'replace'}], expect_errors=True) - - def test_policy_disallow_create(self): - audit_dict = post_get_test_audit(state=objects.audit.State.PENDING) - del audit_dict['uuid'] - del audit_dict['state'] - del audit_dict['scope'] - del audit_dict['next_run_time'] - self._common_policy_check( - "audit:create", self.post_json, '/audits', audit_dict, - expect_errors=True) - - def test_policy_disallow_delete(self): - audit = obj_utils.create_test_audit(self.context) - self._common_policy_check( - "audit:delete", self.delete, - '/audits/%s' % audit.uuid, expect_errors=True) - - -class TestAuditEnforcementWithAdminContext(TestListAudit, - api_base.AdminRoleTest): - - def setUp(self): - super(TestAuditEnforcementWithAdminContext, self).setUp() - self.policy.set_rules({ - "admin_api": "(role:admin or role:administrator)", - "default": "rule:admin_api", - "audit:create": "rule:default", - "audit:delete": "rule:default", - "audit:detail": "rule:default", - "audit:get": "rule:default", - "audit:get_all": "rule:default", - "audit:update": "rule:default"}) diff --git a/watcher/tests/api/v1/test_goals.py b/watcher/tests/api/v1/test_goals.py deleted file mode 100644 index 
6c71c15..0000000 --- a/watcher/tests/api/v1/test_goals.py +++ /dev/null @@ -1,167 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from oslo_config import cfg -from oslo_serialization import jsonutils -from six.moves.urllib import parse as urlparse - -from watcher.common import utils -from watcher.tests.api import base as api_base -from watcher.tests.objects import utils as obj_utils - - -class TestListGoal(api_base.FunctionalTest): - - def _assert_goal_fields(self, goal): - goal_fields = ['uuid', 'name', 'display_name', - 'efficacy_specification'] - for field in goal_fields: - self.assertIn(field, goal) - - def test_one(self): - goal = obj_utils.create_test_goal(self.context) - response = self.get_json('/goals') - self.assertEqual(goal.uuid, response['goals'][0]["uuid"]) - self._assert_goal_fields(response['goals'][0]) - - def test_get_one_by_uuid(self): - goal = obj_utils.create_test_goal(self.context) - response = self.get_json('/goals/%s' % goal.uuid) - self.assertEqual(goal.uuid, response["uuid"]) - self.assertEqual(goal.name, response["name"]) - self._assert_goal_fields(response) - - def test_get_one_by_name(self): - goal = obj_utils.create_test_goal(self.context) - response = self.get_json(urlparse.quote( - '/goals/%s' % goal['name'])) - self.assertEqual(goal.uuid, response['uuid']) - self._assert_goal_fields(response) - - def test_get_one_soft_deleted(self): - goal = obj_utils.create_test_goal(self.context) - goal.soft_delete() - response = 
self.get_json( - '/goals/%s' % goal['uuid'], - headers={'X-Show-Deleted': 'True'}) - self.assertEqual(goal.uuid, response['uuid']) - self._assert_goal_fields(response) - - response = self.get_json( - '/goals/%s' % goal['uuid'], - expect_errors=True) - self.assertEqual(404, response.status_int) - - def test_detail(self): - goal = obj_utils.create_test_goal(self.context) - response = self.get_json('/goals/detail') - self.assertEqual(goal.uuid, response['goals'][0]["uuid"]) - self._assert_goal_fields(response['goals'][0]) - - def test_detail_against_single(self): - goal = obj_utils.create_test_goal(self.context) - response = self.get_json('/goals/%s/detail' % goal.uuid, - expect_errors=True) - self.assertEqual(404, response.status_int) - - def test_many(self): - goal_list = [] - for idx in range(1, 6): - goal = obj_utils.create_test_goal( - self.context, id=idx, - uuid=utils.generate_uuid(), - name='GOAL_{0}'.format(idx)) - goal_list.append(goal.uuid) - response = self.get_json('/goals') - self.assertGreater(len(response['goals']), 2) - - def test_many_without_soft_deleted(self): - goal_list = [] - for id_ in [1, 2, 3]: - goal = obj_utils.create_test_goal( - self.context, id=id_, uuid=utils.generate_uuid(), - name='GOAL_{0}'.format(id_)) - goal_list.append(goal.uuid) - for id_ in [4, 5]: - goal = obj_utils.create_test_goal( - self.context, id=id_, uuid=utils.generate_uuid(), - name='GOAL_{0}'.format(id_)) - goal.soft_delete() - response = self.get_json('/goals') - self.assertEqual(3, len(response['goals'])) - uuids = [s['uuid'] for s in response['goals']] - self.assertEqual(sorted(goal_list), sorted(uuids)) - - def test_goals_collection_links(self): - for idx in range(1, 6): - obj_utils.create_test_goal( - self.context, id=idx, - uuid=utils.generate_uuid(), - name='GOAL_{0}'.format(idx)) - response = self.get_json('/goals/?limit=2') - self.assertEqual(2, len(response['goals'])) - - def test_goals_collection_links_default_limit(self): - for idx in range(1, 6): - 
obj_utils.create_test_goal( - self.context, id=idx, - uuid=utils.generate_uuid(), - name='GOAL_{0}'.format(idx)) - cfg.CONF.set_override('max_limit', 3, 'api') - response = self.get_json('/goals') - self.assertEqual(3, len(response['goals'])) - - -class TestGoalPolicyEnforcement(api_base.FunctionalTest): - - def _common_policy_check(self, rule, func, *arg, **kwarg): - self.policy.set_rules({ - "admin_api": "(role:admin or role:administrator)", - "default": "rule:admin_api", - rule: "rule:defaut"}) - response = func(*arg, **kwarg) - self.assertEqual(403, response.status_int) - self.assertEqual('application/json', response.content_type) - self.assertTrue( - "Policy doesn't allow %s to be performed." % rule, - jsonutils.loads(response.json['error_message'])['faultstring']) - - def test_policy_disallow_get_all(self): - self._common_policy_check( - "goal:get_all", self.get_json, '/goals', - expect_errors=True) - - def test_policy_disallow_get_one(self): - goal = obj_utils.create_test_goal(self.context) - self._common_policy_check( - "goal:get", self.get_json, - '/goals/%s' % goal.uuid, - expect_errors=True) - - def test_policy_disallow_detail(self): - self._common_policy_check( - "goal:detail", self.get_json, - '/goals/detail', - expect_errors=True) - - -class TestGoalPolicyEnforcementWithAdminContext(TestListGoal, - api_base.AdminRoleTest): - - def setUp(self): - super(TestGoalPolicyEnforcementWithAdminContext, self).setUp() - self.policy.set_rules({ - "admin_api": "(role:admin or role:administrator)", - "default": "rule:admin_api", - "goal:detail": "rule:default", - "goal:get_all": "rule:default", - "goal:get_one": "rule:default"}) diff --git a/watcher/tests/api/v1/test_root.py b/watcher/tests/api/v1/test_root.py deleted file mode 100644 index 2cac444..0000000 --- a/watcher/tests/api/v1/test_root.py +++ /dev/null @@ -1,20 +0,0 @@ -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from watcher.tests.api import base as api_base - - -class TestV1Routing(api_base.FunctionalTest): - def setUp(self): - super(TestV1Routing, self).setUp() diff --git a/watcher/tests/api/v1/test_scoring_engines.py b/watcher/tests/api/v1/test_scoring_engines.py deleted file mode 100644 index 2e7b3cc..0000000 --- a/watcher/tests/api/v1/test_scoring_engines.py +++ /dev/null @@ -1,160 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from oslo_config import cfg -from oslo_serialization import jsonutils -from watcher.common import utils - -from watcher.tests.api import base as api_base -from watcher.tests.objects import utils as obj_utils - - -class TestListScoringEngine(api_base.FunctionalTest): - - def _assert_scoring_engine_fields(self, scoring_engine): - scoring_engine_fields = ['uuid', 'name', 'description', 'metainfo'] - for field in scoring_engine_fields: - self.assertIn(field, scoring_engine) - - def test_one(self): - scoring_engine = obj_utils.create_test_scoring_engine(self.context) - response = self.get_json('/scoring_engines') - self.assertEqual( - scoring_engine.name, response['scoring_engines'][0]['name']) - self._assert_scoring_engine_fields(response['scoring_engines'][0]) - - def test_get_one_soft_deleted(self): - scoring_engine = obj_utils.create_test_scoring_engine(self.context) - scoring_engine.soft_delete() - response = self.get_json( - '/scoring_engines/%s' % scoring_engine['name'], - headers={'X-Show-Deleted': 'True'}) - self.assertEqual(scoring_engine.name, response['name']) - self._assert_scoring_engine_fields(response) - - response = self.get_json( - '/scoring_engines/%s' % scoring_engine['name'], - expect_errors=True) - self.assertEqual(404, response.status_int) - - def test_detail(self): - obj_utils.create_test_goal(self.context) - scoring_engine = obj_utils.create_test_scoring_engine(self.context) - response = self.get_json('/scoring_engines/detail') - self.assertEqual( - scoring_engine.name, response['scoring_engines'][0]['name']) - self._assert_scoring_engine_fields(response['scoring_engines'][0]) - for scoring_engine in response['scoring_engines']: - self.assertTrue( - all(val is not None for key, val in scoring_engine.items() - if key in ['uuid', 'name', 'description', 'metainfo'])) - - def test_detail_against_single(self): - scoring_engine = obj_utils.create_test_scoring_engine(self.context) - response = self.get_json( - '/scoring_engines/%s/detail' % 
scoring_engine.id, - expect_errors=True) - self.assertEqual(404, response.status_int) - - def test_many(self): - scoring_engine_list = [] - for idx in range(1, 6): - scoring_engine = obj_utils.create_test_scoring_engine( - self.context, id=idx, uuid=utils.generate_uuid(), - name=str(idx), description='SE_{0}'.format(idx)) - scoring_engine_list.append(scoring_engine.name) - response = self.get_json('/scoring_engines') - self.assertEqual(5, len(response['scoring_engines'])) - for scoring_engine in response['scoring_engines']: - self.assertTrue( - all(val is not None for key, val in scoring_engine.items() - if key in ['name', 'description', 'metainfo'])) - - def test_many_without_soft_deleted(self): - scoring_engine_list = [] - for id_ in [1, 2, 3]: - scoring_engine = obj_utils.create_test_scoring_engine( - self.context, id=id_, uuid=utils.generate_uuid(), - name=str(id_), description='SE_{0}'.format(id_)) - scoring_engine_list.append(scoring_engine.name) - for id_ in [4, 5]: - scoring_engine = obj_utils.create_test_scoring_engine( - self.context, id=id_, uuid=utils.generate_uuid(), - name=str(id_), description='SE_{0}'.format(id_)) - scoring_engine.soft_delete() - response = self.get_json('/scoring_engines') - self.assertEqual(3, len(response['scoring_engines'])) - names = [s['name'] for s in response['scoring_engines']] - self.assertEqual(sorted(scoring_engine_list), sorted(names)) - - def test_scoring_engines_collection_links(self): - for idx in range(1, 6): - obj_utils.create_test_scoring_engine( - self.context, id=idx, uuid=utils.generate_uuid(), - name=str(idx), description='SE_{0}'.format(idx)) - response = self.get_json('/scoring_engines/?limit=2') - self.assertEqual(2, len(response['scoring_engines'])) - - def test_scoring_engines_collection_links_default_limit(self): - for idx in range(1, 6): - obj_utils.create_test_scoring_engine( - self.context, id=idx, uuid=utils.generate_uuid(), - name=str(idx), description='SE_{0}'.format(idx)) - 
cfg.CONF.set_override('max_limit', 3, 'api') - response = self.get_json('/scoring_engines') - self.assertEqual(3, len(response['scoring_engines'])) - - -class TestScoringEnginePolicyEnforcement(api_base.FunctionalTest): - - def _common_policy_check(self, rule, func, *arg, **kwarg): - self.policy.set_rules({ - "admin_api": "(role:admin or role:administrator)", - "default": "rule:admin_api", - rule: "rule:defaut"}) - response = func(*arg, **kwarg) - self.assertEqual(403, response.status_int) - self.assertEqual('application/json', response.content_type) - self.assertTrue( - "Policy doesn't allow %s to be performed." % rule, - jsonutils.loads(response.json['error_message'])['faultstring']) - - def test_policy_disallow_get_all(self): - self._common_policy_check( - "scoring_engine:get_all", self.get_json, '/scoring_engines', - expect_errors=True) - - def test_policy_disallow_get_one(self): - se = obj_utils.create_test_scoring_engine(self.context) - self._common_policy_check( - "scoring_engine:get", self.get_json, - '/scoring_engines/%s' % se.uuid, - expect_errors=True) - - def test_policy_disallow_detail(self): - self._common_policy_check( - "scoring_engine:detail", self.get_json, - '/scoring_engines/detail', - expect_errors=True) - - -class TestScoringEnginePolicyEnforcementWithAdminContext( - TestListScoringEngine, api_base.AdminRoleTest): - - def setUp(self): - super(TestScoringEnginePolicyEnforcementWithAdminContext, self).setUp() - self.policy.set_rules({ - "admin_api": "(role:admin or role:administrator)", - "default": "rule:admin_api", - "scoring_engine:detail": "rule:default", - "scoring_engine:get": "rule:default", - "scoring_engine:get_all": "rule:default"}) diff --git a/watcher/tests/api/v1/test_services.py b/watcher/tests/api/v1/test_services.py deleted file mode 100644 index c556d95..0000000 --- a/watcher/tests/api/v1/test_services.py +++ /dev/null @@ -1,178 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this 
file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from oslo_config import cfg -from oslo_serialization import jsonutils -from six.moves.urllib import parse as urlparse - -from watcher.tests.api import base as api_base -from watcher.tests.objects import utils as obj_utils - - -class TestListService(api_base.FunctionalTest): - - def _assert_service_fields(self, service): - service_fields = ['id', 'name', 'host', 'status'] - for field in service_fields: - self.assertIn(field, service) - - def test_one(self): - service = obj_utils.create_test_service(self.context) - response = self.get_json('/services') - self.assertEqual(service.id, response['services'][0]["id"]) - self._assert_service_fields(response['services'][0]) - - def test_get_one_by_id(self): - service = obj_utils.create_test_service(self.context) - response = self.get_json('/services/%s' % service.id) - self.assertEqual(service.id, response["id"]) - self.assertEqual(service.name, response["name"]) - self._assert_service_fields(response) - - def test_get_one_by_name(self): - service = obj_utils.create_test_service(self.context) - response = self.get_json(urlparse.quote( - '/services/%s' % service['name'])) - self.assertEqual(service.id, response['id']) - self._assert_service_fields(response) - - def test_get_one_soft_deleted(self): - service = obj_utils.create_test_service(self.context) - service.soft_delete() - response = self.get_json( - '/services/%s' % service['id'], - headers={'X-Show-Deleted': 'True'}) - self.assertEqual(service.id, response['id']) - self._assert_service_fields(response) - - 
response = self.get_json( - '/services/%s' % service['id'], - expect_errors=True) - self.assertEqual(404, response.status_int) - - def test_detail(self): - service = obj_utils.create_test_service(self.context) - response = self.get_json('/services/detail') - self.assertEqual(service.id, response['services'][0]["id"]) - self._assert_service_fields(response['services'][0]) - for service in response['services']: - self.assertTrue( - all(val is not None for key, val in service.items() - if key in ['id', 'name', 'host', 'status']) - ) - - def test_detail_against_single(self): - service = obj_utils.create_test_service(self.context) - response = self.get_json('/services/%s/detail' % service.id, - expect_errors=True) - self.assertEqual(404, response.status_int) - - def test_many(self): - service_list = [] - for idx in range(1, 4): - service = obj_utils.create_test_service( - self.context, id=idx, host='CONTROLLER1', - name='SERVICE_{0}'.format(idx)) - service_list.append(service.id) - for idx in range(1, 4): - service = obj_utils.create_test_service( - self.context, id=3+idx, host='CONTROLLER2', - name='SERVICE_{0}'.format(idx)) - service_list.append(service.id) - response = self.get_json('/services') - self.assertEqual(6, len(response['services'])) - for service in response['services']: - self.assertTrue( - all(val is not None for key, val in service.items() - if key in ['id', 'name', 'host', 'status'])) - - def test_many_without_soft_deleted(self): - service_list = [] - for id_ in [1, 2, 3]: - service = obj_utils.create_test_service( - self.context, id=id_, host='CONTROLLER', - name='SERVICE_{0}'.format(id_)) - service_list.append(service.id) - for id_ in [4, 5]: - service = obj_utils.create_test_service( - self.context, id=id_, host='CONTROLLER', - name='SERVICE_{0}'.format(id_)) - service.soft_delete() - response = self.get_json('/services') - self.assertEqual(3, len(response['services'])) - ids = [s['id'] for s in response['services']] - 
self.assertEqual(sorted(service_list), sorted(ids)) - - def test_services_collection_links(self): - for idx in range(1, 6): - obj_utils.create_test_service( - self.context, id=idx, - host='CONTROLLER', - name='SERVICE_{0}'.format(idx)) - response = self.get_json('/services/?limit=2') - self.assertEqual(2, len(response['services'])) - - def test_services_collection_links_default_limit(self): - for idx in range(1, 6): - obj_utils.create_test_service( - self.context, id=idx, - host='CONTROLLER', - name='SERVICE_{0}'.format(idx)) - cfg.CONF.set_override('max_limit', 3, 'api') - response = self.get_json('/services') - self.assertEqual(3, len(response['services'])) - - -class TestServicePolicyEnforcement(api_base.FunctionalTest): - - def _common_policy_check(self, rule, func, *arg, **kwarg): - self.policy.set_rules({ - "admin_api": "(role:admin or role:administrator)", - "default": "rule:admin_api", - rule: "rule:default"}) - response = func(*arg, **kwarg) - self.assertEqual(403, response.status_int) - self.assertEqual('application/json', response.content_type) - self.assertTrue( - "Policy doesn't allow %s to be performed." 
% rule, - jsonutils.loads(response.json['error_message'])['faultstring']) - - def test_policy_disallow_get_all(self): - self._common_policy_check( - "service:get_all", self.get_json, '/services', - expect_errors=True) - - def test_policy_disallow_get_one(self): - service = obj_utils.create_test_service(self.context) - self._common_policy_check( - "service:get", self.get_json, - '/services/%s' % service.id, - expect_errors=True) - - def test_policy_disallow_detail(self): - self._common_policy_check( - "service:detail", self.get_json, - '/services/detail', - expect_errors=True) - - -class TestServiceEnforcementWithAdminContext(TestListService, - api_base.AdminRoleTest): - - def setUp(self): - super(TestServiceEnforcementWithAdminContext, self).setUp() - self.policy.set_rules({ - "admin_api": "(role:admin or role:administrator)", - "default": "rule:admin_api", - "service:detail": "rule:default", - "service:get": "rule:default", - "service:get_all": "rule:default"}) diff --git a/watcher/tests/api/v1/test_strategies.py b/watcher/tests/api/v1/test_strategies.py deleted file mode 100644 index 6edcd48..0000000 --- a/watcher/tests/api/v1/test_strategies.py +++ /dev/null @@ -1,248 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from oslo_config import cfg -from oslo_serialization import jsonutils -from six.moves.urllib import parse as urlparse - -from watcher.common import utils -from watcher.tests.api import base as api_base -from watcher.tests.objects import utils as obj_utils - - -class TestListStrategy(api_base.FunctionalTest): - - def setUp(self): - super(TestListStrategy, self).setUp() - self.fake_goal = obj_utils.create_test_goal( - self.context, uuid=utils.generate_uuid()) - - def _assert_strategy_fields(self, strategy): - strategy_fields = ['uuid', 'name', 'display_name', 'goal_uuid'] - for field in strategy_fields: - self.assertIn(field, strategy) - - def test_one(self): - strategy = obj_utils.create_test_strategy(self.context) - response = self.get_json('/strategies') - self.assertEqual(strategy.uuid, response['strategies'][0]["uuid"]) - self._assert_strategy_fields(response['strategies'][0]) - - def test_get_one_by_uuid(self): - strategy = obj_utils.create_test_strategy(self.context) - response = self.get_json('/strategies/%s' % strategy.uuid) - self.assertEqual(strategy.uuid, response["uuid"]) - self.assertEqual(strategy.name, response["name"]) - self._assert_strategy_fields(response) - - def test_get_one_by_name(self): - strategy = obj_utils.create_test_strategy(self.context) - response = self.get_json(urlparse.quote( - '/strategies/%s' % strategy['name'])) - self.assertEqual(strategy.uuid, response['uuid']) - self._assert_strategy_fields(response) - - def test_get_one_soft_deleted(self): - strategy = obj_utils.create_test_strategy(self.context) - strategy.soft_delete() - response = self.get_json( - '/strategies/%s' % strategy['uuid'], - headers={'X-Show-Deleted': 'True'}) - self.assertEqual(strategy.uuid, response['uuid']) - self._assert_strategy_fields(response) - - response = self.get_json( - '/strategies/%s' % strategy['uuid'], - expect_errors=True) - self.assertEqual(404, response.status_int) - - def test_detail(self): - strategy = 
obj_utils.create_test_strategy(self.context) - response = self.get_json('/strategies/detail') - self.assertEqual(strategy.uuid, response['strategies'][0]["uuid"]) - self._assert_strategy_fields(response['strategies'][0]) - for strategy in response['strategies']: - self.assertTrue( - all(val is not None for key, val in strategy.items() - if key in ['uuid', 'name', 'display_name', 'goal_uuid'])) - - def test_detail_against_single(self): - strategy = obj_utils.create_test_strategy(self.context) - response = self.get_json('/strategies/%s/detail' % strategy.uuid, - expect_errors=True) - self.assertEqual(404, response.status_int) - - def test_many(self): - strategy_list = [] - for idx in range(1, 6): - strategy = obj_utils.create_test_strategy( - self.context, id=idx, - uuid=utils.generate_uuid(), - name='STRATEGY_{0}'.format(idx)) - strategy_list.append(strategy.uuid) - response = self.get_json('/strategies') - self.assertEqual(5, len(response['strategies'])) - for strategy in response['strategies']: - self.assertTrue( - all(val is not None for key, val in strategy.items() - if key in ['uuid', 'name', 'display_name', 'goal_uuid'])) - - def test_many_without_soft_deleted(self): - strategy_list = [] - for id_ in [1, 2, 3]: - strategy = obj_utils.create_test_strategy( - self.context, id=id_, uuid=utils.generate_uuid(), - name='STRATEGY_{0}'.format(id_)) - strategy_list.append(strategy.uuid) - for id_ in [4, 5]: - strategy = obj_utils.create_test_strategy( - self.context, id=id_, uuid=utils.generate_uuid(), - name='STRATEGY_{0}'.format(id_)) - strategy.soft_delete() - response = self.get_json('/strategies') - self.assertEqual(3, len(response['strategies'])) - uuids = [s['uuid'] for s in response['strategies']] - self.assertEqual(sorted(strategy_list), sorted(uuids)) - - def test_strategies_collection_links(self): - for idx in range(1, 6): - obj_utils.create_test_strategy( - self.context, id=idx, - uuid=utils.generate_uuid(), - name='STRATEGY_{0}'.format(idx)) - response = 
self.get_json('/strategies/?limit=2') - self.assertEqual(2, len(response['strategies'])) - - def test_strategies_collection_links_default_limit(self): - for idx in range(1, 6): - obj_utils.create_test_strategy( - self.context, id=idx, - uuid=utils.generate_uuid(), - name='STRATEGY_{0}'.format(idx)) - cfg.CONF.set_override('max_limit', 3, 'api') - response = self.get_json('/strategies') - self.assertEqual(3, len(response['strategies'])) - - def test_filter_by_goal_uuid(self): - goal1 = obj_utils.create_test_goal( - self.context, - id=2, - uuid=utils.generate_uuid(), - name='My_Goal 1') - goal2 = obj_utils.create_test_goal( - self.context, - id=3, - uuid=utils.generate_uuid(), - name='My Goal 2') - - for id_ in range(1, 3): - obj_utils.create_test_strategy( - self.context, id=id_, - uuid=utils.generate_uuid(), - name='Goal %s' % id_, - goal_id=goal1['id']) - for id_ in range(3, 5): - obj_utils.create_test_strategy( - self.context, id=id_, - uuid=utils.generate_uuid(), - name='Goal %s' % id_, - goal_id=goal2['id']) - - response = self.get_json('/strategies/?goal=%s' % goal1['uuid']) - - strategies = response['strategies'] - self.assertEqual(2, len(strategies)) - for strategy in strategies: - self.assertEqual(goal1['uuid'], strategy['goal_uuid']) - - def test_filter_by_goal_name(self): - goal1 = obj_utils.create_test_goal( - self.context, - id=2, - uuid=utils.generate_uuid(), - name='My_Goal 1') - goal2 = obj_utils.create_test_goal( - self.context, - id=3, - uuid=utils.generate_uuid(), - name='My Goal 2') - - for id_ in range(1, 3): - obj_utils.create_test_strategy( - self.context, id=id_, - uuid=utils.generate_uuid(), - name='Goal %s' % id_, - goal_id=goal1['id']) - for id_ in range(3, 5): - obj_utils.create_test_strategy( - self.context, id=id_, - uuid=utils.generate_uuid(), - name='Goal %s' % id_, - goal_id=goal2['id']) - - response = self.get_json('/strategies/?goal=%s' % goal1['name']) - - strategies = response['strategies'] - self.assertEqual(2, len(strategies)) 
- for strategy in strategies: - self.assertEqual(goal1['uuid'], strategy['goal_uuid']) - - -class TestStrategyPolicyEnforcement(api_base.FunctionalTest): - - def setUp(self): - super(TestStrategyPolicyEnforcement, self).setUp() - self.fake_goal = obj_utils.create_test_goal( - self.context, uuid=utils.generate_uuid()) - - def _common_policy_check(self, rule, func, *arg, **kwarg): - self.policy.set_rules({ - "admin_api": "(role:admin or role:administrator)", - "default": "rule:admin_api", - rule: "rule:defaut"}) - response = func(*arg, **kwarg) - self.assertEqual(403, response.status_int) - self.assertEqual('application/json', response.content_type) - self.assertTrue( - "Policy doesn't allow %s to be performed." % rule, - jsonutils.loads(response.json['error_message'])['faultstring']) - - def test_policy_disallow_get_all(self): - self._common_policy_check( - "strategy:get_all", self.get_json, '/strategies', - expect_errors=True) - - def test_policy_disallow_get_one(self): - strategy = obj_utils.create_test_strategy(self.context) - self._common_policy_check( - "strategy:get", self.get_json, - '/strategies/%s' % strategy.uuid, - expect_errors=True) - - def test_policy_disallow_detail(self): - self._common_policy_check( - "strategy:detail", self.get_json, - '/strategies/detail', - expect_errors=True) - - -class TestStrategyEnforcementWithAdminContext( - TestListStrategy, api_base.AdminRoleTest): - - def setUp(self): - super(TestStrategyEnforcementWithAdminContext, self).setUp() - self.policy.set_rules({ - "admin_api": "(role:admin or role:administrator)", - "default": "rule:admin_api", - "strategy:detail": "rule:default", - "strategy:get": "rule:default", - "strategy:get_all": "rule:default"}) diff --git a/watcher/tests/api/v1/test_types.py b/watcher/tests/api/v1/test_types.py deleted file mode 100644 index 2a6a34e..0000000 --- a/watcher/tests/api/v1/test_types.py +++ /dev/null @@ -1,249 +0,0 @@ -# Copyright 2013 Red Hat, Inc. -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock -import webtest -import wsme -from wsme import types as wtypes - -from watcher.api.controllers.v1 import types -from watcher.common import exception -from watcher.common import utils -from watcher.tests import base - - -class TestUuidType(base.TestCase): - - def test_valid_uuid(self): - test_uuid = '1a1a1a1a-2b2b-3c3c-4d4d-5e5e5e5e5e5e' - self.assertEqual(test_uuid, types.UuidType.validate(test_uuid)) - - def test_invalid_uuid(self): - self.assertRaises(exception.InvalidUUID, - types.UuidType.validate, 'invalid-uuid') - - -class TestNameType(base.TestCase): - - def test_valid_name(self): - test_name = 'hal-9000' - self.assertEqual(test_name, types.NameType.validate(test_name)) - - def test_invalid_name(self): - self.assertRaises(exception.InvalidName, - types.NameType.validate, '-this is not valid-') - - -class TestUuidOrNameType(base.TestCase): - - @mock.patch.object(utils, 'is_uuid_like') - @mock.patch.object(utils, 'is_hostname_safe') - def test_valid_uuid(self, host_mock, uuid_mock): - test_uuid = '1a1a1a1a-2b2b-3c3c-4d4d-5e5e5e5e5e5e' - host_mock.return_value = False - uuid_mock.return_value = True - self.assertTrue(types.UuidOrNameType.validate(test_uuid)) - uuid_mock.assert_called_once_with(test_uuid) - - @mock.patch.object(utils, 'is_uuid_like') - @mock.patch.object(utils, 'is_hostname_safe') - def test_valid_name(self, host_mock, uuid_mock): - test_name = 'dc16-database5' - uuid_mock.return_value = 
False - host_mock.return_value = True - self.assertTrue(types.UuidOrNameType.validate(test_name)) - host_mock.assert_called_once_with(test_name) - - def test_invalid_uuid_or_name(self): - self.assertRaises(exception.InvalidUuidOrName, - types.UuidOrNameType.validate, 'inval#uuid%or*name') - - -class MyPatchType(types.JsonPatchType): - """Helper class for TestJsonPatchType tests.""" - - @staticmethod - def mandatory_attrs(): - return ['/mandatory'] - - @staticmethod - def internal_attrs(): - return ['/internal'] - - -class MyRoot(wsme.WSRoot): - """Helper class for TestJsonPatchType tests.""" - - @wsme.expose([wsme.types.text], body=[MyPatchType]) - @wsme.validate([MyPatchType]) - def test(self, patch): - return patch - - -class TestJsonPatchType(base.TestCase): - - def setUp(self): - super(TestJsonPatchType, self).setUp() - self.app = webtest.TestApp(MyRoot(['restjson']).wsgiapp()) - - def _patch_json(self, params, expect_errors=False): - return self.app.patch_json( - '/test', - params=params, - headers={'Accept': 'application/json'}, - expect_errors=expect_errors - ) - - def test_valid_patches(self): - valid_patches = [{'path': '/extra/foo', 'op': 'remove'}, - {'path': '/extra/foo', 'op': 'add', 'value': 'bar'}, - {'path': '/str', 'op': 'replace', 'value': 'bar'}, - {'path': '/bool', 'op': 'add', 'value': True}, - {'path': '/int', 'op': 'add', 'value': 1}, - {'path': '/float', 'op': 'add', 'value': 0.123}, - {'path': '/list', 'op': 'add', 'value': [1, 2]}, - {'path': '/none', 'op': 'add', 'value': None}, - {'path': '/empty_dict', 'op': 'add', 'value': {}}, - {'path': '/empty_list', 'op': 'add', 'value': []}, - {'path': '/dict', 'op': 'add', - 'value': {'cat': 'meow'}}] - ret = self._patch_json(valid_patches, False) - self.assertEqual(200, ret.status_int) - self.assertEqual(valid_patches, ret.json) - - def test_cannot_update_internal_attr(self): - patch = [{'path': '/internal', 'op': 'replace', 'value': 'foo'}] - ret = self._patch_json(patch, True) - 
self.assertEqual(400, ret.status_int) - self.assertTrue(ret.json['faultstring']) - - def test_cannot_update_internal_dict_attr(self): - patch = [{'path': '/internal', 'op': 'replace', - 'value': 'foo'}] - ret = self._patch_json(patch, True) - self.assertEqual(400, ret.status_int) - self.assertTrue(ret.json['faultstring']) - - def test_mandatory_attr(self): - patch = [{'op': 'replace', 'path': '/mandatory', 'value': 'foo'}] - ret = self._patch_json(patch, False) - self.assertEqual(200, ret.status_int) - self.assertEqual(patch, ret.json) - - def test_cannot_remove_mandatory_attr(self): - patch = [{'op': 'remove', 'path': '/mandatory'}] - ret = self._patch_json(patch, True) - self.assertEqual(400, ret.status_int) - self.assertTrue(ret.json['faultstring']) - - def test_missing_required_fields_path(self): - missing_path = [{'op': 'remove'}] - ret = self._patch_json(missing_path, True) - self.assertEqual(400, ret.status_int) - self.assertTrue(ret.json['faultstring']) - - def test_missing_required_fields_op(self): - missing_op = [{'path': '/foo'}] - ret = self._patch_json(missing_op, True) - self.assertEqual(400, ret.status_int) - self.assertTrue(ret.json['faultstring']) - - def test_invalid_op(self): - patch = [{'path': '/foo', 'op': 'invalid'}] - ret = self._patch_json(patch, True) - self.assertEqual(400, ret.status_int) - self.assertTrue(ret.json['faultstring']) - - def test_invalid_path(self): - patch = [{'path': 'invalid-path', 'op': 'remove'}] - ret = self._patch_json(patch, True) - self.assertEqual(400, ret.status_int) - self.assertTrue(ret.json['faultstring']) - - def test_cannot_add_with_no_value(self): - patch = [{'path': '/extra/foo', 'op': 'add'}] - ret = self._patch_json(patch, True) - self.assertEqual(400, ret.status_int) - self.assertTrue(ret.json['faultstring']) - - def test_cannot_replace_with_no_value(self): - patch = [{'path': '/foo', 'op': 'replace'}] - ret = self._patch_json(patch, True) - self.assertEqual(400, ret.status_int) - 
self.assertTrue(ret.json['faultstring']) - - -class TestBooleanType(base.TestCase): - - def test_valid_true_values(self): - v = types.BooleanType() - self.assertTrue(v.validate("true")) - self.assertTrue(v.validate("TRUE")) - self.assertTrue(v.validate("True")) - self.assertTrue(v.validate("t")) - self.assertTrue(v.validate("1")) - self.assertTrue(v.validate("y")) - self.assertTrue(v.validate("yes")) - self.assertTrue(v.validate("on")) - - def test_valid_false_values(self): - v = types.BooleanType() - self.assertFalse(v.validate("false")) - self.assertFalse(v.validate("FALSE")) - self.assertFalse(v.validate("False")) - self.assertFalse(v.validate("f")) - self.assertFalse(v.validate("0")) - self.assertFalse(v.validate("n")) - self.assertFalse(v.validate("no")) - self.assertFalse(v.validate("off")) - - def test_invalid_value(self): - v = types.BooleanType() - self.assertRaises(exception.Invalid, v.validate, "invalid-value") - self.assertRaises(exception.Invalid, v.validate, "01") - - -class TestJsonType(base.TestCase): - - def test_valid_values(self): - vt = types.jsontype - value = vt.validate("hello") - self.assertEqual("hello", value) - value = vt.validate(10) - self.assertEqual(10, value) - value = vt.validate(0.123) - self.assertEqual(0.123, value) - value = vt.validate(True) - self.assertTrue(value) - value = vt.validate([1, 2, 3]) - self.assertEqual([1, 2, 3], value) - value = vt.validate({'foo': 'bar'}) - self.assertEqual({'foo': 'bar'}, value) - value = vt.validate(None) - self.assertIsNone(value) - - def test_invalid_values(self): - vt = types.jsontype - self.assertRaises(exception.Invalid, vt.validate, object()) - - def test_apimultitype_tostring(self): - vts = str(types.jsontype) - self.assertIn(str(wtypes.text), vts) - self.assertIn(str(int), vts) - self.assertIn(str(float), vts) - self.assertIn(str(types.BooleanType), vts) - self.assertIn(str(list), vts) - self.assertIn(str(dict), vts) - self.assertIn(str(None), vts) diff --git 
a/watcher/tests/api/v1/test_utils.py b/watcher/tests/api/v1/test_utils.py deleted file mode 100644 index e5541dd..0000000 --- a/watcher/tests/api/v1/test_utils.py +++ /dev/null @@ -1,70 +0,0 @@ -# Copyright 2013 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import wsme - -from oslo_config import cfg - -from watcher.api.controllers.v1 import utils -from watcher.tests import base - -CONF = cfg.CONF - - -class TestApiUtils(base.TestCase): - - def test_validate_limit(self): - limit = utils.validate_limit(10) - self.assertEqual(10, 10) - - # max limit - limit = utils.validate_limit(999999999) - self.assertEqual(CONF.api.max_limit, limit) - - # negative - self.assertRaises(wsme.exc.ClientSideError, utils.validate_limit, -1) - - # zero - self.assertRaises(wsme.exc.ClientSideError, utils.validate_limit, 0) - - def test_validate_sort_dir(self): - # if sort_dir is valid, nothing should happen - try: - utils.validate_sort_dir('asc') - except Exception as exc: - self.fail(exc) - - # invalid sort_dir parameter - self.assertRaises(wsme.exc.ClientSideError, - utils.validate_sort_dir, - 'fake-sort') - - def test_validate_search_filters(self): - allowed_fields = ["allowed", "authorized"] - - test_filters = {"allowed": 1, "authorized": 2} - try: - utils.validate_search_filters(test_filters, allowed_fields) - except Exception as exc: - self.fail(exc) - - def test_validate_search_filters_with_invalid_key(self): - allowed_fields = ["allowed", 
"authorized"] - - test_filters = {"allowed": 1, "unauthorized": 2} - - self.assertRaises( - wsme.exc.ClientSideError, utils.validate_search_filters, - test_filters, allowed_fields) diff --git a/watcher/tests/applier/__init__.py b/watcher/tests/applier/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/tests/applier/action_plan/__init__.py b/watcher/tests/applier/action_plan/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/tests/applier/action_plan/test_default_action_handler.py b/watcher/tests/applier/action_plan/test_default_action_handler.py deleted file mode 100755 index 7aadee9..0000000 --- a/watcher/tests/applier/action_plan/test_default_action_handler.py +++ /dev/null @@ -1,126 +0,0 @@ -# Copyright (c) 2015 b<>com -# -# Authors: Jean-Emile DARTOIS -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import mock - -from watcher.applier.action_plan import default -from watcher.applier import default as ap_applier -from watcher.common import exception -from watcher import notifications -from watcher import objects -from watcher.objects import action_plan as ap_objects -from watcher.tests.db import base -from watcher.tests.objects import utils as obj_utils - - -class TestDefaultActionPlanHandler(base.DbTestCase): - - class FakeApplierException(Exception): - pass - - def setUp(self): - super(TestDefaultActionPlanHandler, self).setUp() - - p_action_plan_notifications = mock.patch.object( - notifications, 'action_plan', autospec=True) - self.m_action_plan_notifications = p_action_plan_notifications.start() - self.addCleanup(p_action_plan_notifications.stop) - - obj_utils.create_test_goal(self.context) - self.strategy = obj_utils.create_test_strategy(self.context) - self.audit = obj_utils.create_test_audit( - self.context, strategy_id=self.strategy.id) - self.action_plan = obj_utils.create_test_action_plan( - self.context, audit_id=self.audit.id, - strategy_id=self.strategy.id) - self.action = obj_utils.create_test_action( - self.context, action_plan_id=self.action_plan.id, - action_type='nop', - input_parameters={'message': 'hello World'}) - - @mock.patch.object(objects.ActionPlan, "get_by_uuid") - def test_launch_action_plan(self, m_get_action_plan): - m_get_action_plan.return_value = self.action_plan - command = default.DefaultActionPlanHandler( - self.context, mock.MagicMock(), self.action_plan.uuid) - command.execute() - - expected_calls = [ - mock.call(self.context, self.action_plan, - action=objects.fields.NotificationAction.EXECUTION, - phase=objects.fields.NotificationPhase.START), - mock.call(self.context, self.action_plan, - action=objects.fields.NotificationAction.EXECUTION, - phase=objects.fields.NotificationPhase.END)] - - self.assertEqual(ap_objects.State.SUCCEEDED, self.action_plan.state) - - self.assertEqual( - expected_calls, - 
self.m_action_plan_notifications - .send_action_notification - .call_args_list) - - @mock.patch.object(ap_applier.DefaultApplier, "execute") - @mock.patch.object(objects.ActionPlan, "get_by_uuid") - def test_launch_action_plan_with_error(self, m_get_action_plan, m_execute): - m_get_action_plan.return_value = self.action_plan - m_execute.side_effect = self.FakeApplierException - command = default.DefaultActionPlanHandler( - self.context, mock.MagicMock(), self.action_plan.uuid) - command.execute() - - expected_calls = [ - mock.call(self.context, self.action_plan, - action=objects.fields.NotificationAction.EXECUTION, - phase=objects.fields.NotificationPhase.START), - mock.call(self.context, self.action_plan, - action=objects.fields.NotificationAction.EXECUTION, - priority=objects.fields.NotificationPriority.ERROR, - phase=objects.fields.NotificationPhase.ERROR)] - - self.assertEqual(ap_objects.State.FAILED, self.action_plan.state) - - self.assertEqual( - expected_calls, - self.m_action_plan_notifications - .send_action_notification - .call_args_list) - - @mock.patch.object(objects.ActionPlan, "get_by_uuid") - def test_cancel_action_plan(self, m_get_action_plan): - m_get_action_plan.return_value = self.action_plan - self.action_plan.state = ap_objects.State.CANCELLED - self.action_plan.save() - command = default.DefaultActionPlanHandler( - self.context, mock.MagicMock(), self.action_plan.uuid) - command.execute() - action = self.action.get_by_uuid(self.context, self.action.uuid) - self.assertEqual(ap_objects.State.CANCELLED, self.action_plan.state) - self.assertEqual(objects.action.State.CANCELLED, action.state) - - @mock.patch.object(ap_applier.DefaultApplier, "execute") - @mock.patch.object(objects.ActionPlan, "get_by_uuid") - def test_cancel_action_plan_with_exception(self, m_get_action_plan, - m_execute): - m_get_action_plan.return_value = self.action_plan - m_execute.side_effect = exception.ActionPlanCancelled( - self.action_plan.uuid) - command = 
default.DefaultActionPlanHandler( - self.context, mock.MagicMock(), self.action_plan.uuid) - command.execute() - self.assertEqual(ap_objects.State.CANCELLED, self.action_plan.state) diff --git a/watcher/tests/applier/actions/__init__.py b/watcher/tests/applier/actions/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/tests/applier/actions/loading/__init__.py b/watcher/tests/applier/actions/loading/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/tests/applier/actions/loading/test_default_actions_loader.py b/watcher/tests/applier/actions/loading/test_default_actions_loader.py deleted file mode 100644 index 5a36391..0000000 --- a/watcher/tests/applier/actions/loading/test_default_actions_loader.py +++ /dev/null @@ -1,31 +0,0 @@ -# Copyright (c) 2016 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-from __future__ import unicode_literals - -from watcher.applier.actions import base as abase -from watcher.applier.loading import default -from watcher.tests import base - - -class TestDefaultActionLoader(base.TestCase): - def setUp(self): - super(TestDefaultActionLoader, self).setUp() - self.loader = default.DefaultActionLoader() - - def test_endpoints(self): - for endpoint in self.loader.list_available(): - loaded = self.loader.load(endpoint) - self.assertIsNotNone(loaded) - self.assertIsInstance(loaded, abase.BaseAction) diff --git a/watcher/tests/applier/actions/test_change_node_power_state.py b/watcher/tests/applier/actions/test_change_node_power_state.py deleted file mode 100644 index ca60995..0000000 --- a/watcher/tests/applier/actions/test_change_node_power_state.py +++ /dev/null @@ -1,134 +0,0 @@ -# Copyright (c) 2017 ZTE -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from __future__ import unicode_literals - -import jsonschema -import mock - -from watcher.applier.actions import base as baction -from watcher.applier.actions import change_node_power_state -from watcher.common import clients -from watcher.tests import base - -COMPUTE_NODE = "compute-1" - - -@mock.patch.object(clients.OpenStackClients, 'nova') -@mock.patch.object(clients.OpenStackClients, 'ironic') -class TestChangeNodePowerState(base.TestCase): - - def setUp(self): - super(TestChangeNodePowerState, self).setUp() - - self.input_parameters = { - baction.BaseAction.RESOURCE_ID: COMPUTE_NODE, - "state": change_node_power_state.NodeState.POWERON.value, - } - self.action = change_node_power_state.ChangeNodePowerState( - mock.Mock()) - self.action.input_parameters = self.input_parameters - - def test_parameters_down(self, mock_ironic, mock_nova): - self.action.input_parameters = { - baction.BaseAction.RESOURCE_ID: COMPUTE_NODE, - self.action.STATE: - change_node_power_state.NodeState.POWEROFF.value} - self.assertTrue(self.action.validate_parameters()) - - def test_parameters_up(self, mock_ironic, mock_nova): - self.action.input_parameters = { - baction.BaseAction.RESOURCE_ID: COMPUTE_NODE, - self.action.STATE: - change_node_power_state.NodeState.POWERON.value} - self.assertTrue(self.action.validate_parameters()) - - def test_parameters_exception_wrong_state(self, mock_ironic, mock_nova): - self.action.input_parameters = { - baction.BaseAction.RESOURCE_ID: COMPUTE_NODE, - self.action.STATE: 'error'} - self.assertRaises(jsonschema.ValidationError, - self.action.validate_parameters) - - def test_parameters_resource_id_empty(self, mock_ironic, mock_nova): - self.action.input_parameters = { - self.action.STATE: - change_node_power_state.NodeState.POWERON.value, - } - self.assertRaises(jsonschema.ValidationError, - self.action.validate_parameters) - - def test_parameters_applies_add_extra(self, mock_ironic, mock_nova): - self.action.input_parameters = {"extra": "failed"} - 
self.assertRaises(jsonschema.ValidationError, - self.action.validate_parameters) - - def test_change_service_state_pre_condition(self, mock_ironic, mock_nova): - try: - self.action.pre_condition() - except Exception as exc: - self.fail(exc) - - def test_change_node_state_post_condition(self, mock_ironic, mock_nova): - try: - self.action.post_condition() - except Exception as exc: - self.fail(exc) - - def test_execute_node_service_state_with_poweron_target( - self, mock_ironic, mock_nova): - mock_irclient = mock_ironic.return_value - self.action.execute() - - mock_irclient.node.set_power_state.assert_called_once_with( - COMPUTE_NODE, change_node_power_state.NodeState.POWERON.value) - - def test_execute_change_node_state_with_poweroff_target( - self, mock_ironic, mock_nova): - mock_irclient = mock_ironic.return_value - mock_nvclient = mock_nova.return_value - mock_get = mock.MagicMock() - mock_get.to_dict.return_value = {'running_vms': 0} - mock_nvclient.hypervisors.get.return_value = mock_get - self.action.input_parameters["state"] = ( - change_node_power_state.NodeState.POWEROFF.value) - self.action.execute() - - mock_irclient.node.set_power_state.assert_called_once_with( - COMPUTE_NODE, change_node_power_state.NodeState.POWEROFF.value) - - def test_revert_change_node_state_with_poweron_target( - self, mock_ironic, mock_nova): - mock_irclient = mock_ironic.return_value - mock_nvclient = mock_nova.return_value - mock_get = mock.MagicMock() - mock_get.to_dict.return_value = {'running_vms': 0} - mock_nvclient.hypervisors.get.return_value = mock_get - self.action.input_parameters["state"] = ( - change_node_power_state.NodeState.POWERON.value) - self.action.revert() - - mock_irclient.node.set_power_state.assert_called_once_with( - COMPUTE_NODE, change_node_power_state.NodeState.POWEROFF.value) - - def test_revert_change_node_state_with_poweroff_target( - self, mock_ironic, mock_nova): - mock_irclient = mock_ironic.return_value - self.action.input_parameters["state"] = ( 
- change_node_power_state.NodeState.POWEROFF.value) - self.action.revert() - - mock_irclient.node.set_power_state.assert_called_once_with( - COMPUTE_NODE, change_node_power_state.NodeState.POWERON.value) diff --git a/watcher/tests/applier/actions/test_change_nova_service_state.py b/watcher/tests/applier/actions/test_change_nova_service_state.py deleted file mode 100644 index e2f016c..0000000 --- a/watcher/tests/applier/actions/test_change_nova_service_state.py +++ /dev/null @@ -1,133 +0,0 @@ -# Copyright (c) 2016 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from __future__ import unicode_literals - -import jsonschema -import mock - -from watcher.applier.actions import base as baction -from watcher.applier.actions import change_nova_service_state -from watcher.common import clients -from watcher.common import nova_helper -from watcher.decision_engine.model import element -from watcher.tests import base - - -class TestChangeNovaServiceState(base.TestCase): - - def setUp(self): - super(TestChangeNovaServiceState, self).setUp() - - self.m_osc_cls = mock.Mock() - self.m_helper_cls = mock.Mock() - self.m_helper = mock.Mock(spec=nova_helper.NovaHelper) - self.m_helper_cls.return_value = self.m_helper - self.m_osc = mock.Mock(spec=clients.OpenStackClients) - self.m_osc_cls.return_value = self.m_osc - - m_openstack_clients = mock.patch.object( - clients, "OpenStackClients", self.m_osc_cls) - m_nova_helper = mock.patch.object( - nova_helper, "NovaHelper", self.m_helper_cls) - - m_openstack_clients.start() - m_nova_helper.start() - - self.addCleanup(m_openstack_clients.stop) - self.addCleanup(m_nova_helper.stop) - - self.input_parameters = { - baction.BaseAction.RESOURCE_ID: "compute-1", - "state": element.ServiceState.ENABLED.value, - } - self.action = change_nova_service_state.ChangeNovaServiceState( - mock.Mock()) - self.action.input_parameters = self.input_parameters - - def test_parameters_down(self): - self.action.input_parameters = { - baction.BaseAction.RESOURCE_ID: "compute-1", - self.action.STATE: element.ServiceState.DISABLED.value} - self.assertTrue(self.action.validate_parameters()) - - def test_parameters_up(self): - self.action.input_parameters = { - baction.BaseAction.RESOURCE_ID: "compute-1", - self.action.STATE: element.ServiceState.ENABLED.value} - self.assertTrue(self.action.validate_parameters()) - - def test_parameters_exception_wrong_state(self): - self.action.input_parameters = { - baction.BaseAction.RESOURCE_ID: "compute-1", - self.action.STATE: 'error'} - self.assertRaises(jsonschema.ValidationError, 
- self.action.validate_parameters) - - def test_parameters_resource_id_empty(self): - self.action.input_parameters = { - self.action.STATE: element.ServiceState.ENABLED.value, - } - self.assertRaises(jsonschema.ValidationError, - self.action.validate_parameters) - - def test_parameters_applies_add_extra(self): - self.action.input_parameters = {"extra": "failed"} - self.assertRaises(jsonschema.ValidationError, - self.action.validate_parameters) - - def test_change_service_state_pre_condition(self): - try: - self.action.pre_condition() - except Exception as exc: - self.fail(exc) - - def test_change_service_state_post_condition(self): - try: - self.action.post_condition() - except Exception as exc: - self.fail(exc) - - def test_execute_change_service_state_with_enable_target(self): - self.action.execute() - - self.m_helper_cls.assert_called_once_with(osc=self.m_osc) - self.m_helper.enable_service_nova_compute.assert_called_once_with( - "compute-1") - - def test_execute_change_service_state_with_disable_target(self): - self.action.input_parameters["state"] = ( - element.ServiceState.DISABLED.value) - self.action.execute() - - self.m_helper_cls.assert_called_once_with(osc=self.m_osc) - self.m_helper.disable_service_nova_compute.assert_called_once_with( - "compute-1") - - def test_revert_change_service_state_with_enable_target(self): - self.action.revert() - - self.m_helper_cls.assert_called_once_with(osc=self.m_osc) - self.m_helper.disable_service_nova_compute.assert_called_once_with( - "compute-1") - - def test_revert_change_service_state_with_disable_target(self): - self.action.input_parameters["state"] = ( - element.ServiceState.DISABLED.value) - self.action.revert() - - self.m_helper_cls.assert_called_once_with(osc=self.m_osc) - self.m_helper.enable_service_nova_compute.assert_called_once_with( - "compute-1") diff --git a/watcher/tests/applier/actions/test_migration.py b/watcher/tests/applier/actions/test_migration.py deleted file mode 100644 index 7d85a00..0000000 
--- a/watcher/tests/applier/actions/test_migration.py +++ /dev/null @@ -1,254 +0,0 @@ -# Copyright (c) 2016 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import unicode_literals - - -import jsonschema -import mock - -from watcher.applier.actions import base as baction -from watcher.applier.actions import migration -from watcher.common import clients -from watcher.common import exception -from watcher.common import nova_helper -from watcher.tests import base - - -class TestMigration(base.TestCase): - - INSTANCE_UUID = "45a37aeb-95ab-4ddb-a305-7d9f62c2f5ba" - - def setUp(self): - super(TestMigration, self).setUp() - - self.m_osc_cls = mock.Mock() - self.m_helper_cls = mock.Mock() - self.m_helper = mock.Mock(spec=nova_helper.NovaHelper) - self.m_helper_cls.return_value = self.m_helper - self.m_osc = mock.Mock(spec=clients.OpenStackClients) - self.m_osc_cls.return_value = self.m_osc - - m_openstack_clients = mock.patch.object( - clients, "OpenStackClients", self.m_osc_cls) - m_nova_helper = mock.patch.object( - nova_helper, "NovaHelper", self.m_helper_cls) - - m_openstack_clients.start() - m_nova_helper.start() - - self.addCleanup(m_openstack_clients.stop) - self.addCleanup(m_nova_helper.stop) - - self.input_parameters = { - "migration_type": "live", - "source_node": "compute1-hostname", - "destination_node": "compute2-hostname", - baction.BaseAction.RESOURCE_ID: self.INSTANCE_UUID, - } - self.action = migration.Migrate(mock.Mock()) - 
self.action.input_parameters = self.input_parameters - - self.input_parameters_cold = { - "migration_type": "cold", - "source_node": "compute1-hostname", - "destination_node": "compute2-hostname", - baction.BaseAction.RESOURCE_ID: self.INSTANCE_UUID, - } - self.action_cold = migration.Migrate(mock.Mock()) - self.action_cold.input_parameters = self.input_parameters_cold - - def test_parameters(self): - params = {baction.BaseAction.RESOURCE_ID: - self.INSTANCE_UUID, - self.action.MIGRATION_TYPE: 'live', - self.action.DESTINATION_NODE: 'compute-2', - self.action.SOURCE_NODE: 'compute-3'} - self.action.input_parameters = params - self.assertTrue(self.action.validate_parameters()) - - def test_parameters_cold(self): - params = {baction.BaseAction.RESOURCE_ID: - self.INSTANCE_UUID, - self.action.MIGRATION_TYPE: 'cold', - self.action.DESTINATION_NODE: 'compute-2', - self.action.SOURCE_NODE: 'compute-3'} - self.action_cold.input_parameters = params - self.assertTrue(self.action_cold.validate_parameters()) - - def test_parameters_exception_empty_fields(self): - parameters = {baction.BaseAction.RESOURCE_ID: None, - 'migration_type': None, - 'source_node': None, - 'destination_node': None} - self.action.input_parameters = parameters - self.assertRaises(jsonschema.ValidationError, - self.action.validate_parameters) - - def test_parameters_exception_migration_type(self): - parameters = {baction.BaseAction.RESOURCE_ID: - self.INSTANCE_UUID, - 'migration_type': 'unknown', - 'source_node': 'compute-2', - 'destination_node': 'compute-3'} - self.action.input_parameters = parameters - self.assertRaises(jsonschema.ValidationError, - self.action.validate_parameters) - - def test_parameters_exception_source_node(self): - parameters = {baction.BaseAction.RESOURCE_ID: - self.INSTANCE_UUID, - 'migration_type': 'live', - 'source_node': None, - 'destination_node': 'compute-3'} - self.action.input_parameters = parameters - self.assertRaises(jsonschema.ValidationError, - 
self.action.validate_parameters) - - def test_parameters_destination_node_none(self): - parameters = {baction.BaseAction.RESOURCE_ID: - self.INSTANCE_UUID, - 'migration_type': 'live', - 'source_node': 'compute-1', - 'destination_node': None} - self.action.input_parameters = parameters - self.assertTrue(self.action.validate_parameters) - - def test_parameters_exception_resource_id(self): - parameters = {baction.BaseAction.RESOURCE_ID: "EFEF", - 'migration_type': 'live', - 'source_node': 'compute-2', - 'destination_node': 'compute-3'} - self.action.input_parameters = parameters - self.assertRaises(jsonschema.ValidationError, - self.action.validate_parameters) - - def test_migration_pre_condition(self): - try: - self.action.pre_condition() - except Exception as exc: - self.fail(exc) - - def test_migration_post_condition(self): - try: - self.action.post_condition() - except Exception as exc: - self.fail(exc) - - def test_execute_live_migration_invalid_instance(self): - self.m_helper.find_instance.return_value = None - exc = self.assertRaises( - exception.InstanceNotFound, self.action.execute) - self.m_helper.find_instance.assert_called_once_with(self.INSTANCE_UUID) - self.assertEqual(self.INSTANCE_UUID, exc.kwargs["name"]) - - def test_execute_cold_migration_invalid_instance(self): - self.m_helper.find_instance.return_value = None - exc = self.assertRaises( - exception.InstanceNotFound, self.action_cold.execute) - self.m_helper.find_instance.assert_called_once_with(self.INSTANCE_UUID) - self.assertEqual(self.INSTANCE_UUID, exc.kwargs["name"]) - - def test_execute_live_migration(self): - self.m_helper.find_instance.return_value = self.INSTANCE_UUID - - try: - self.action.execute() - except Exception as exc: - self.fail(exc) - - self.m_helper.live_migrate_instance.assert_called_once_with( - instance_id=self.INSTANCE_UUID, - dest_hostname="compute2-hostname") - - def test_execute_cold_migration(self): - self.m_helper.find_instance.return_value = self.INSTANCE_UUID - - 
try: - self.action_cold.execute() - except Exception as exc: - self.fail(exc) - - self.m_helper.watcher_non_live_migrate_instance.\ - assert_called_once_with( - instance_id=self.INSTANCE_UUID, - dest_hostname="compute2-hostname" - ) - - def test_revert_live_migration(self): - self.m_helper.find_instance.return_value = self.INSTANCE_UUID - - self.action.revert() - - self.m_helper_cls.assert_called_once_with(osc=self.m_osc) - self.m_helper.live_migrate_instance.assert_called_once_with( - instance_id=self.INSTANCE_UUID, - dest_hostname="compute1-hostname" - ) - - def test_revert_cold_migration(self): - self.m_helper.find_instance.return_value = self.INSTANCE_UUID - - self.action_cold.revert() - - self.m_helper_cls.assert_called_once_with(osc=self.m_osc) - self.m_helper.watcher_non_live_migrate_instance.\ - assert_called_once_with( - instance_id=self.INSTANCE_UUID, - dest_hostname="compute1-hostname" - ) - - def test_live_migrate_non_shared_storage_instance(self): - self.m_helper.find_instance.return_value = self.INSTANCE_UUID - - self.m_helper.live_migrate_instance.side_effect = [ - nova_helper.nvexceptions.ClientException(400, "BadRequest"), True] - - try: - self.action.execute() - except Exception as exc: - self.fail(exc) - - self.m_helper.live_migrate_instance.assert_has_calls([ - mock.call(instance_id=self.INSTANCE_UUID, - dest_hostname="compute2-hostname"), - mock.call(instance_id=self.INSTANCE_UUID, - dest_hostname="compute2-hostname", - block_migration=True) - ]) - - expected = [mock.call.first(instance_id=self.INSTANCE_UUID, - dest_hostname="compute2-hostname"), - mock.call.second(instance_id=self.INSTANCE_UUID, - dest_hostname="compute2-hostname", - block_migration=True) - ] - self.m_helper.live_migrate_instance.mock_calls == expected - self.assertEqual(2, self.m_helper.live_migrate_instance.call_count) - - def test_abort_live_migrate(self): - migration = mock.MagicMock() - migration.id = "2" - migrations = [migration] - 
self.m_helper.get_running_migration.return_value = migrations - self.m_helper.find_instance.return_value = self.INSTANCE_UUID - try: - self.action.abort() - except Exception as exc: - self.fail(exc) - - self.m_helper.abort_live_migrate.assert_called_once_with( - instance_id=self.INSTANCE_UUID, source="compute1-hostname", - destination="compute2-hostname") diff --git a/watcher/tests/applier/actions/test_resize.py b/watcher/tests/applier/actions/test_resize.py deleted file mode 100644 index 0cdfc0e..0000000 --- a/watcher/tests/applier/actions/test_resize.py +++ /dev/null @@ -1,93 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from __future__ import unicode_literals -import jsonschema -import mock - -from watcher.applier.actions import base as baction -from watcher.applier.actions import resize -from watcher.common import clients -from watcher.common import nova_helper -from watcher.tests import base - - -class TestResize(base.TestCase): - - INSTANCE_UUID = "94ae2f92-b7fd-4da7-9e97-f13504ae98c4" - - def setUp(self): - super(TestResize, self).setUp() - - self.r_osc_cls = mock.Mock() - self.r_helper_cls = mock.Mock() - self.r_helper = mock.Mock(spec=nova_helper.NovaHelper) - self.r_helper_cls.return_value = self.r_helper - self.r_osc = mock.Mock(spec=clients.OpenStackClients) - self.r_osc_cls.return_value = self.r_osc - - r_openstack_clients = mock.patch.object( - clients, "OpenStackClients", self.r_osc_cls) - r_nova_helper = mock.patch.object( - nova_helper, "NovaHelper", self.r_helper_cls) - - r_openstack_clients.start() - r_nova_helper.start() - - self.addCleanup(r_openstack_clients.stop) - self.addCleanup(r_nova_helper.stop) - - self.input_parameters = { - "flavor": "x1", - baction.BaseAction.RESOURCE_ID: self.INSTANCE_UUID, - } - self.action = resize.Resize(mock.Mock()) - self.action.input_parameters = self.input_parameters - - def test_parameters(self): - params = {baction.BaseAction.RESOURCE_ID: - self.INSTANCE_UUID, - self.action.FLAVOR: 'x1'} - self.action.input_parameters = params - self.assertTrue(self.action.validate_parameters()) - - def test_parameters_exception_empty_fields(self): - parameters = {baction.BaseAction.RESOURCE_ID: - self.INSTANCE_UUID, - self.action.FLAVOR: None} - self.action.input_parameters = parameters - self.assertRaises(jsonschema.ValidationError, - self.action.validate_parameters) - - def test_parameters_exception_flavor(self): - parameters = {baction.BaseAction.RESOURCE_ID: - self.INSTANCE_UUID, - self.action.FLAVOR: None} - self.action.input_parameters = parameters - self.assertRaises(jsonschema.ValidationError, - self.action.validate_parameters) - 
- def test_parameters_exception_resource_id(self): - parameters = {baction.BaseAction.RESOURCE_ID: "EFEF", - self.action.FLAVOR: 'x1'} - self.action.input_parameters = parameters - self.assertRaises(jsonschema.ValidationError, - self.action.validate_parameters) - - def test_execute_resize(self): - self.r_helper.find_instance.return_value = self.INSTANCE_UUID - self.action.execute() - self.r_helper.resize_instance.assert_called_once_with( - instance_id=self.INSTANCE_UUID, flavor='x1') diff --git a/watcher/tests/applier/actions/test_sleep.py b/watcher/tests/applier/actions/test_sleep.py deleted file mode 100644 index 0b83c8f..0000000 --- a/watcher/tests/applier/actions/test_sleep.py +++ /dev/null @@ -1,46 +0,0 @@ -# Copyright (c) 2016 b<>com -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - - -import jsonschema -import mock - -from watcher.applier.actions import sleep -from watcher.tests import base - - -class TestSleep(base.TestCase): - def setUp(self): - super(TestSleep, self).setUp() - self.s = sleep.Sleep(mock.Mock()) - - def test_parameters_duration(self): - self.s.input_parameters = {self.s.DURATION: 1.0} - self.assertTrue(self.s.validate_parameters()) - - def test_parameters_duration_empty(self): - self.s.input_parameters = {self.s.DURATION: None} - self.assertRaises(jsonschema.ValidationError, - self.s.validate_parameters) - - def test_parameters_wrong_parameter(self): - self.s.input_parameters = {self.s.DURATION: "ef"} - self.assertRaises(jsonschema.ValidationError, - self.s.validate_parameters) - - def test_parameters_add_field(self): - self.s.input_parameters = {self.s.DURATION: 1.0, "not_required": "nop"} - self.assertRaises(jsonschema.ValidationError, - self.s.validate_parameters) diff --git a/watcher/tests/applier/messaging/__init__.py b/watcher/tests/applier/messaging/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/tests/applier/messaging/test_trigger_action_plan_endpoint.py b/watcher/tests/applier/messaging/test_trigger_action_plan_endpoint.py deleted file mode 100644 index cb6bf90..0000000 --- a/watcher/tests/applier/messaging/test_trigger_action_plan_endpoint.py +++ /dev/null @@ -1,41 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Authors: Jean-Emile DARTOIS -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# - - -import mock - -from watcher.applier.messaging import trigger -from watcher.common import utils -from watcher.tests import base - - -class TestTriggerActionPlan(base.TestCase): - def __init__(self, *args, **kwds): - super(TestTriggerActionPlan, self).__init__(*args, **kwds) - self.applier = mock.MagicMock() - self.endpoint = trigger.TriggerActionPlan(self.applier) - - def setUp(self): - super(TestTriggerActionPlan, self).setUp() - - def test_launch_action_plan(self): - action_plan_uuid = utils.generate_uuid() - expected_uuid = self.endpoint.launch_action_plan(self.context, - action_plan_uuid) - self.assertEqual(expected_uuid, action_plan_uuid) diff --git a/watcher/tests/applier/test_applier_manager.py b/watcher/tests/applier/test_applier_manager.py deleted file mode 100644 index bfa6750..0000000 --- a/watcher/tests/applier/test_applier_manager.py +++ /dev/null @@ -1,42 +0,0 @@ -# Copyright (c) 2015 b<>com -# -# Authors: Jean-Emile DARTOIS -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - -import mock - -import oslo_messaging as om -from watcher.applier import manager as applier_manager -from watcher.common import service -from watcher.tests import base - - -class TestApplierManager(base.TestCase): - def setUp(self): - super(TestApplierManager, self).setUp() - p_heartbeat = mock.patch.object( - service.ServiceHeartbeat, "send_beat") - self.m_heartbeat = p_heartbeat.start() - self.addCleanup(p_heartbeat.stop) - self.applier = service.Service(applier_manager.ApplierManager) - - @mock.patch.object(om.rpc.server.RPCServer, "stop") - @mock.patch.object(om.rpc.server.RPCServer, "start") - def test_start(self, m_messaging_start, m_messaging_stop): - self.applier.start() - self.applier.stop() - self.assertEqual(1, m_messaging_start.call_count) - self.assertEqual(1, m_messaging_stop.call_count) diff --git a/watcher/tests/applier/test_rpcapi.py b/watcher/tests/applier/test_rpcapi.py deleted file mode 100644 index 80e221c..0000000 --- a/watcher/tests/applier/test_rpcapi.py +++ /dev/null @@ -1,58 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Authors: Jean-Emile DARTOIS -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - -import mock -import oslo_messaging as om -from watcher.applier import rpcapi - -from watcher.common import exception -from watcher.common import utils -from watcher.tests import base - - -class TestApplierAPI(base.TestCase): - - api = rpcapi.ApplierAPI() - - def setUp(self): - super(TestApplierAPI, self).setUp() - - def test_get_api_version(self): - with mock.patch.object(om.RPCClient, 'call') as mock_call: - expected_context = self.context - self.api.check_api_version(expected_context) - mock_call.assert_called_once_with( - expected_context, - 'check_api_version', - api_version=rpcapi.ApplierAPI().API_VERSION) - - def test_execute_audit_without_error(self): - with mock.patch.object(om.RPCClient, 'cast') as mock_cast: - action_plan_uuid = utils.generate_uuid() - self.api.launch_action_plan(self.context, action_plan_uuid) - mock_cast.assert_called_once_with( - self.context, - 'launch_action_plan', - action_plan_uuid=action_plan_uuid) - - def test_execute_action_plan_throw_exception(self): - action_plan_uuid = "uuid" - self.assertRaises(exception.InvalidUuidOrName, - self.api.launch_action_plan, - action_plan_uuid) diff --git a/watcher/tests/applier/workflow_engine/__init__.py b/watcher/tests/applier/workflow_engine/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/tests/applier/workflow_engine/loading/__init__.py b/watcher/tests/applier/workflow_engine/loading/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/tests/applier/workflow_engine/loading/test_default_engine_loader.py b/watcher/tests/applier/workflow_engine/loading/test_default_engine_loader.py deleted file mode 100644 index 49d27ca..0000000 --- a/watcher/tests/applier/workflow_engine/loading/test_default_engine_loader.py +++ /dev/null @@ -1,32 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the 
License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from __future__ import unicode_literals - -from watcher.applier.loading import default -from watcher.applier.workflow_engine import base as wbase -from watcher.tests import base - - -class TestDefaultActionLoader(base.TestCase): - def setUp(self): - super(TestDefaultActionLoader, self).setUp() - self.loader = default.DefaultWorkFlowEngineLoader() - - def test_endpoints(self): - for endpoint in self.loader.list_available(): - loaded = self.loader.load(endpoint) - self.assertIsNotNone(loaded) - self.assertIsInstance(loaded, wbase.BaseWorkFlowEngine) diff --git a/watcher/tests/applier/workflow_engine/test_default_workflow_engine.py b/watcher/tests/applier/workflow_engine/test_default_workflow_engine.py deleted file mode 100644 index fdf902c..0000000 --- a/watcher/tests/applier/workflow_engine/test_default_workflow_engine.py +++ /dev/null @@ -1,354 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Authors: Jean-Emile DARTOIS -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import abc -import mock - -import six - -from watcher.applier.actions import base as abase -from watcher.applier.actions import factory -from watcher.applier.workflow_engine import default as tflow -from watcher.common import exception -from watcher.common import utils -from watcher import notifications -from watcher import objects -from watcher.tests.db import base -from watcher.tests.objects import utils as obj_utils - - -class ExpectedException(Exception): - pass - - -@six.add_metaclass(abc.ABCMeta) -class FakeAction(abase.BaseAction): - def schema(self): - pass - - def post_condition(self): - pass - - def pre_condition(self): - pass - - def revert(self): - pass - - def execute(self): - raise ExpectedException() - - def get_description(self): - return "fake action, just for test" - - -class TestDefaultWorkFlowEngine(base.DbTestCase): - def setUp(self): - super(TestDefaultWorkFlowEngine, self).setUp() - self.engine = tflow.DefaultWorkFlowEngine( - config=mock.Mock(), - context=self.context, - applier_manager=mock.MagicMock()) - self.engine.config.max_workers = 2 - - @mock.patch('taskflow.engines.load') - @mock.patch('taskflow.patterns.graph_flow.Flow.link') - def test_execute(self, graph_flow, engines): - actions = mock.MagicMock() - try: - self.engine.execute(actions) - self.assertTrue(engines.called) - except Exception as exc: - self.fail(exc) - - def create_action(self, action_type, parameters, parents=None, uuid=None, - state=None): - action = { - 'uuid': uuid or utils.generate_uuid(), - 'action_plan_id': 0, - 'action_type': action_type, - 'input_parameters': parameters, - 'state': objects.action.State.PENDING, - 'parents': parents or [], - - } - new_action = objects.Action(self.context, **action) - with mock.patch.object(notifications.action, 'send_create'): - new_action.create() - return new_action - - def check_action_state(self, action, expected_state): - to_check = objects.Action.get_by_uuid(self.context, action.uuid) - 
self.assertEqual(expected_state, to_check.state) - - def check_actions_state(self, actions, expected_state): - for a in actions: - self.check_action_state(a, expected_state) - - @mock.patch('taskflow.engines.load') - @mock.patch('taskflow.patterns.graph_flow.Flow.link') - def test_execute_with_no_actions(self, graph_flow, engines): - actions = [] - try: - self.engine.execute(actions) - self.assertFalse(graph_flow.called) - self.assertTrue(engines.called) - except Exception as exc: - self.fail(exc) - - @mock.patch.object(objects.ActionPlan, "get_by_id") - @mock.patch.object(notifications.action, 'send_execution_notification') - @mock.patch.object(notifications.action, 'send_update') - def test_execute_with_one_action(self, mock_send_update, - mock_execution_notification, - m_get_actionplan): - m_get_actionplan.return_value = obj_utils.get_test_action_plan( - self.context, id=0) - actions = [self.create_action("nop", {'message': 'test'})] - try: - self.engine.execute(actions) - self.check_actions_state(actions, objects.action.State.SUCCEEDED) - - except Exception as exc: - self.fail(exc) - - @mock.patch.object(objects.ActionPlan, "get_by_id") - @mock.patch.object(notifications.action, 'send_execution_notification') - @mock.patch.object(notifications.action, 'send_update') - def test_execute_nop_sleep(self, mock_send_update, - mock_execution_notification, - m_get_actionplan): - m_get_actionplan.return_value = obj_utils.get_test_action_plan( - self.context, id=0) - actions = [] - first_nop = self.create_action("nop", {'message': 'test'}) - second_nop = self.create_action("nop", {'message': 'second test'}) - sleep = self.create_action("sleep", {'duration': 0.0}, - parents=[first_nop.uuid, second_nop.uuid]) - actions.extend([first_nop, second_nop, sleep]) - - try: - self.engine.execute(actions) - self.check_actions_state(actions, objects.action.State.SUCCEEDED) - - except Exception as exc: - self.fail(exc) - - @mock.patch.object(objects.ActionPlan, "get_by_id") - 
@mock.patch.object(notifications.action, 'send_execution_notification') - @mock.patch.object(notifications.action, 'send_update') - def test_execute_with_parents(self, mock_send_update, - mock_execution_notification, - m_get_actionplan): - m_get_actionplan.return_value = obj_utils.get_test_action_plan( - self.context, id=0) - actions = [] - first_nop = self.create_action( - "nop", {'message': 'test'}, - uuid='bc7eee5c-4fbe-4def-9744-b539be55aa19') - second_nop = self.create_action( - "nop", {'message': 'second test'}, - uuid='0565bd5c-aa00-46e5-8d81-2cb5cc1ffa23') - first_sleep = self.create_action( - "sleep", {'duration': 0.0}, parents=[first_nop.uuid, - second_nop.uuid], - uuid='be436531-0da3-4dad-a9c0-ea1d2aff6496') - second_sleep = self.create_action( - "sleep", {'duration': 0.0}, parents=[first_sleep.uuid], - uuid='9eb51e14-936d-4d12-a500-6ba0f5e0bb1c') - actions.extend([first_nop, second_nop, first_sleep, second_sleep]) - - expected_nodes = [ - {'uuid': 'bc7eee5c-4fbe-4def-9744-b539be55aa19', - 'input_parameters': {u'message': u'test'}, - 'action_plan_id': 0, 'state': u'PENDING', 'parents': [], - 'action_type': u'nop', 'id': 1}, - {'uuid': '0565bd5c-aa00-46e5-8d81-2cb5cc1ffa23', - 'input_parameters': {u'message': u'second test'}, - 'action_plan_id': 0, 'state': u'PENDING', 'parents': [], - 'action_type': u'nop', 'id': 2}, - {'uuid': 'be436531-0da3-4dad-a9c0-ea1d2aff6496', - 'input_parameters': {u'duration': 0.0}, - 'action_plan_id': 0, 'state': u'PENDING', - 'parents': [u'bc7eee5c-4fbe-4def-9744-b539be55aa19', - u'0565bd5c-aa00-46e5-8d81-2cb5cc1ffa23'], - 'action_type': u'sleep', 'id': 3}, - {'uuid': '9eb51e14-936d-4d12-a500-6ba0f5e0bb1c', - 'input_parameters': {u'duration': 0.0}, - 'action_plan_id': 0, 'state': u'PENDING', - 'parents': [u'be436531-0da3-4dad-a9c0-ea1d2aff6496'], - 'action_type': u'sleep', 'id': 4}] - - expected_edges = [ - ('action_type:nop uuid:0565bd5c-aa00-46e5-8d81-2cb5cc1ffa23', - 'action_type:sleep 
uuid:be436531-0da3-4dad-a9c0-ea1d2aff6496'), - ('action_type:nop uuid:bc7eee5c-4fbe-4def-9744-b539be55aa19', - 'action_type:sleep uuid:be436531-0da3-4dad-a9c0-ea1d2aff6496'), - ('action_type:sleep uuid:be436531-0da3-4dad-a9c0-ea1d2aff6496', - 'action_type:sleep uuid:9eb51e14-936d-4d12-a500-6ba0f5e0bb1c')] - - try: - flow = self.engine.execute(actions) - actual_nodes = sorted([x[0]._db_action.as_dict() - for x in flow.iter_nodes()], - key=lambda x: x['id']) - for expected, actual in zip(expected_nodes, actual_nodes): - for key in expected.keys(): - self.assertIn(expected[key], actual.values()) - actual_edges = [(u.name, v.name) - for (u, v, _) in flow.iter_links()] - - for edge in expected_edges: - self.assertIn(edge, actual_edges) - - self.check_actions_state(actions, objects.action.State.SUCCEEDED) - - except Exception as exc: - self.fail(exc) - - @mock.patch.object(objects.ActionPlan, "get_by_id") - @mock.patch.object(notifications.action, 'send_execution_notification') - @mock.patch.object(notifications.action, 'send_update') - def test_execute_with_two_actions(self, m_send_update, m_execution, - m_get_actionplan): - m_get_actionplan.return_value = obj_utils.get_test_action_plan( - self.context, id=0) - actions = [] - second = self.create_action("sleep", {'duration': 0.0}) - first = self.create_action("nop", {'message': 'test'}) - - actions.append(first) - actions.append(second) - - try: - self.engine.execute(actions) - self.check_actions_state(actions, objects.action.State.SUCCEEDED) - - except Exception as exc: - self.fail(exc) - - @mock.patch.object(objects.ActionPlan, "get_by_id") - @mock.patch.object(notifications.action, 'send_execution_notification') - @mock.patch.object(notifications.action, 'send_update') - def test_execute_with_three_actions(self, m_send_update, m_execution, - m_get_actionplan): - m_get_actionplan.return_value = obj_utils.get_test_action_plan( - self.context, id=0) - actions = [] - third = self.create_action("nop", {'message': 'next'}) 
- second = self.create_action("sleep", {'duration': 0.0}) - first = self.create_action("nop", {'message': 'hello'}) - - self.check_action_state(first, objects.action.State.PENDING) - self.check_action_state(second, objects.action.State.PENDING) - self.check_action_state(third, objects.action.State.PENDING) - - actions.append(first) - actions.append(second) - actions.append(third) - - try: - self.engine.execute(actions) - self.check_actions_state(actions, objects.action.State.SUCCEEDED) - - except Exception as exc: - self.fail(exc) - - @mock.patch.object(objects.ActionPlan, "get_by_id") - @mock.patch.object(notifications.action, 'send_execution_notification') - @mock.patch.object(notifications.action, 'send_update') - def test_execute_with_exception(self, m_send_update, m_execution, - m_get_actionplan): - m_get_actionplan.return_value = obj_utils.get_test_action_plan( - self.context, id=0) - actions = [] - - third = self.create_action("no_exist", {'message': 'next'}) - second = self.create_action("sleep", {'duration': 0.0}) - first = self.create_action("nop", {'message': 'hello'}) - - self.check_action_state(first, objects.action.State.PENDING) - self.check_action_state(second, objects.action.State.PENDING) - self.check_action_state(third, objects.action.State.PENDING) - - actions.append(first) - actions.append(second) - actions.append(third) - - self.assertRaises(exception.WorkflowExecutionException, - self.engine.execute, actions) - - self.check_action_state(first, objects.action.State.SUCCEEDED) - self.check_action_state(second, objects.action.State.SUCCEEDED) - self.check_action_state(third, objects.action.State.FAILED) - - @mock.patch.object(objects.ActionPlan, "get_by_id") - @mock.patch.object(notifications.action, 'send_execution_notification') - @mock.patch.object(notifications.action, 'send_update') - @mock.patch.object(factory.ActionFactory, "make_action") - def test_execute_with_action_exception(self, m_make_action, m_send_update, - m_send_execution, 
m_get_actionplan): - m_get_actionplan.return_value = obj_utils.get_test_action_plan( - self.context, id=0) - actions = [self.create_action("fake_action", {})] - m_make_action.return_value = FakeAction(mock.Mock()) - - exc = self.assertRaises(exception.WorkflowExecutionException, - self.engine.execute, actions) - - self.assertIsInstance(exc.kwargs['error'], ExpectedException) - self.check_action_state(actions[0], objects.action.State.FAILED) - - @mock.patch.object(objects.ActionPlan, "get_by_uuid") - def test_execute_with_action_plan_cancel(self, m_get_actionplan): - obj_utils.create_test_goal(self.context) - strategy = obj_utils.create_test_strategy(self.context) - audit = obj_utils.create_test_audit( - self.context, strategy_id=strategy.id) - action_plan = obj_utils.create_test_action_plan( - self.context, audit_id=audit.id, - strategy_id=strategy.id, - state=objects.action_plan.State.CANCELLING) - action1 = obj_utils.create_test_action( - self.context, action_plan_id=action_plan.id, - action_type='nop', state=objects.action.State.SUCCEEDED, - input_parameters={'message': 'hello World'}) - action2 = obj_utils.create_test_action( - self.context, action_plan_id=action_plan.id, - action_type='nop', state=objects.action.State.ONGOING, - uuid='9eb51e14-936d-4d12-a500-6ba0f5e0bb1c', - input_parameters={'message': 'hello World'}) - action3 = obj_utils.create_test_action( - self.context, action_plan_id=action_plan.id, - action_type='nop', state=objects.action.State.PENDING, - uuid='bc7eee5c-4fbe-4def-9744-b539be55aa19', - input_parameters={'message': 'hello World'}) - m_get_actionplan.return_value = action_plan - actions = [] - actions.append(action1) - actions.append(action2) - actions.append(action3) - self.assertRaises(exception.ActionPlanCancelled, - self.engine.execute, actions) - try: - self.check_action_state(action1, objects.action.State.SUCCEEDED) - self.check_action_state(action2, objects.action.State.CANCELLED) - self.check_action_state(action3, 
objects.action.State.CANCELLED) - - except Exception as exc: - self.fail(exc) diff --git a/watcher/tests/applier/workflow_engine/test_taskflow_action_container.py b/watcher/tests/applier/workflow_engine/test_taskflow_action_container.py deleted file mode 100644 index c05d471..0000000 --- a/watcher/tests/applier/workflow_engine/test_taskflow_action_container.py +++ /dev/null @@ -1,79 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Authors: Jean-Emile DARTOIS -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import eventlet -import mock - -from watcher.applier.workflow_engine import default as tflow -from watcher import objects -from watcher.tests.db import base -from watcher.tests.objects import utils as obj_utils - - -class TestTaskFlowActionContainer(base.DbTestCase): - def setUp(self): - super(TestTaskFlowActionContainer, self).setUp() - self.engine = tflow.DefaultWorkFlowEngine( - config=mock.Mock(), - context=self.context, - applier_manager=mock.MagicMock()) - obj_utils.create_test_goal(self.context) - self.strategy = obj_utils.create_test_strategy(self.context) - self.audit = obj_utils.create_test_audit( - self.context, strategy_id=self.strategy.id) - - def test_execute(self): - action_plan = obj_utils.create_test_action_plan( - self.context, audit_id=self.audit.id, - strategy_id=self.strategy.id, - state=objects.action.State.ONGOING) - - action = obj_utils.create_test_action( - self.context, action_plan_id=action_plan.id, - state=objects.action.State.ONGOING, - action_type='nop', - input_parameters={'message': 'hello World'}) - action_container = tflow.TaskFlowActionContainer( - db_action=action, - engine=self.engine) - action_container.execute() - - self.assertTrue(action.state, objects.action.State.SUCCEEDED) - - @mock.patch('eventlet.spawn') - def test_execute_with_cancel_action_plan(self, mock_eventlet_spawn): - action_plan = obj_utils.create_test_action_plan( - self.context, audit_id=self.audit.id, - strategy_id=self.strategy.id, - state=objects.action_plan.State.CANCELLING) - - action = obj_utils.create_test_action( - self.context, action_plan_id=action_plan.id, - state=objects.action.State.ONGOING, - action_type='nop', - input_parameters={'message': 'hello World'}) - action_container = tflow.TaskFlowActionContainer( - db_action=action, - engine=self.engine) - - def empty_test(): - pass - et = eventlet.spawn(empty_test) - mock_eventlet_spawn.return_value = et - action_container.execute() - et.kill.assert_called_with() diff --git 
a/watcher/tests/base.py b/watcher/tests/base.py deleted file mode 100644 index fc57081..0000000 --- a/watcher/tests/base.py +++ /dev/null @@ -1,142 +0,0 @@ -# Copyright 2010-2011 OpenStack Foundation -# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import copy -import os - -import mock -from oslo_config import cfg -from oslo_log import log -from oslo_messaging import conffixture -from oslotest import base -import pecan -from pecan import testing -import testscenarios - -from watcher.common import context as watcher_context -from watcher.common import service -from watcher.objects import base as objects_base -from watcher.tests import conf_fixture -from watcher.tests import policy_fixture - - -CONF = cfg.CONF -try: - log.register_options(CONF) -except cfg.ArgsAlreadyParsedError: - pass -CONF.set_override('use_stderr', False) - - -class BaseTestCase(testscenarios.WithScenarios, base.BaseTestCase): - """Test base class.""" - - def setUp(self): - super(BaseTestCase, self).setUp() - self.addCleanup(cfg.CONF.reset) - - -class TestCase(BaseTestCase): - """Test case base class for all unit tests.""" - - def setUp(self): - super(TestCase, self).setUp() - self.useFixture(conf_fixture.ConfReloadFixture()) - self.policy = self.useFixture(policy_fixture.PolicyFixture()) - self.messaging_conf = self.useFixture(conffixture.ConfFixture(CONF)) - self.messaging_conf.transport_driver = 'fake' - - 
cfg.CONF.set_override("auth_type", "admin_token", - group='keystone_authtoken') - cfg.CONF.set_override("auth_uri", "http://127.0.0.1/identity", - group='keystone_authtoken') - - app_config_path = os.path.join(os.path.dirname(__file__), 'config.py') - self.app = testing.load_test_app(app_config_path) - self.token_info = { - 'token': { - 'project': { - 'id': 'fake_project' - }, - 'user': { - 'id': 'fake_user' - } - } - } - - objects_base.WatcherObject.indirection_api = None - - self.context = watcher_context.RequestContext( - auth_token_info=self.token_info, - project_id='fake_project', - user_id='fake_user') - - self.policy = self.useFixture(policy_fixture.PolicyFixture()) - - def make_context(*args, **kwargs): - # If context hasn't been constructed with token_info - if not kwargs.get('auth_token_info'): - kwargs['auth_token_info'] = copy.deepcopy(self.token_info) - if not kwargs.get('project_id'): - kwargs['project_id'] = 'fake_project' - if not kwargs.get('user_id'): - kwargs['user_id'] = 'fake_user' - - context = watcher_context.RequestContext(*args, **kwargs) - return watcher_context.RequestContext.from_dict(context.to_dict()) - - p = mock.patch.object(watcher_context, 'make_context', - side_effect=make_context) - self.mock_make_context = p.start() - self.addCleanup(p.stop) - - self.useFixture(conf_fixture.ConfFixture(cfg.CONF)) - self._reset_singletons() - - self._base_test_obj_backup = copy.copy( - objects_base.WatcherObjectRegistry._registry._obj_classes) - self.addCleanup(self._restore_obj_registry) - self.addCleanup(self._reset_singletons) - - def _reset_singletons(self): - service.Singleton._instances.clear() - - def reset_pecan(): - pecan.set_config({}, overwrite=True) - - self.addCleanup(reset_pecan) - - def _restore_obj_registry(self): - objects_base.WatcherObjectRegistry._registry._obj_classes = ( - self._base_test_obj_backup) - - def config(self, **kw): - """Override config options for a test.""" - group = kw.pop('group', None) - for k, v in 
kw.items(): - CONF.set_override(k, v, group) - - def get_path(self, project_file=None): - """Get the absolute path to a file. Used for testing the API. - - :param project_file: File whose path to return. Default: None. - :returns: path to the specified file, or path to project root. - """ - root = os.path.abspath( - os.path.join(os.path.dirname(__file__), '..', '..')) - if project_file: - return os.path.join(root, project_file) - else: - return root diff --git a/watcher/tests/cmd/__init__.py b/watcher/tests/cmd/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/tests/cmd/test_api.py b/watcher/tests/cmd/test_api.py deleted file mode 100644 index bdeba3b..0000000 --- a/watcher/tests/cmd/test_api.py +++ /dev/null @@ -1,66 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from __future__ import absolute_import -from __future__ import unicode_literals - -import types - -import mock -from oslo_config import cfg -from oslo_service import wsgi -from pecan.testing import load_test_app - -from watcher.api import config as api_config -from watcher.cmd import api -from watcher.common import service -from watcher.tests import base - - -class TestApi(base.BaseTestCase): - - def setUp(self): - super(TestApi, self).setUp() - - self.conf = cfg.CONF - self._parse_cli_opts = self.conf._parse_cli_opts - - def _fake_parse(self, args=[]): - return cfg.ConfigOpts._parse_cli_opts(self, []) - - _fake_parse_method = types.MethodType(_fake_parse, self.conf) - self.conf._parse_cli_opts = _fake_parse_method - - def tearDown(self): - super(TestApi, self).tearDown() - self.conf._parse_cli_opts = self._parse_cli_opts - - @mock.patch.object(wsgi, "Server", mock.Mock()) - @mock.patch("watcher.api.app.pecan.make_app") - @mock.patch.object(service, "launch") - def test_run_api_app(self, m_launcher, m_make_app): - m_make_app.return_value = load_test_app(config=api_config.PECAN_CONFIG) - api.main() - self.assertEqual(1, m_launcher.call_count) - - @mock.patch.object(wsgi, "Server", mock.Mock()) - @mock.patch("watcher.api.app.pecan.make_app") - @mock.patch.object(service, "launch") - def test_run_api_app_serve_specific_address(self, m_launcher, m_make_app): - cfg.CONF.set_default("host", "localhost", group="api") - m_make_app.return_value = load_test_app(config=api_config.PECAN_CONFIG) - api.main() - self.assertEqual(1, m_launcher.call_count) diff --git a/watcher/tests/cmd/test_applier.py b/watcher/tests/cmd/test_applier.py deleted file mode 100644 index 25690eb..0000000 --- a/watcher/tests/cmd/test_applier.py +++ /dev/null @@ -1,55 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import absolute_import -from __future__ import unicode_literals - -import types - -import mock -from oslo_config import cfg -from oslo_service import service -from watcher.common import service as watcher_service - -from watcher.cmd import applier -from watcher.tests import base - - -class TestApplier(base.BaseTestCase): - def setUp(self): - super(TestApplier, self).setUp() - - self.conf = cfg.CONF - self._parse_cli_opts = self.conf._parse_cli_opts - - def _fake_parse(self, args=[]): - return cfg.ConfigOpts._parse_cli_opts(self, []) - - _fake_parse_method = types.MethodType(_fake_parse, self.conf) - self.conf._parse_cli_opts = _fake_parse_method - p_heartbeat = mock.patch.object( - watcher_service.ServiceHeartbeat, "send_beat") - self.m_heartbeat = p_heartbeat.start() - self.addCleanup(p_heartbeat.stop) - - def tearDown(self): - super(TestApplier, self).tearDown() - self.conf._parse_cli_opts = self._parse_cli_opts - - @mock.patch.object(service, "launch") - def test_run_applier_app(self, m_launch): - applier.main() - self.assertEqual(1, m_launch.call_count) diff --git a/watcher/tests/cmd/test_db_manage.py b/watcher/tests/cmd/test_db_manage.py deleted file mode 100644 index f2e85ee..0000000 --- a/watcher/tests/cmd/test_db_manage.py +++ /dev/null @@ -1,175 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import sys - -import mock -from oslo_config import cfg - -from watcher.cmd import dbmanage -from watcher.db import migration -from watcher.db import purge -from watcher.tests import base - - -class TestDBManageRunApp(base.TestCase): - - scenarios = ( - ("upgrade", {"command": "upgrade", "expected": "upgrade"}), - ("downgrade", {"command": "downgrade", "expected": "downgrade"}), - ("revision", {"command": "revision", "expected": "revision"}), - ("stamp", {"command": "stamp", "expected": "stamp"}), - ("version", {"command": "version", "expected": "version"}), - ("create_schema", {"command": "create_schema", - "expected": "create_schema"}), - ("purge", {"command": "purge", "expected": "purge"}), - ("no_param", {"command": None, "expected": "upgrade"}), - ) - - @mock.patch.object(dbmanage, "register_sub_command_opts", mock.Mock()) - @mock.patch("watcher.cmd.dbmanage.service.prepare_service") - @mock.patch("watcher.cmd.dbmanage.sys") - def test_run_db_manage_app(self, m_sys, m_prepare_service): - # Patch command function - m_func = mock.Mock() - cfg.CONF.register_opt(cfg.SubCommandOpt("command")) - cfg.CONF.command.func = m_func - - # Only append if the command is not None - m_sys.argv = list(filter(None, ["watcher-db-manage", self.command])) - - dbmanage.main() - self.assertEqual(1, m_func.call_count) - m_prepare_service.assert_called_once_with( - ["watcher-db-manage", self.expected], cfg.CONF) - - -class TestDBManageRunCommand(base.TestCase): - - @mock.patch.object(migration, "upgrade") - def test_run_db_upgrade(self, m_upgrade): - 
cfg.CONF.register_opt(cfg.StrOpt("revision"), group="command") - cfg.CONF.set_default("revision", "dummy", group="command") - dbmanage.DBCommand.upgrade() - - m_upgrade.assert_called_once_with("dummy") - - @mock.patch.object(migration, "downgrade") - def test_run_db_downgrade(self, m_downgrade): - cfg.CONF.register_opt(cfg.StrOpt("revision"), group="command") - cfg.CONF.set_default("revision", "dummy", group="command") - dbmanage.DBCommand.downgrade() - - m_downgrade.assert_called_once_with("dummy") - - @mock.patch.object(migration, "revision") - def test_run_db_revision(self, m_revision): - cfg.CONF.register_opt(cfg.StrOpt("message"), group="command") - cfg.CONF.register_opt(cfg.StrOpt("autogenerate"), group="command") - cfg.CONF.set_default( - "message", "dummy_message", group="command" - ) - cfg.CONF.set_default( - "autogenerate", "dummy_autogenerate", group="command" - ) - dbmanage.DBCommand.revision() - - m_revision.assert_called_once_with( - "dummy_message", "dummy_autogenerate" - ) - - @mock.patch.object(migration, "stamp") - def test_run_db_stamp(self, m_stamp): - cfg.CONF.register_opt(cfg.StrOpt("revision"), group="command") - cfg.CONF.set_default("revision", "dummy", group="command") - dbmanage.DBCommand.stamp() - - @mock.patch.object(migration, "version") - def test_run_db_version(self, m_version): - dbmanage.DBCommand.version() - - self.assertEqual(1, m_version.call_count) - - @mock.patch.object(purge, "PurgeCommand") - def test_run_db_purge(self, m_purge_cls): - m_purge = mock.Mock() - m_purge_cls.return_value = m_purge - m_purge_cls.get_goal_uuid.return_value = 'Some UUID' - cfg.CONF.register_opt(cfg.IntOpt("age_in_days"), group="command") - cfg.CONF.register_opt(cfg.IntOpt("max_number"), group="command") - cfg.CONF.register_opt(cfg.StrOpt("goal"), group="command") - cfg.CONF.register_opt(cfg.BoolOpt("exclude_orphans"), group="command") - cfg.CONF.register_opt(cfg.BoolOpt("dry_run"), group="command") - cfg.CONF.set_default("age_in_days", None, 
group="command") - cfg.CONF.set_default("max_number", None, group="command") - cfg.CONF.set_default("goal", None, group="command") - cfg.CONF.set_default("exclude_orphans", True, group="command") - cfg.CONF.set_default("dry_run", False, group="command") - - dbmanage.DBCommand.purge() - - m_purge_cls.assert_called_once_with( - None, None, 'Some UUID', True, False) - m_purge.execute.assert_called_once_with() - - @mock.patch.object(sys, "exit") - @mock.patch.object(purge, "PurgeCommand") - def test_run_db_purge_negative_max_number(self, m_purge_cls, m_exit): - m_purge = mock.Mock() - m_purge_cls.return_value = m_purge - m_purge_cls.get_goal_uuid.return_value = 'Some UUID' - cfg.CONF.register_opt(cfg.IntOpt("age_in_days"), group="command") - cfg.CONF.register_opt(cfg.IntOpt("max_number"), group="command") - cfg.CONF.register_opt(cfg.StrOpt("goal"), group="command") - cfg.CONF.register_opt(cfg.BoolOpt("exclude_orphans"), group="command") - cfg.CONF.register_opt(cfg.BoolOpt("dry_run"), group="command") - cfg.CONF.set_default("age_in_days", None, group="command") - cfg.CONF.set_default("max_number", -1, group="command") - cfg.CONF.set_default("goal", None, group="command") - cfg.CONF.set_default("exclude_orphans", True, group="command") - cfg.CONF.set_default("dry_run", False, group="command") - - dbmanage.DBCommand.purge() - - self.assertEqual(0, m_purge_cls.call_count) - self.assertEqual(0, m_purge.execute.call_count) - self.assertEqual(0, m_purge.do_delete.call_count) - self.assertEqual(1, m_exit.call_count) - - @mock.patch.object(sys, "exit") - @mock.patch.object(purge, "PurgeCommand") - def test_run_db_purge_dry_run(self, m_purge_cls, m_exit): - m_purge = mock.Mock() - m_purge_cls.return_value = m_purge - m_purge_cls.get_goal_uuid.return_value = 'Some UUID' - cfg.CONF.register_opt(cfg.IntOpt("age_in_days"), group="command") - cfg.CONF.register_opt(cfg.IntOpt("max_number"), group="command") - cfg.CONF.register_opt(cfg.StrOpt("goal"), group="command") - 
cfg.CONF.register_opt(cfg.BoolOpt("exclude_orphans"), group="command") - cfg.CONF.register_opt(cfg.BoolOpt("dry_run"), group="command") - cfg.CONF.set_default("age_in_days", None, group="command") - cfg.CONF.set_default("max_number", None, group="command") - cfg.CONF.set_default("goal", None, group="command") - cfg.CONF.set_default("exclude_orphans", True, group="command") - cfg.CONF.set_default("dry_run", True, group="command") - - dbmanage.DBCommand.purge() - - m_purge_cls.assert_called_once_with( - None, None, 'Some UUID', True, True) - self.assertEqual(1, m_purge.execute.call_count) - self.assertEqual(0, m_purge.do_delete.call_count) - self.assertEqual(0, m_exit.call_count) diff --git a/watcher/tests/cmd/test_decision_engine.py b/watcher/tests/cmd/test_decision_engine.py deleted file mode 100644 index 3f0380b..0000000 --- a/watcher/tests/cmd/test_decision_engine.py +++ /dev/null @@ -1,64 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from __future__ import absolute_import -from __future__ import unicode_literals - -import types - -import mock -from oslo_config import cfg -from oslo_service import service - -from watcher.cmd import decisionengine -from watcher.common import service as watcher_service -from watcher.decision_engine.audit import continuous -from watcher.decision_engine import sync -from watcher.tests import base - - -class TestDecisionEngine(base.BaseTestCase): - - def setUp(self): - super(TestDecisionEngine, self).setUp() - - self.conf = cfg.CONF - self._parse_cli_opts = self.conf._parse_cli_opts - - def _fake_parse(self, args=[]): - return cfg.ConfigOpts._parse_cli_opts(self, []) - - _fake_parse_method = types.MethodType(_fake_parse, self.conf) - self.conf._parse_cli_opts = _fake_parse_method - - p_heartbeat = mock.patch.object( - watcher_service.ServiceHeartbeat, "send_beat") - self.m_heartbeat = p_heartbeat.start() - self.addCleanup(p_heartbeat.stop) - p_continuoushandler = mock.patch.object( - continuous.ContinuousAuditHandler, "start") - self.m_continuoushandler = p_continuoushandler.start() - self.addCleanup(p_continuoushandler.stop) - - def tearDown(self): - super(TestDecisionEngine, self).tearDown() - self.conf._parse_cli_opts = self._parse_cli_opts - - @mock.patch.object(sync.Syncer, "sync", mock.Mock()) - @mock.patch.object(service, "launch") - def test_run_de_app(self, m_launch): - decisionengine.main() - self.assertEqual(1, m_launch.call_count) diff --git a/watcher/tests/common/__init__.py b/watcher/tests/common/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/tests/common/loader/__init__.py b/watcher/tests/common/loader/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/tests/common/loader/test_loader.py b/watcher/tests/common/loader/test_loader.py deleted file mode 100644 index 6eef2bd..0000000 --- a/watcher/tests/common/loader/test_loader.py +++ /dev/null @@ -1,105 +0,0 @@ -# -*- encoding: utf-8 -*- -# 
Copyright (c) 2015 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import unicode_literals - -import mock - -from oslo_config import cfg -from stevedore import driver as drivermanager -from stevedore import extension as stevedore_extension - -from watcher.common import exception -from watcher.common.loader import default -from watcher.common.loader import loadable -from watcher.tests import base - - -class FakeLoadable(loadable.Loadable): - - @classmethod - def get_config_opts(cls): - return [] - - -class FakeLoadableWithOpts(loadable.Loadable): - - @classmethod - def get_config_opts(cls): - return [ - cfg.StrOpt("test_opt", default="fake_with_opts"), - ] - - -class TestLoader(base.TestCase): - - def setUp(self): - super(TestLoader, self).setUp() - - def _fake_parse(self, *args, **kw): - return cfg.ConfigOpts._parse_cli_opts(cfg.CONF, []) - - cfg.CONF._parse_cli_opts = _fake_parse - - def test_load_loadable_no_opt(self): - fake_driver = drivermanager.DriverManager.make_test_instance( - extension=stevedore_extension.Extension( - name="fake", - entry_point="%s:%s" % (FakeLoadable.__module__, - FakeLoadable.__name__), - plugin=FakeLoadable, - obj=None), - namespace="TESTING") - - loader_manager = default.DefaultLoader(namespace='TESTING') - with mock.patch.object(drivermanager, - "DriverManager") as m_driver_manager: - m_driver_manager.return_value = fake_driver - loaded_driver = loader_manager.load(name='fake') - - 
self.assertIsInstance(loaded_driver, FakeLoadable) - - @mock.patch("watcher.common.loader.default.drivermanager.DriverManager") - def test_load_loadable_bad_plugin(self, m_driver_manager): - m_driver_manager.side_effect = Exception() - - loader_manager = default.DefaultLoader(namespace='TESTING') - self.assertRaises(exception.LoadingError, loader_manager.load, - name='bad_driver') - - def test_load_loadable_with_opts(self): - fake_driver = drivermanager.DriverManager.make_test_instance( - extension=stevedore_extension.Extension( - name="fake", - entry_point="%s:%s" % (FakeLoadableWithOpts.__module__, - FakeLoadableWithOpts.__name__), - plugin=FakeLoadableWithOpts, - obj=None), - namespace="TESTING") - - loader_manager = default.DefaultLoader(namespace='TESTING') - with mock.patch.object(drivermanager, - "DriverManager") as m_driver_manager: - m_driver_manager.return_value = fake_driver - loaded_driver = loader_manager.load(name='fake') - - self.assertIsInstance(loaded_driver, FakeLoadableWithOpts) - - self.assertEqual( - "fake_with_opts", loaded_driver.config.get("test_opt")) - - self.assertEqual( - "fake_with_opts", loaded_driver.config.test_opt) diff --git a/watcher/tests/common/test_cinder_helper.py b/watcher/tests/common/test_cinder_helper.py deleted file mode 100644 index 9c5991c..0000000 --- a/watcher/tests/common/test_cinder_helper.py +++ /dev/null @@ -1,126 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - -import mock - -from watcher.common import cinder_helper -from watcher.common import clients -from watcher.common import exception -from watcher.tests import base - - -@mock.patch.object(clients.OpenStackClients, 'cinder') -class TestCinderHelper(base.TestCase): - - def setUp(self): - super(TestCinderHelper, self).setUp() - - @staticmethod - def fake_storage_node(**kwargs): - node = mock.MagicMock() - node.binary = kwargs.get('binary', 'cinder-volume') - node.host = kwargs.get('name', 'host@backend') - - return node - - def test_get_storage_node_list(self, mock_cinder): - node1 = self.fake_storage_node() - cinder_util = cinder_helper.CinderHelper() - cinder_util.cinder.services.list.return_value = [node1] - cinder_util.get_storage_node_list() - cinder_util.cinder.services.list.assert_called_once_with( - binary='cinder-volume') - - def test_get_storage_node_by_name_success(self, mock_cinder): - node1 = self.fake_storage_node() - cinder_util = cinder_helper.CinderHelper() - cinder_util.cinder.services.list.return_value = [node1] - node = cinder_util.get_storage_node_by_name('host@backend') - - self.assertEqual(node, node1) - - def test_get_storage_node_by_name_failure(self, mock_cinder): - node1 = self.fake_storage_node() - cinder_util = cinder_helper.CinderHelper() - cinder_util.cinder.services.list.return_value = [node1] - self.assertRaisesRegex( - exception.StorageNodeNotFound, - "The storage node failure could not be found", - cinder_util.get_storage_node_by_name, 'failure') - - @staticmethod - def fake_pool(**kwargs): - pool = mock.MagicMock() - pool.name = kwargs.get('name', 'host@backend#pool') - - return pool - - def test_get_storage_pool_list(self, mock_cinder): - pool = self.fake_pool() - cinder_util = cinder_helper.CinderHelper() - cinder_util.cinder.pools.list.return_value = [pool] - cinder_util.get_storage_pool_list() - cinder_util.cinder.pools.list.assert_called_once_with(detailed=True) - - def test_get_storage_pool_by_name_success(self, 
mock_cinder): - pool1 = self.fake_pool() - cinder_util = cinder_helper.CinderHelper() - cinder_util.cinder.pools.list.return_value = [pool1] - pool = cinder_util.get_storage_pool_by_name('host@backend#pool') - - self.assertEqual(pool, pool1) - - def test_get_storage_pool_by_name_failure(self, mock_cinder): - pool1 = self.fake_pool() - cinder_util = cinder_helper.CinderHelper() - cinder_util.cinder.services.list.return_value = [pool1] - self.assertRaisesRegex( - exception.PoolNotFound, - "The pool failure could not be found", - cinder_util.get_storage_pool_by_name, 'failure') - - @staticmethod - def fake_volume_type(**kwargs): - volume_type = mock.MagicMock() - volume_type.name = kwargs.get('name', 'fake_type') - extra_specs = {'volume_backend_name': 'backend'} - volume_type.extra_specs = kwargs.get('extra_specs', extra_specs) - return volume_type - - def test_get_volume_type_list(self, mock_cinder): - volume_type1 = self.fake_volume_type() - cinder_util = cinder_helper.CinderHelper() - cinder_util.cinder.volume_types.list.return_value = [volume_type1] - cinder_util.get_volume_type_list() - cinder_util.cinder.volume_types.list.assert_called_once_with() - - def test_get_volume_type_by_backendname_with_backend_exist( - self, mock_cinder): - volume_type1 = self.fake_volume_type() - cinder_util = cinder_helper.CinderHelper() - cinder_util.cinder.volume_types.list.return_value = [volume_type1] - volume_type_name = cinder_util.get_volume_type_by_backendname( - 'backend') - - self.assertEqual(volume_type_name, volume_type1.name) - - def test_get_volume_type_by_backendname_with_no_backend_exist( - self, mock_cinder): - volume_type1 = self.fake_volume_type() - cinder_util = cinder_helper.CinderHelper() - cinder_util.cinder.volume_types.list.return_value = [volume_type1] - volume_type_name = cinder_util.get_volume_type_by_backendname( - 'nobackend') - - self.assertEqual("", volume_type_name) diff --git a/watcher/tests/common/test_clients.py 
b/watcher/tests/common/test_clients.py deleted file mode 100755 index 32ab071..0000000 --- a/watcher/tests/common/test_clients.py +++ /dev/null @@ -1,432 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from ceilometerclient import client as ceclient -import ceilometerclient.v2.client as ceclient_v2 -from cinderclient import client as ciclient -from cinderclient.v1 import client as ciclient_v1 -from glanceclient import client as glclient -from gnocchiclient import client as gnclient -from gnocchiclient.v1 import client as gnclient_v1 -from ironicclient import client as irclient -from ironicclient.v1 import client as irclient_v1 -from keystoneauth1 import loading as ka_loading -import mock -from monascaclient import client as monclient -from monascaclient.v2_0 import client as monclient_v2 -from neutronclient.neutron import client as netclient -from neutronclient.v2_0 import client as netclient_v2 -from novaclient import client as nvclient - -from watcher.common import clients -from watcher import conf -from watcher.tests import base - -CONF = conf.CONF - - -class TestClients(base.TestCase): - - def _register_watcher_clients_auth_opts(self): - _AUTH_CONF_GROUP = 'watcher_clients_auth' - ka_loading.register_auth_conf_options(CONF, _AUTH_CONF_GROUP) - ka_loading.register_session_conf_options(CONF, _AUTH_CONF_GROUP) - CONF.set_override('auth_type', 'password', group=_AUTH_CONF_GROUP) - - # ka_loading.load_auth_from_conf_options(CONF, _AUTH_CONF_GROUP) - # 
ka_loading.load_session_from_conf_options(CONF, _AUTH_CONF_GROUP) - # CONF.set_override( - # 'auth-url', 'http://server.ip:35357', group=_AUTH_CONF_GROUP) - - # If we don't clean up the _AUTH_CONF_GROUP conf options, then other - # tests that run after this one will fail, complaining about required - # options that _AUTH_CONF_GROUP wants. - def cleanup_conf_from_loading(): - # oslo_config doesn't seem to allow unregistering groups through a - # single method, so we do this instead - CONF.reset() - del CONF._groups[_AUTH_CONF_GROUP] - - self.addCleanup(cleanup_conf_from_loading) - - def reset_register_opts_mock(conf_obj, original_method): - conf_obj.register_opts = original_method - - original_register_opts = CONF.register_opts - self.addCleanup(reset_register_opts_mock, - CONF, - original_register_opts) - - expected = {'username': 'foousername', - 'password': 'foopassword', - 'auth_url': 'http://server.ip:35357', - 'cafile': None, - 'certfile': None, - 'keyfile': None, - 'insecure': False, - 'user_domain_id': 'foouserdomainid', - 'project_domain_id': 'fooprojdomainid'} - - # Because some of the conf options for auth plugins are not registered - # until right before they are loaded, and because the method that does - # the actual loading of the conf option values is an anonymous method - # (see _getter method of load_from_conf_options in - # keystoneauth1.loading.conf.py), we need to manually monkey patch - # the register opts method so that we can override the conf values to - # our custom values. 
- def mock_register_opts(*args, **kwargs): - ret = original_register_opts(*args, **kwargs) - if 'group' in kwargs and kwargs['group'] == _AUTH_CONF_GROUP: - for key, value in expected.items(): - CONF.set_override(key, value, group=_AUTH_CONF_GROUP) - return ret - - CONF.register_opts = mock_register_opts - - def test_get_keystone_session(self): - self._register_watcher_clients_auth_opts() - - osc = clients.OpenStackClients() - - expected = {'username': 'foousername', - 'password': 'foopassword', - 'auth_url': 'http://server.ip:35357', - 'user_domain_id': 'foouserdomainid', - 'project_domain_id': 'fooprojdomainid'} - - sess = osc.session - self.assertEqual(expected['auth_url'], sess.auth.auth_url) - self.assertEqual(expected['username'], sess.auth._username) - self.assertEqual(expected['password'], sess.auth._password) - self.assertEqual(expected['user_domain_id'], sess.auth._user_domain_id) - self.assertEqual(expected['project_domain_id'], - sess.auth._project_domain_id) - - @mock.patch.object(nvclient, 'Client') - @mock.patch.object(clients.OpenStackClients, 'session') - def test_clients_nova(self, mock_session, mock_call): - osc = clients.OpenStackClients() - osc._nova = None - osc.nova() - mock_call.assert_called_once_with( - CONF.nova_client.api_version, - endpoint_type=CONF.nova_client.endpoint_type, - session=mock_session) - - @mock.patch.object(clients.OpenStackClients, 'session') - def test_clients_nova_diff_vers(self, mock_session): - CONF.set_override('api_version', '2.3', group='nova_client') - osc = clients.OpenStackClients() - osc._nova = None - osc.nova() - self.assertEqual('2.3', osc.nova().api_version.get_string()) - - @mock.patch.object(clients.OpenStackClients, 'session') - def test_clients_nova_diff_endpoint(self, mock_session): - CONF.set_override('endpoint_type', 'publicURL', group='nova_client') - osc = clients.OpenStackClients() - osc._nova = None - osc.nova() - self.assertEqual('publicURL', osc.nova().client.interface) - - 
@mock.patch.object(clients.OpenStackClients, 'session') - def test_clients_nova_cached(self, mock_session): - osc = clients.OpenStackClients() - osc._nova = None - nova = osc.nova() - nova_cached = osc.nova() - self.assertEqual(nova, nova_cached) - - @mock.patch.object(glclient, 'Client') - @mock.patch.object(clients.OpenStackClients, 'session') - def test_clients_glance(self, mock_session, mock_call): - osc = clients.OpenStackClients() - osc._glance = None - osc.glance() - mock_call.assert_called_once_with( - CONF.glance_client.api_version, - interface=CONF.glance_client.endpoint_type, - session=mock_session) - - @mock.patch.object(clients.OpenStackClients, 'session') - def test_clients_glance_diff_vers(self, mock_session): - CONF.set_override('api_version', '1', group='glance_client') - osc = clients.OpenStackClients() - osc._glance = None - osc.glance() - self.assertEqual(1.0, osc.glance().version) - - @mock.patch.object(clients.OpenStackClients, 'session') - def test_clients_glance_diff_endpoint(self, mock_session): - CONF.set_override('endpoint_type', - 'internalURL', group='glance_client') - osc = clients.OpenStackClients() - osc._glance = None - osc.glance() - self.assertEqual('internalURL', osc.glance().http_client.interface) - - @mock.patch.object(clients.OpenStackClients, 'session') - def test_clients_glance_cached(self, mock_session): - osc = clients.OpenStackClients() - osc._glance = None - glance = osc.glance() - glance_cached = osc.glance() - self.assertEqual(glance, glance_cached) - - @mock.patch.object(gnclient, 'Client') - @mock.patch.object(clients.OpenStackClients, 'session') - def test_clients_gnocchi(self, mock_session, mock_call): - osc = clients.OpenStackClients() - osc._gnocchi = None - osc.gnocchi() - mock_call.assert_called_once_with( - CONF.gnocchi_client.api_version, - interface=CONF.gnocchi_client.endpoint_type, - session=mock_session) - - @mock.patch.object(clients.OpenStackClients, 'session') - def test_clients_gnocchi_diff_vers(self, 
mock_session): - # gnocchiclient currently only has one version (v1) - CONF.set_override('api_version', '1', group='gnocchi_client') - osc = clients.OpenStackClients() - osc._gnocchi = None - osc.gnocchi() - self.assertEqual(gnclient_v1.Client, type(osc.gnocchi())) - - @mock.patch.object(clients.OpenStackClients, 'session') - def test_clients_gnocchi_diff_endpoint(self, mock_session): - # gnocchiclient currently only has one version (v1) - CONF.set_override('endpoint_type', 'publicURL', group='gnocchi_client') - osc = clients.OpenStackClients() - osc._gnocchi = None - osc.gnocchi() - self.assertEqual('publicURL', osc.gnocchi().api.interface) - - @mock.patch.object(clients.OpenStackClients, 'session') - def test_clients_gnocchi_cached(self, mock_session): - osc = clients.OpenStackClients() - osc._gnocchi = None - gnocchi = osc.gnocchi() - gnocchi_cached = osc.gnocchi() - self.assertEqual(gnocchi, gnocchi_cached) - - @mock.patch.object(ciclient, 'Client') - @mock.patch.object(clients.OpenStackClients, 'session') - def test_clients_cinder(self, mock_session, mock_call): - osc = clients.OpenStackClients() - osc._cinder = None - osc.cinder() - mock_call.assert_called_once_with( - CONF.cinder_client.api_version, - endpoint_type=CONF.cinder_client.endpoint_type, - session=mock_session) - - @mock.patch.object(clients.OpenStackClients, 'session') - def test_clients_cinder_diff_vers(self, mock_session): - CONF.set_override('api_version', '1', group='cinder_client') - osc = clients.OpenStackClients() - osc._cinder = None - osc.cinder() - self.assertEqual(ciclient_v1.Client, type(osc.cinder())) - - @mock.patch.object(clients.OpenStackClients, 'session') - def test_clients_cinder_diff_endpoint(self, mock_session): - CONF.set_override('endpoint_type', - 'internalURL', group='cinder_client') - osc = clients.OpenStackClients() - osc._cinder = None - osc.cinder() - self.assertEqual('internalURL', osc.cinder().client.interface) - - @mock.patch.object(clients.OpenStackClients, 
'session') - def test_clients_cinder_cached(self, mock_session): - osc = clients.OpenStackClients() - osc._cinder = None - cinder = osc.cinder() - cinder_cached = osc.cinder() - self.assertEqual(cinder, cinder_cached) - - @mock.patch.object(ceclient, 'Client') - @mock.patch.object(clients.OpenStackClients, 'session') - def test_clients_ceilometer(self, mock_session, mock_call): - osc = clients.OpenStackClients() - osc._ceilometer = None - osc.ceilometer() - mock_call.assert_called_once_with( - CONF.ceilometer_client.api_version, - None, - endpoint_type=CONF.ceilometer_client.endpoint_type, - session=mock_session) - - @mock.patch.object(clients.OpenStackClients, 'session') - @mock.patch.object(ceclient_v2.Client, '_get_redirect_client') - def test_clients_ceilometer_diff_vers(self, mock_get_redirect_client, - mock_session): - '''ceilometerclient currently only has one version (v2)''' - mock_get_redirect_client.return_value = [mock.Mock(), mock.Mock()] - CONF.set_override('api_version', '2', - group='ceilometer_client') - osc = clients.OpenStackClients() - osc._ceilometer = None - osc.ceilometer() - self.assertEqual(ceclient_v2.Client, - type(osc.ceilometer())) - - @mock.patch.object(clients.OpenStackClients, 'session') - @mock.patch.object(ceclient_v2.Client, '_get_redirect_client') - def test_clients_ceilometer_diff_endpoint(self, mock_get_redirect_client, - mock_session): - mock_get_redirect_client.return_value = [mock.Mock(), mock.Mock()] - CONF.set_override('endpoint_type', 'publicURL', - group='ceilometer_client') - osc = clients.OpenStackClients() - osc._ceilometer = None - osc.ceilometer() - self.assertEqual('publicURL', osc.ceilometer().http_client.interface) - - @mock.patch.object(clients.OpenStackClients, 'session') - @mock.patch.object(ceclient_v2.Client, '_get_redirect_client') - def test_clients_ceilometer_cached(self, mock_get_redirect_client, - mock_session): - mock_get_redirect_client.return_value = [mock.Mock(), mock.Mock()] - osc = 
clients.OpenStackClients() - osc._ceilometer = None - ceilometer = osc.ceilometer() - ceilometer_cached = osc.ceilometer() - self.assertEqual(ceilometer, ceilometer_cached) - - @mock.patch.object(netclient, 'Client') - @mock.patch.object(clients.OpenStackClients, 'session') - def test_clients_neutron(self, mock_session, mock_call): - osc = clients.OpenStackClients() - osc._neutron = None - osc.neutron() - mock_call.assert_called_once_with( - CONF.neutron_client.api_version, - endpoint_type=CONF.neutron_client.endpoint_type, - session=mock_session) - - @mock.patch.object(clients.OpenStackClients, 'session') - def test_clients_neutron_diff_vers(self, mock_session): - '''neutronclient currently only has one version (v2)''' - CONF.set_override('api_version', '2.0', - group='neutron_client') - osc = clients.OpenStackClients() - osc._neutron = None - osc.neutron() - self.assertEqual(netclient_v2.Client, - type(osc.neutron())) - - @mock.patch.object(clients.OpenStackClients, 'session') - def test_clients_neutron_diff_endpoint(self, mock_session): - '''neutronclient currently only has one version (v2)''' - CONF.set_override('endpoint_type', 'internalURL', - group='neutron_client') - osc = clients.OpenStackClients() - osc._neutron = None - osc.neutron() - self.assertEqual('internalURL', osc.neutron().httpclient.interface) - - @mock.patch.object(clients.OpenStackClients, 'session') - def test_clients_neutron_cached(self, mock_session): - osc = clients.OpenStackClients() - osc._neutron = None - neutron = osc.neutron() - neutron_cached = osc.neutron() - self.assertEqual(neutron, neutron_cached) - - @mock.patch.object(monclient, 'Client') - @mock.patch.object(ka_loading, 'load_session_from_conf_options') - def test_clients_monasca(self, mock_session, mock_call): - mock_session.return_value = mock.Mock( - get_endpoint=mock.Mock(return_value='test_endpoint'), - get_token=mock.Mock(return_value='test_token'),) - - self._register_watcher_clients_auth_opts() - - osc = 
clients.OpenStackClients() - osc._monasca = None - osc.monasca() - mock_call.assert_called_once_with( - CONF.monasca_client.api_version, - 'test_endpoint', - auth_url='http://server.ip:35357', cert_file=None, insecure=False, - key_file=None, keystone_timeout=None, os_cacert=None, - password='foopassword', service_type='monitoring', - token='test_token', username='foousername') - - @mock.patch.object(ka_loading, 'load_session_from_conf_options') - def test_clients_monasca_diff_vers(self, mock_session): - mock_session.return_value = mock.Mock( - get_endpoint=mock.Mock(return_value='test_endpoint'), - get_token=mock.Mock(return_value='test_token'),) - - self._register_watcher_clients_auth_opts() - - CONF.set_override('api_version', '2_0', group='monasca_client') - osc = clients.OpenStackClients() - osc._monasca = None - osc.monasca() - self.assertEqual(monclient_v2.Client, type(osc.monasca())) - - @mock.patch.object(ka_loading, 'load_session_from_conf_options') - def test_clients_monasca_cached(self, mock_session): - mock_session.return_value = mock.Mock( - get_endpoint=mock.Mock(return_value='test_endpoint'), - get_token=mock.Mock(return_value='test_token'),) - - self._register_watcher_clients_auth_opts() - - osc = clients.OpenStackClients() - osc._monasca = None - monasca = osc.monasca() - monasca_cached = osc.monasca() - self.assertEqual(monasca, monasca_cached) - - @mock.patch.object(irclient, 'Client') - @mock.patch.object(clients.OpenStackClients, 'session') - def test_clients_ironic(self, mock_session, mock_call): - osc = clients.OpenStackClients() - osc._ironic = None - osc.ironic() - mock_call.assert_called_once_with( - CONF.ironic_client.api_version, - CONF.ironic_client.endpoint_type, - max_retries=None, - os_ironic_api_version=None, - retry_interval=None, - session=mock_session) - - @mock.patch.object(clients.OpenStackClients, 'session') - def test_clients_ironic_diff_vers(self, mock_session): - CONF.set_override('api_version', '1', group='ironic_client') 
- osc = clients.OpenStackClients() - osc._ironic = None - osc.ironic() - self.assertEqual(irclient_v1.Client, type(osc.ironic())) - - @mock.patch.object(clients.OpenStackClients, 'session') - def test_clients_ironic_diff_endpoint(self, mock_session): - CONF.set_override('endpoint_type', 'internalURL', - group='ironic_client') - osc = clients.OpenStackClients() - osc._ironic = None - osc.ironic() - self.assertEqual('internalURL', osc.ironic().http_client.endpoint) - - @mock.patch.object(clients.OpenStackClients, 'session') - def test_clients_ironic_cached(self, mock_session): - osc = clients.OpenStackClients() - osc._ironic = None - ironic = osc.ironic() - ironic_cached = osc.ironic() - self.assertEqual(ironic, ironic_cached) diff --git a/watcher/tests/common/test_nova_helper.py b/watcher/tests/common/test_nova_helper.py deleted file mode 100644 index 06daf6f..0000000 --- a/watcher/tests/common/test_nova_helper.py +++ /dev/null @@ -1,365 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Authors: Jean-Emile DARTOIS -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - -import time - -import mock - -from watcher.common import clients -from watcher.common import nova_helper -from watcher.common import utils -from watcher.tests import base - - -@mock.patch.object(clients.OpenStackClients, 'nova') -@mock.patch.object(clients.OpenStackClients, 'neutron') -@mock.patch.object(clients.OpenStackClients, 'cinder') -@mock.patch.object(clients.OpenStackClients, 'glance') -class TestNovaHelper(base.TestCase): - - def setUp(self): - super(TestNovaHelper, self).setUp() - self.instance_uuid = "fb5311b7-37f3-457e-9cde-6494a3c59bfe" - self.source_node = "ldev-indeedsrv005" - self.destination_node = "ldev-indeedsrv006" - self.flavor_name = "x1" - - @staticmethod - def fake_server(*args, **kwargs): - server = mock.MagicMock() - server.id = args[0] - server.status = 'ACTIVE' - - return server - - @staticmethod - def fake_migration(*args, **kwargs): - migration = mock.MagicMock() - migration.id = args[0] - return migration - - @staticmethod - def fake_nova_find_list(nova_util, find=None, list=None): - nova_util.nova.servers.get.return_value = find - if list is None: - nova_util.nova.servers.list.return_value = [] - else: - nova_util.nova.servers.list.return_value = [list] - - @staticmethod - def fake_nova_migration_list(nova_util, list=None): - if list is None: - nova_util.nova.server_migrations.list.return_value = [] - else: - nova_util.nova.server_migration.list.return_value = [list] - - @staticmethod - def fake_live_migrate(server, *args, **kwargs): - - def side_effect(*args, **kwargs): - setattr(server, 'OS-EXT-SRV-ATTR:host', "compute-2") - - server.live_migrate.side_effect = side_effect - - @staticmethod - def fake_confirm_resize(server, *args, **kwargs): - - def side_effect(*args, **kwargs): - setattr(server, 'status', 'ACTIVE') - - server.confirm_resize.side_effect = side_effect - - @staticmethod - def fake_cold_migrate(server, *args, **kwargs): - - def side_effect(*args, **kwargs): - setattr(server, 'OS-EXT-SRV-ATTR:host', "compute-2") 
- setattr(server, 'status', 'VERIFY_RESIZE') - - server.migrate.side_effect = side_effect - - @mock.patch.object(time, 'sleep', mock.Mock()) - def test_stop_instance(self, mock_glance, mock_cinder, mock_neutron, - mock_nova): - nova_util = nova_helper.NovaHelper() - instance_id = utils.generate_uuid() - server = self.fake_server(instance_id) - setattr(server, 'OS-EXT-STS:vm_state', 'stopped') - self.fake_nova_find_list(nova_util, find=server, list=server) - - result = nova_util.stop_instance(instance_id) - self.assertTrue(result) - - setattr(server, 'OS-EXT-STS:vm_state', 'active') - result = nova_util.stop_instance(instance_id) - self.assertFalse(result) - - self.fake_nova_find_list(nova_util, find=server, list=None) - - result = nova_util.stop_instance(instance_id) - self.assertFalse(result) - - def test_set_host_offline(self, mock_glance, mock_cinder, mock_neutron, - mock_nova): - host = mock.MagicMock() - nova_util = nova_helper.NovaHelper() - nova_util.nova.hosts = mock.MagicMock() - nova_util.nova.hosts.get.return_value = host - result = nova_util.set_host_offline("rennes") - self.assertTrue(result) - - nova_util.nova.hosts.get.return_value = None - result = nova_util.set_host_offline("rennes") - self.assertFalse(result) - - @mock.patch.object(time, 'sleep', mock.Mock()) - def test_resize_instance(self, mock_glance, mock_cinder, - mock_neutron, mock_nova): - nova_util = nova_helper.NovaHelper() - server = self.fake_server(self.instance_uuid) - setattr(server, 'status', 'VERIFY_RESIZE') - self.fake_nova_find_list(nova_util, find=server, list=server) - is_success = nova_util.resize_instance(self.instance_uuid, - self.flavor_name) - self.assertTrue(is_success) - - setattr(server, 'status', 'SOMETHING_ELSE') - is_success = nova_util.resize_instance(self.instance_uuid, - self.flavor_name) - self.assertFalse(is_success) - - @mock.patch.object(time, 'sleep', mock.Mock()) - def test_live_migrate_instance(self, mock_glance, mock_cinder, - mock_neutron, mock_nova): - 
nova_util = nova_helper.NovaHelper() - server = self.fake_server(self.instance_uuid) - setattr(server, 'OS-EXT-SRV-ATTR:host', - self.destination_node) - self.fake_nova_find_list(nova_util, find=server, list=server) - is_success = nova_util.live_migrate_instance( - self.instance_uuid, self.destination_node - ) - self.assertTrue(is_success) - - setattr(server, 'OS-EXT-SRV-ATTR:host', - self.source_node) - self.fake_nova_find_list(nova_util, find=server, list=None) - is_success = nova_util.live_migrate_instance( - self.instance_uuid, self.destination_node - ) - self.assertFalse(is_success) - - @mock.patch.object(time, 'sleep', mock.Mock()) - def test_live_migrate_instance_no_destination_node( - self, mock_glance, mock_cinder, mock_neutron, mock_nova): - nova_util = nova_helper.NovaHelper() - server = self.fake_server(self.instance_uuid) - self.destination_node = None - self.fake_nova_find_list(nova_util, find=server, list=server) - self.fake_live_migrate(server) - is_success = nova_util.live_migrate_instance( - self.instance_uuid, self.destination_node - ) - self.assertTrue(is_success) - - def test_watcher_non_live_migrate_instance_not_found( - self, mock_glance, mock_cinder, mock_neutron, mock_nova): - nova_util = nova_helper.NovaHelper() - self.fake_nova_find_list(nova_util, find=None, list=None) - - is_success = nova_util.watcher_non_live_migrate_instance( - self.instance_uuid, - self.destination_node) - - self.assertFalse(is_success) - - @mock.patch.object(time, 'sleep', mock.Mock()) - def test_watcher_non_live_migrate_instance_volume( - self, mock_glance, mock_cinder, mock_neutron, mock_nova): - nova_util = nova_helper.NovaHelper() - nova_servers = nova_util.nova.servers - instance = self.fake_server(self.instance_uuid) - setattr(instance, 'OS-EXT-SRV-ATTR:host', - self.source_node) - setattr(instance, 'OS-EXT-STS:vm_state', "stopped") - attached_volumes = [{'id': str(utils.generate_uuid())}] - setattr(instance, "os-extended-volumes:volumes_attached", - 
attached_volumes) - self.fake_nova_find_list(nova_util, find=instance, list=instance) - nova_servers.create_image.return_value = utils.generate_uuid() - nova_util.glance.images.get.return_value = mock.MagicMock( - status='active') - nova_util.cinder.volumes.get.return_value = mock.MagicMock( - status='available') - - is_success = nova_util.watcher_non_live_migrate_instance( - self.instance_uuid, - self.destination_node) - self.assertTrue(is_success) - - @mock.patch.object(time, 'sleep', mock.Mock()) - def test_watcher_non_live_migrate_keep_image( - self, mock_glance, mock_cinder, mock_neutron, mock_nova): - nova_util = nova_helper.NovaHelper() - nova_servers = nova_util.nova.servers - instance = self.fake_server(self.instance_uuid) - setattr(instance, 'OS-EXT-SRV-ATTR:host', - self.source_node) - setattr(instance, 'OS-EXT-STS:vm_state', "stopped") - addresses = mock.MagicMock() - network_type = mock.MagicMock() - networks = [] - networks.append(("lan", network_type)) - addresses.items.return_value = networks - attached_volumes = mock.MagicMock() - setattr(instance, 'addresses', addresses) - setattr(instance, "os-extended-volumes:volumes_attached", - attached_volumes) - self.fake_nova_find_list(nova_util, find=instance, list=instance) - nova_servers.create_image.return_value = utils.generate_uuid() - nova_util.glance.images.get.return_value = mock.MagicMock( - status='active') - is_success = nova_util.watcher_non_live_migrate_instance( - self.instance_uuid, - self.destination_node, keep_original_image_name=False) - self.assertTrue(is_success) - - @mock.patch.object(time, 'sleep', mock.Mock()) - def test_abort_live_migrate_instance(self, mock_glance, mock_cinder, - mock_neutron, mock_nova): - nova_util = nova_helper.NovaHelper() - server = self.fake_server(self.instance_uuid) - setattr(server, 'OS-EXT-SRV-ATTR:host', - self.source_node) - setattr(server, 'OS-EXT-STS:task_state', None) - migration = self.fake_migration(2) - self.fake_nova_migration_list(nova_util, 
list=migration) - - self.fake_nova_find_list(nova_util, find=server, list=server) - - self.assertTrue(nova_util.abort_live_migrate( - self.instance_uuid, self.source_node, self.destination_node)) - - setattr(server, 'OS-EXT-SRV-ATTR:host', self.destination_node) - - self.assertFalse(nova_util.abort_live_migrate( - self.instance_uuid, self.source_node, self.destination_node)) - - setattr(server, 'status', 'ERROR') - self.assertRaises(Exception, nova_util.abort_live_migrate, - (self.instance_uuid, self.source_node, - self.destination_node)) - - def test_non_live_migrate_instance_no_destination_node( - self, mock_glance, mock_cinder, mock_neutron, mock_nova): - nova_util = nova_helper.NovaHelper() - server = self.fake_server(self.instance_uuid) - setattr(server, 'OS-EXT-SRV-ATTR:host', - self.source_node) - self.destination_node = None - self.fake_nova_find_list(nova_util, find=server, list=server) - self.fake_cold_migrate(server) - self.fake_confirm_resize(server) - is_success = nova_util.watcher_non_live_migrate_instance( - self.instance_uuid, self.destination_node - ) - self.assertTrue(is_success) - - @mock.patch.object(time, 'sleep', mock.Mock()) - def test_create_image_from_instance(self, mock_glance, mock_cinder, - mock_neutron, mock_nova): - nova_util = nova_helper.NovaHelper() - instance = self.fake_server(self.instance_uuid) - image = mock.MagicMock() - setattr(instance, 'OS-EXT-SRV-ATTR:host', self.source_node) - setattr(instance, 'OS-EXT-STS:vm_state', "stopped") - self.fake_nova_find_list(nova_util, find=instance, list=instance) - image_uuid = 'fake-image-uuid' - nova_util.nova.servers.create_image.return_value = image - - glance_client = mock.MagicMock() - mock_glance.return_value = glance_client - - glance_client.images = {image_uuid: image} - instance = nova_util.create_image_from_instance( - self.instance_uuid, "Cirros" - ) - self.assertIsNotNone(instance) - - nova_util.glance.images.get.return_value = None - instance = 
nova_util.create_image_from_instance( - self.instance_uuid, "Cirros" - ) - self.assertIsNone(instance) - - def test_enable_service_nova_compute(self, mock_glance, mock_cinder, - mock_neutron, mock_nova): - nova_util = nova_helper.NovaHelper() - nova_services = nova_util.nova.services - nova_services.enable.return_value = mock.MagicMock( - status='enabled') - - result = nova_util.enable_service_nova_compute('nanjing') - self.assertTrue(result) - - nova_services.enable.return_value = mock.MagicMock( - status='disabled') - - result = nova_util.enable_service_nova_compute('nanjing') - self.assertFalse(result) - - def test_disable_service_nova_compute(self, mock_glance, mock_cinder, - mock_neutron, mock_nova): - nova_util = nova_helper.NovaHelper() - nova_services = nova_util.nova.services - nova_services.disable.return_value = mock.MagicMock( - status='enabled') - - result = nova_util.disable_service_nova_compute('nanjing') - self.assertFalse(result) - - nova_services.disable.return_value = mock.MagicMock( - status='disabled') - - result = nova_util.disable_service_nova_compute('nanjing') - self.assertTrue(result) - - @mock.patch.object(time, 'sleep', mock.Mock()) - def test_create_instance(self, mock_glance, mock_cinder, - mock_neutron, mock_nova): - nova_util = nova_helper.NovaHelper() - instance = self.fake_server(self.instance_uuid) - nova_util.nova.services.create.return_value = instance - nova_util.nova.services.get.return_value = instance - - instance = nova_util.create_instance(self.source_node) - self.assertIsNotNone(instance) - - def test_get_flavor_instance(self, mock_glance, mock_cinder, - mock_neutron, mock_nova): - nova_util = nova_helper.NovaHelper() - instance = self.fake_server(self.instance_uuid) - flavor = {'id': 1, 'name': 'm1.tiny', 'ram': 512, 'vcpus': 1, - 'disk': 0, 'ephemeral': 0} - instance.flavor = flavor - nova_util.nova.flavors.get.return_value = flavor - cache = flavor - - nova_util.get_flavor_instance(instance, cache) - 
self.assertEqual(instance.flavor['name'], cache['name']) diff --git a/watcher/tests/common/test_service.py b/watcher/tests/common/test_service.py deleted file mode 100644 index 71f4f37..0000000 --- a/watcher/tests/common/test_service.py +++ /dev/null @@ -1,106 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import mock - -from oslo_config import cfg - -import oslo_messaging as om -from watcher.common import rpc -from watcher.common import service -from watcher import objects -from watcher.tests import base - -CONF = cfg.CONF - - -class DummyManager(object): - - API_VERSION = '1.0' - - conductor_endpoints = [mock.Mock()] - notification_endpoints = [mock.Mock()] - - def __init__(self): - self.publisher_id = "pub_id" - self.conductor_topic = "conductor_topic" - self.notification_topics = [] - self.api_version = self.API_VERSION - self.service_name = None - - -class TestServiceHeartbeat(base.TestCase): - - def setUp(self): - super(TestServiceHeartbeat, self).setUp() - - @mock.patch.object(objects.Service, 'list') - @mock.patch.object(objects.Service, 'create') - def test_send_beat_with_creating_service(self, mock_create, - mock_list): - CONF.set_default('host', 'fake-fqdn') - - mock_list.return_value = [] - service.ServiceHeartbeat(service_name='watcher-service') - mock_list.assert_called_once_with(mock.ANY, - filters={'name': 'watcher-service', - 'host': 'fake-fqdn'}) - self.assertEqual(1, 
mock_create.call_count) - - @mock.patch.object(objects.Service, 'list') - @mock.patch.object(objects.Service, 'save') - def test_send_beat_without_creating_service(self, mock_save, mock_list): - - mock_list.return_value = [objects.Service(mock.Mock(), - name='watcher-service', - host='controller')] - service.ServiceHeartbeat(service_name='watcher-service') - self.assertEqual(1, mock_save.call_count) - - -class TestService(base.TestCase): - - def setUp(self): - super(TestService, self).setUp() - - @mock.patch.object(om.rpc.server, "RPCServer") - def test_start(self, m_handler): - dummy_service = service.Service(DummyManager) - dummy_service.start() - self.assertEqual(1, m_handler.call_count) - - @mock.patch.object(om.rpc.server, "RPCServer") - def test_stop(self, m_handler): - dummy_service = service.Service(DummyManager) - dummy_service.stop() - self.assertEqual(1, m_handler.call_count) - - def test_build_topic_handler(self): - topic_name = "mytopic" - dummy_service = service.Service(DummyManager) - handler = dummy_service.build_topic_handler(topic_name) - self.assertIsNotNone(handler) - self.assertIsInstance(handler, om.rpc.server.RPCServer) - self.assertEqual("mytopic", handler._target.topic) - - def test_init_service(self): - dummy_service = service.Service(DummyManager) - self.assertIsInstance(dummy_service.serializer, - rpc.RequestContextSerializer) - self.assertIsInstance( - dummy_service.conductor_topic_handler, - om.rpc.server.RPCServer) diff --git a/watcher/tests/conf/__init__.py b/watcher/tests/conf/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/tests/conf/test_list_opts.py b/watcher/tests/conf/test_list_opts.py deleted file mode 100755 index ef7f4f1..0000000 --- a/watcher/tests/conf/test_list_opts.py +++ /dev/null @@ -1,148 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# Copyright (c) 2016 Intel Corp -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file 
except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import mock -from stevedore import extension - -from watcher.conf import opts -from watcher.conf import plugins -from watcher.tests import base -from watcher.tests.decision_engine import fake_strategies - - -class TestListOpts(base.TestCase): - def setUp(self): - super(TestListOpts, self).setUp() - self.base_sections = [ - 'DEFAULT', 'api', 'database', 'watcher_decision_engine', - 'watcher_applier', 'watcher_planner', 'nova_client', - 'glance_client', 'gnocchi_client', 'cinder_client', - 'ceilometer_client', 'monasca_client', 'ironic_client', - 'neutron_client', 'watcher_clients_auth'] - self.opt_sections = list(dict(opts.list_opts()).keys()) - - def test_run_list_opts(self): - expected_sections = self.opt_sections - - result = opts.list_opts() - - self.assertIsNotNone(result) - for section_name, options in result: - self.assertIn(section_name, expected_sections) - self.assertTrue(len(options)) - - def test_list_opts_no_opts(self): - expected_sections = self.base_sections - # Set up the fake Stevedore extensions - fake_extmanager_call = extension.ExtensionManager.make_test_instance( - extensions=[extension.Extension( - name=fake_strategies.FakeDummy1Strategy2.get_name(), - entry_point="%s:%s" % ( - fake_strategies.FakeDummy1Strategy2.__module__, - fake_strategies.FakeDummy1Strategy2.__name__), - plugin=fake_strategies.FakeDummy1Strategy2, - obj=None, - )], - namespace="watcher_strategies", - ) - - def m_list_available(namespace): - if namespace == "watcher_strategies": - return fake_extmanager_call - else: - 
return extension.ExtensionManager.make_test_instance( - extensions=[], namespace=namespace) - - with mock.patch.object(extension, "ExtensionManager") as m_ext_manager: - m_ext_manager.side_effect = m_list_available - result = opts.list_opts() - - self.assertIsNotNone(result) - for section_name, options in result: - self.assertIn(section_name, expected_sections) - self.assertTrue(len(options)) - - def test_list_opts_with_opts(self): - expected_sections = self.base_sections + [ - 'watcher_strategies.strategy_1'] - # Set up the fake Stevedore extensions - fake_extmanager_call = extension.ExtensionManager.make_test_instance( - extensions=[extension.Extension( - name=fake_strategies.FakeDummy1Strategy1.get_name(), - entry_point="%s:%s" % ( - fake_strategies.FakeDummy1Strategy1.__module__, - fake_strategies.FakeDummy1Strategy1.__name__), - plugin=fake_strategies.FakeDummy1Strategy1, - obj=None, - )], - namespace="watcher_strategies", - ) - - def m_list_available(namespace): - if namespace == "watcher_strategies": - return fake_extmanager_call - else: - return extension.ExtensionManager.make_test_instance( - extensions=[], namespace=namespace) - - with mock.patch.object(extension, "ExtensionManager") as m_ext_manager: - m_ext_manager.side_effect = m_list_available - result = opts.list_opts() - - self.assertIsNotNone(result) - for section_name, options in result: - self.assertIn(section_name, expected_sections) - self.assertTrue(len(options)) - - result_map = dict(result) - strategy_opts = result_map['watcher_strategies.strategy_1'] - self.assertEqual(['test_opt'], [opt.name for opt in strategy_opts]) - - -class TestPlugins(base.TestCase): - - def test_show_plugins(self): - # Set up the fake Stevedore extensions - fake_extmanager_call = extension.ExtensionManager.make_test_instance( - extensions=[extension.Extension( - name=fake_strategies.FakeDummy1Strategy1.get_name(), - entry_point="%s:%s" % ( - fake_strategies.FakeDummy1Strategy1.__module__, - 
fake_strategies.FakeDummy1Strategy1.__name__), - plugin=fake_strategies.FakeDummy1Strategy1, - obj=None, - )], - namespace="watcher_strategies", - ) - - def m_list_available(namespace): - if namespace == "watcher_strategies": - return fake_extmanager_call - else: - return extension.ExtensionManager.make_test_instance( - extensions=[], namespace=namespace) - - with mock.patch.object(extension, "ExtensionManager") as m_ext_manager: - with mock.patch.object( - plugins, "_show_plugins_ascii_table" - ) as m_show: - m_ext_manager.side_effect = m_list_available - plugins.show_plugins() - m_show.assert_called_once_with( - [('watcher_strategies.strategy_1', 'strategy_1', - 'watcher.tests.decision_engine.' - 'fake_strategies.FakeDummy1Strategy1')]) diff --git a/watcher/tests/conf_fixture.py b/watcher/tests/conf_fixture.py deleted file mode 100644 index 3e197b9..0000000 --- a/watcher/tests/conf_fixture.py +++ /dev/null @@ -1,59 +0,0 @@ -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import fixtures -from oslo_config import cfg - -from watcher.common import config - -CONF = cfg.CONF -CONF.import_opt('host', 'watcher.conf.service') -CONF.import_opt('connection', 'oslo_db.options', group='database') -CONF.import_opt('sqlite_synchronous', 'oslo_db.options', group='database') - - -class ConfFixture(fixtures.Fixture): - """Fixture to manage conf settings.""" - - def __init__(self, conf=cfg.CONF): - self.conf = conf - - def setUp(self): - super(ConfFixture, self).setUp() - - self.conf.set_default('connection', "sqlite://", group='database') - self.conf.set_default('sqlite_synchronous', False, group='database') - config.parse_args([], default_config_files=[]) - self.addCleanup(self.conf.reset) - - -class ConfReloadFixture(ConfFixture): - """Fixture to manage reloads of conf settings.""" - - def __init__(self, conf=cfg.CONF): - self.conf = conf - self._original_parse_cli_opts = self.conf._parse_cli_opts - - def _fake_parser(self, *args, **kw): - return cfg.ConfigOpts._parse_cli_opts(self.conf, []) - - def _restore_parser(self): - self.conf._parse_cli_opts = self._original_parse_cli_opts - - def setUp(self): - super(ConfReloadFixture, self).setUp() - self.conf._parse_cli_opts = self._fake_parser - self.addCleanup(self._restore_parser) diff --git a/watcher/tests/config.py b/watcher/tests/config.py deleted file mode 100644 index 7b8745b..0000000 --- a/watcher/tests/config.py +++ /dev/null @@ -1,38 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from watcher.api import hooks - -# Server Specific Configurations -server = { - 'port': '9322', - 'host': '0.0.0.0' -} - -# Pecan Application Configurations -app = { - 'root': 'watcher.api.controllers.root.RootController', - 'modules': ['watcher.api'], - 'hooks': [ - hooks.ContextHook(), - ], - 'acl_public_routes': [ - '/' - ], -} - -# Custom Configurations must be in Python dictionary format:: -# -# foo = {'bar':'baz'} -# -# All configurations are accessible at:: -# pecan.conf diff --git a/watcher/tests/datasource/__init__.py b/watcher/tests/datasource/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/tests/datasource/test_ceilometer_helper.py b/watcher/tests/datasource/test_ceilometer_helper.py deleted file mode 100644 index fd05181..0000000 --- a/watcher/tests/datasource/test_ceilometer_helper.py +++ /dev/null @@ -1,95 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from __future__ import absolute_import -from __future__ import unicode_literals - -import mock - -from watcher.common import clients -from watcher.datasource import ceilometer as ceilometer_helper -from watcher.tests import base - - -@mock.patch.object(clients.OpenStackClients, 'ceilometer') -class TestCeilometerHelper(base.BaseTestCase): - - def test_build_query(self, mock_ceilometer): - mock_ceilometer.return_value = mock.MagicMock() - cm = ceilometer_helper.CeilometerHelper() - expected = [{'field': 'user_id', 'op': 'eq', 'value': u'user_id'}, - {'field': 'project_id', 'op': 'eq', 'value': u'tenant_id'}, - {'field': 'resource_id', 'op': 'eq', - 'value': u'resource_id'}] - - query = cm.build_query(user_id="user_id", - tenant_id="tenant_id", - resource_id="resource_id", - user_ids=["user_ids"], - tenant_ids=["tenant_ids"], - resource_ids=["resource_ids"]) - self.assertEqual(expected, query) - - def test_statistic_aggregation(self, mock_ceilometer): - cm = ceilometer_helper.CeilometerHelper() - ceilometer = mock.MagicMock() - statistic = mock.MagicMock() - expected_result = 100 - statistic[-1]._info = {'aggregate': {'avg': expected_result}} - ceilometer.statistics.list.return_value = statistic - mock_ceilometer.return_value = ceilometer - cm = ceilometer_helper.CeilometerHelper() - val = cm.statistic_aggregation( - resource_id="INSTANCE_ID", - meter_name="cpu_util", - period="7300" - ) - self.assertEqual(expected_result, val) - - def test_get_last_sample(self, mock_ceilometer): - ceilometer = mock.MagicMock() - statistic = mock.MagicMock() - expected_result = 100 - statistic[-1]._info = {'counter_volume': expected_result} - ceilometer.samples.list.return_value = statistic - mock_ceilometer.return_value = ceilometer - cm = ceilometer_helper.CeilometerHelper() - val = cm.get_last_sample_value( - resource_id="id", - meter_name="compute.node.percent" - ) - self.assertEqual(expected_result, val) - - def test_get_last_sample_none(self, mock_ceilometer): - ceilometer = 
mock.MagicMock() - expected = [] - ceilometer.samples.list.return_value = expected - mock_ceilometer.return_value = ceilometer - cm = ceilometer_helper.CeilometerHelper() - val = cm.get_last_sample_values( - resource_id="id", - meter_name="compute.node.percent" - ) - self.assertEqual(expected, val) - - def test_statistic_list(self, mock_ceilometer): - ceilometer = mock.MagicMock() - expected_value = [] - ceilometer.statistics.list.return_value = expected_value - mock_ceilometer.return_value = ceilometer - cm = ceilometer_helper.CeilometerHelper() - val = cm.statistic_list(meter_name="cpu_util") - self.assertEqual(expected_value, val) diff --git a/watcher/tests/datasource/test_gnocchi_helper.py b/watcher/tests/datasource/test_gnocchi_helper.py deleted file mode 100644 index 8b481a3..0000000 --- a/watcher/tests/datasource/test_gnocchi_helper.py +++ /dev/null @@ -1,68 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2017 Servionica -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import mock -from oslo_config import cfg -from oslo_utils import timeutils - -from watcher.common import clients -from watcher.common import exception -from watcher.datasource import gnocchi as gnocchi_helper -from watcher.tests import base - -CONF = cfg.CONF - - -@mock.patch.object(clients.OpenStackClients, 'gnocchi') -class TestGnocchiHelper(base.BaseTestCase): - - def test_gnocchi_statistic_aggregation(self, mock_gnocchi): - gnocchi = mock.MagicMock() - expected_result = 5.5 - - expected_measures = [["2017-02-02T09:00:00.000000", 360, 5.5]] - - gnocchi.metric.get_measures.return_value = expected_measures - mock_gnocchi.return_value = gnocchi - - helper = gnocchi_helper.GnocchiHelper() - result = helper.statistic_aggregation( - resource_id='16a86790-327a-45f9-bc82-45839f062fdc', - metric='cpu_util', - granularity=360, - start_time=timeutils.parse_isotime("2017-02-02T09:00:00.000000"), - stop_time=timeutils.parse_isotime("2017-02-02T10:00:00.000000"), - aggregation='mean' - ) - self.assertEqual(expected_result, result) - - def test_gnocchi_wrong_datetime(self, mock_gnocchi): - gnocchi = mock.MagicMock() - - expected_measures = [["2017-02-02T09:00:00.000000", 360, 5.5]] - - gnocchi.metric.get_measures.return_value = expected_measures - mock_gnocchi.return_value = gnocchi - - helper = gnocchi_helper.GnocchiHelper() - self.assertRaises( - exception.InvalidParameter, helper.statistic_aggregation, - resource_id='16a86790-327a-45f9-bc82-45839f062fdc', - metric='cpu_util', - granularity=360, - start_time="2017-02-02T09:00:00.000000", - stop_time=timeutils.parse_isotime("2017-02-02T10:00:00.000000"), - aggregation='mean') diff --git a/watcher/tests/datasource/test_monasca_helper.py b/watcher/tests/datasource/test_monasca_helper.py deleted file mode 100644 index 5c49af6..0000000 --- a/watcher/tests/datasource/test_monasca_helper.py +++ /dev/null @@ -1,102 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Licensed under the Apache License, Version 2.0 
(the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import mock -from monascaclient import exc -from oslo_config import cfg -from oslo_utils import timeutils - -from watcher.common import clients -from watcher.datasource import monasca as monasca_helper -from watcher.tests import base - -CONF = cfg.CONF - - -@mock.patch.object(clients.OpenStackClients, 'monasca') -class TestMonascaHelper(base.BaseTestCase): - - def test_monasca_statistic_aggregation(self, mock_monasca): - monasca = mock.MagicMock() - expected_result = [{ - 'columns': ['timestamp', 'avg'], - 'dimensions': { - 'hostname': 'rdev-indeedsrv001', - 'service': 'monasca'}, - 'id': '0', - 'name': 'cpu.percent', - 'statistics': [ - ['2016-07-29T12:45:00Z', 0.0], - ['2016-07-29T12:50:00Z', 0.9100000000000001], - ['2016-07-29T12:55:00Z', 0.9111111111111112]]}] - - monasca.metrics.list_statistics.return_value = expected_result - mock_monasca.return_value = monasca - - helper = monasca_helper.MonascaHelper() - result = helper.statistic_aggregation( - meter_name='cpu.percent', - dimensions={'hostname': 'NODE_UUID'}, - start_time=timeutils.parse_isotime("2016-06-06T10:33:22.063176"), - end_time=None, - period=7200, - aggregate='avg', - group_by='*', - ) - self.assertEqual(expected_result, result) - - def test_monasca_statistic_list(self, mock_monasca): - monasca = mock.MagicMock() - expected_result = [{ - 'columns': ['timestamp', 'value', 'value_meta'], - 'dimensions': { - 'hostname': 'rdev-indeedsrv001', - 'service': 'monasca'}, - 'id': '0', - 'measurements': [ - 
['2016-07-29T12:54:06.000Z', 0.9, {}], - ['2016-07-29T12:54:36.000Z', 0.9, {}], - ['2016-07-29T12:55:06.000Z', 0.9, {}], - ['2016-07-29T12:55:36.000Z', 0.8, {}]], - 'name': 'cpu.percent'}] - - monasca.metrics.list_measurements.return_value = expected_result - mock_monasca.return_value = monasca - helper = monasca_helper.MonascaHelper() - val = helper.statistics_list(meter_name="cpu.percent", dimensions={}) - self.assertEqual(expected_result, val) - - def test_monasca_statistic_list_query_retry(self, mock_monasca): - monasca = mock.MagicMock() - expected_result = [{ - 'columns': ['timestamp', 'value', 'value_meta'], - 'dimensions': { - 'hostname': 'rdev-indeedsrv001', - 'service': 'monasca'}, - 'id': '0', - 'measurements': [ - ['2016-07-29T12:54:06.000Z', 0.9, {}], - ['2016-07-29T12:54:36.000Z', 0.9, {}], - ['2016-07-29T12:55:06.000Z', 0.9, {}], - ['2016-07-29T12:55:36.000Z', 0.8, {}]], - 'name': 'cpu.percent'}] - - monasca.metrics.list_measurements.side_effect = [ - exc.HTTPUnauthorized, expected_result] - mock_monasca.return_value = monasca - helper = monasca_helper.MonascaHelper() - val = helper.statistics_list(meter_name="cpu.percent", dimensions={}) - self.assertEqual(expected_result, val) diff --git a/watcher/tests/db/__init__.py b/watcher/tests/db/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/tests/db/base.py b/watcher/tests/db/base.py deleted file mode 100644 index 57d7a61..0000000 --- a/watcher/tests/db/base.py +++ /dev/null @@ -1,86 +0,0 @@ -# Copyright (c) 2012 NTT DOCOMO, INC. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Watcher DB test base class.""" - -import fixtures -from oslo_config import cfg - -from watcher.db import api as dbapi -from watcher.db.sqlalchemy import api as sqla_api -from watcher.db.sqlalchemy import migration -from watcher.db.sqlalchemy import models -from watcher.tests import base -from watcher.tests.db import utils - - -CONF = cfg.CONF - -CONF.import_opt('enable_authentication', 'watcher.api.acl') - -_DB_CACHE = None - - -class Database(fixtures.Fixture): - - def __init__(self, db_api, db_migrate, sql_connection): - self.sql_connection = sql_connection - - self.engine = db_api.get_engine() - self.engine.dispose() - conn = self.engine.connect() - self.setup_sqlite(db_migrate) - self.post_migrations() - - self._DB = "".join(line for line in conn.connection.iterdump()) - self.engine.dispose() - - def setup_sqlite(self, db_migrate): - if db_migrate.version(): - return - models.Base.metadata.create_all(self.engine) - db_migrate.stamp('head') - - def setUp(self): - super(Database, self).setUp() - - conn = self.engine.connect() - conn.connection.executescript(self._DB) - self.addCleanup(self.engine.dispose) - - def post_migrations(self): - """Any addition steps that are needed outside of the migrations.""" - - -class DbTestCase(base.TestCase): - - def get_next_id(self): - return next(self._id_gen) - - def setUp(self): - cfg.CONF.set_override("enable_authentication", False) - # To use in-memory SQLite DB - cfg.CONF.set_override("connection", "sqlite://", group="database") - - super(DbTestCase, self).setUp() - - self.dbapi = dbapi.get_instance() - - 
global _DB_CACHE - if not _DB_CACHE: - _DB_CACHE = Database(sqla_api, migration, - sql_connection=CONF.database.connection) - self.useFixture(_DB_CACHE) - self._id_gen = utils.id_generator() diff --git a/watcher/tests/db/test_action.py b/watcher/tests/db/test_action.py deleted file mode 100644 index d98d854..0000000 --- a/watcher/tests/db/test_action.py +++ /dev/null @@ -1,394 +0,0 @@ -# Copyright 2015 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""Tests for manipulating Action via the DB API""" - -import freezegun -import six - -from watcher.common import exception -from watcher.common import utils as w_utils -from watcher import objects -from watcher.tests.db import base -from watcher.tests.db import utils - - -class TestDbActionFilters(base.DbTestCase): - - FAKE_OLDER_DATE = '2014-01-01T09:52:05.219414' - FAKE_OLD_DATE = '2015-01-01T09:52:05.219414' - FAKE_TODAY = '2016-02-24T09:52:05.219414' - - def setUp(self): - super(TestDbActionFilters, self).setUp() - self.context.show_deleted = True - self._data_setup() - - def _data_setup(self): - self.audit_template_name = "Audit Template" - - self.audit_template = utils.create_test_audit_template( - name=self.audit_template_name, id=1, uuid=None) - self.audit = utils.create_test_audit( - audit_template_id=self.audit_template.id, id=1, uuid=None) - self.action_plan = utils.create_test_action_plan( - audit_id=self.audit.id, id=1, uuid=None) - - with freezegun.freeze_time(self.FAKE_TODAY): - self.action1 = utils.create_test_action( - action_plan_id=self.action_plan.id, id=1, uuid=None) - with freezegun.freeze_time(self.FAKE_OLD_DATE): - self.action2 = utils.create_test_action( - action_plan_id=self.action_plan.id, id=2, uuid=None) - with freezegun.freeze_time(self.FAKE_OLDER_DATE): - self.action3 = utils.create_test_action( - action_plan_id=self.action_plan.id, id=3, uuid=None) - - def _soft_delete_actions(self): - with freezegun.freeze_time(self.FAKE_TODAY): - self.dbapi.soft_delete_action(self.action1.uuid) - with freezegun.freeze_time(self.FAKE_OLD_DATE): - self.dbapi.soft_delete_action(self.action2.uuid) - with freezegun.freeze_time(self.FAKE_OLDER_DATE): - self.dbapi.soft_delete_action(self.action3.uuid) - - def _update_actions(self): - with freezegun.freeze_time(self.FAKE_TODAY): - self.dbapi.update_action( - self.action1.uuid, - values={"state": objects.action_plan.State.SUCCEEDED}) - with freezegun.freeze_time(self.FAKE_OLD_DATE): - 
self.dbapi.update_action( - self.action2.uuid, - values={"state": objects.action_plan.State.SUCCEEDED}) - with freezegun.freeze_time(self.FAKE_OLDER_DATE): - self.dbapi.update_action( - self.action3.uuid, - values={"state": objects.action_plan.State.SUCCEEDED}) - - def test_get_action_filter_deleted_true(self): - with freezegun.freeze_time(self.FAKE_TODAY): - self.dbapi.soft_delete_action(self.action1.uuid) - - res = self.dbapi.get_action_list( - self.context, filters={'deleted': True}) - - self.assertEqual([self.action1['id']], [r.id for r in res]) - - def test_get_action_filter_deleted_false(self): - with freezegun.freeze_time(self.FAKE_TODAY): - self.dbapi.soft_delete_action(self.action1.uuid) - - res = self.dbapi.get_action_list( - self.context, filters={'deleted': False}) - - self.assertEqual([self.action2['id'], self.action3['id']], - [r.id for r in res]) - - def test_get_action_filter_deleted_at_eq(self): - self._soft_delete_actions() - - res = self.dbapi.get_action_list( - self.context, filters={'deleted_at__eq': self.FAKE_TODAY}) - - self.assertEqual([self.action1['id']], [r.id for r in res]) - - def test_get_action_filter_deleted_at_lt(self): - self._soft_delete_actions() - - res = self.dbapi.get_action_list( - self.context, filters={'deleted_at__lt': self.FAKE_TODAY}) - - self.assertEqual( - [self.action2['id'], self.action3['id']], - [r.id for r in res]) - - def test_get_action_filter_deleted_at_lte(self): - self._soft_delete_actions() - - res = self.dbapi.get_action_list( - self.context, filters={'deleted_at__lte': self.FAKE_OLD_DATE}) - - self.assertEqual( - [self.action2['id'], self.action3['id']], - [r.id for r in res]) - - def test_get_action_filter_deleted_at_gt(self): - self._soft_delete_actions() - - res = self.dbapi.get_action_list( - self.context, filters={'deleted_at__gt': self.FAKE_OLD_DATE}) - - self.assertEqual([self.action1['id']], [r.id for r in res]) - - def test_get_action_filter_deleted_at_gte(self): - self._soft_delete_actions() - - 
res = self.dbapi.get_action_list( - self.context, filters={'deleted_at__gte': self.FAKE_OLD_DATE}) - - self.assertEqual( - [self.action1['id'], self.action2['id']], - [r.id for r in res]) - - # created_at # - - def test_get_action_filter_created_at_eq(self): - res = self.dbapi.get_action_list( - self.context, filters={'created_at__eq': self.FAKE_TODAY}) - - self.assertEqual([self.action1['id']], [r.id for r in res]) - - def test_get_action_filter_created_at_lt(self): - with freezegun.freeze_time(self.FAKE_TODAY): - res = self.dbapi.get_action_list( - self.context, filters={'created_at__lt': self.FAKE_TODAY}) - - self.assertEqual( - [self.action2['id'], self.action3['id']], - [r.id for r in res]) - - def test_get_action_filter_created_at_lte(self): - res = self.dbapi.get_action_list( - self.context, filters={'created_at__lte': self.FAKE_OLD_DATE}) - - self.assertEqual( - [self.action2['id'], self.action3['id']], - [r.id for r in res]) - - def test_get_action_filter_created_at_gt(self): - res = self.dbapi.get_action_list( - self.context, filters={'created_at__gt': self.FAKE_OLD_DATE}) - - self.assertEqual([self.action1['id']], [r.id for r in res]) - - def test_get_action_filter_created_at_gte(self): - res = self.dbapi.get_action_list( - self.context, filters={'created_at__gte': self.FAKE_OLD_DATE}) - - self.assertEqual( - [self.action1['id'], self.action2['id']], - [r.id for r in res]) - - # updated_at # - - def test_get_action_filter_updated_at_eq(self): - self._update_actions() - - res = self.dbapi.get_action_list( - self.context, filters={'updated_at__eq': self.FAKE_TODAY}) - - self.assertEqual([self.action1['id']], [r.id for r in res]) - - def test_get_action_filter_updated_at_lt(self): - self._update_actions() - - res = self.dbapi.get_action_list( - self.context, filters={'updated_at__lt': self.FAKE_TODAY}) - - self.assertEqual( - [self.action2['id'], self.action3['id']], - [r.id for r in res]) - - def test_get_action_filter_updated_at_lte(self): - 
self._update_actions() - - res = self.dbapi.get_action_list( - self.context, filters={'updated_at__lte': self.FAKE_OLD_DATE}) - - self.assertEqual( - [self.action2['id'], self.action3['id']], - [r.id for r in res]) - - def test_get_action_filter_updated_at_gt(self): - self._update_actions() - - res = self.dbapi.get_action_list( - self.context, filters={'updated_at__gt': self.FAKE_OLD_DATE}) - - self.assertEqual([self.action1['id']], [r.id for r in res]) - - def test_get_action_filter_updated_at_gte(self): - self._update_actions() - - res = self.dbapi.get_action_list( - self.context, filters={'updated_at__gte': self.FAKE_OLD_DATE}) - - self.assertEqual( - [self.action1['id'], self.action2['id']], - [r.id for r in res]) - - -class DbActionTestCase(base.DbTestCase): - - def _create_test_action(self, **kwargs): - action = utils.get_test_action(**kwargs) - self.dbapi.create_action(action) - return action - - def _create_test_action_plan(self, **kwargs): - action_plan = utils.get_test_action_plan(**kwargs) - self.dbapi.create_action_plan(action_plan) - return action_plan - - def test_get_action_list(self): - uuids = [] - for _ in range(1, 4): - action = utils.create_test_action(uuid=w_utils.generate_uuid()) - uuids.append(six.text_type(action['uuid'])) - actions = self.dbapi.get_action_list(self.context) - action_uuids = [a.uuid for a in actions] - self.assertEqual(3, len(action_uuids)) - self.assertEqual(sorted(uuids), sorted(action_uuids)) - for action in actions: - self.assertIsNone(action.action_plan) - - def test_get_action_list_eager(self): - _action_plan = utils.get_test_action_plan() - action_plan = self.dbapi.create_action_plan(_action_plan) - - uuids = [] - for i in range(1, 4): - action = utils.create_test_action( - id=i, uuid=w_utils.generate_uuid(), - action_plan_id=action_plan.id) - uuids.append(six.text_type(action['uuid'])) - actions = self.dbapi.get_action_list(self.context, eager=True) - action_map = {a.uuid: a for a in actions} - 
self.assertEqual(sorted(uuids), sorted(action_map.keys())) - eager_action = action_map[action.uuid] - self.assertEqual( - action_plan.as_dict(), eager_action.action_plan.as_dict()) - - def test_get_action_list_with_filters(self): - audit = utils.create_test_audit(uuid=w_utils.generate_uuid()) - action_plan = self._create_test_action_plan( - id=1, - uuid=w_utils.generate_uuid(), - audit_id=audit.id, - parents=None, - state=objects.action_plan.State.RECOMMENDED) - action1 = self._create_test_action( - id=1, - action_plan_id=1, - description='description action 1', - uuid=w_utils.generate_uuid(), - parents=None, - state=objects.action_plan.State.PENDING) - action2 = self._create_test_action( - id=2, - action_plan_id=2, - description='description action 2', - uuid=w_utils.generate_uuid(), - parents=[action1['uuid']], - state=objects.action_plan.State.PENDING) - action3 = self._create_test_action( - id=3, - action_plan_id=1, - description='description action 3', - uuid=w_utils.generate_uuid(), - parents=[action2['uuid']], - state=objects.action_plan.State.ONGOING) - res = self.dbapi.get_action_list( - self.context, - filters={'state': objects.action_plan.State.ONGOING}) - self.assertEqual([action3['id']], [r.id for r in res]) - - res = self.dbapi.get_action_list(self.context, - filters={'state': 'bad-state'}) - self.assertEqual([], [r.id for r in res]) - - res = self.dbapi.get_action_list( - self.context, - filters={'action_plan_id': 2}) - self.assertEqual([action2['id']], [r.id for r in res]) - - res = self.dbapi.get_action_list( - self.context, - filters={'action_plan_uuid': action_plan['uuid']}) - self.assertEqual( - sorted([action1['id'], action3['id']]), - sorted([r.id for r in res])) - - res = self.dbapi.get_action_list( - self.context, - filters={'audit_uuid': audit.uuid}) - for action in res: - self.assertEqual(action_plan['id'], action.action_plan_id) - - def test_get_action_list_with_filter_by_uuid(self): - action = self._create_test_action() - res = 
self.dbapi.get_action_list( - self.context, filters={'uuid': action["uuid"]}) - - self.assertEqual(len(res), 1) - self.assertEqual(action['uuid'], res[0].uuid) - - def test_get_action_by_id(self): - action = self._create_test_action() - action = self.dbapi.get_action_by_id(self.context, action['id']) - self.assertEqual(action['uuid'], action.uuid) - - def test_get_action_by_uuid(self): - action = self._create_test_action() - action = self.dbapi.get_action_by_uuid(self.context, action['uuid']) - self.assertEqual(action['id'], action.id) - - def test_get_action_that_does_not_exist(self): - self.assertRaises(exception.ActionNotFound, - self.dbapi.get_action_by_id, self.context, 1234) - - def test_update_action(self): - action = self._create_test_action() - res = self.dbapi.update_action( - action['id'], {'state': objects.action_plan.State.CANCELLED}) - self.assertEqual(objects.action_plan.State.CANCELLED, res.state) - - def test_update_action_that_does_not_exist(self): - self.assertRaises(exception.ActionNotFound, - self.dbapi.update_action, 1234, {'state': ''}) - - def test_update_action_uuid(self): - action = self._create_test_action() - self.assertRaises(exception.Invalid, - self.dbapi.update_action, action['id'], - {'uuid': 'hello'}) - - def test_destroy_action(self): - action = self._create_test_action() - self.dbapi.destroy_action(action['id']) - self.assertRaises(exception.ActionNotFound, - self.dbapi.get_action_by_id, - self.context, action['id']) - - def test_destroy_action_by_uuid(self): - uuid = w_utils.generate_uuid() - self._create_test_action(uuid=uuid) - self.assertIsNotNone(self.dbapi.get_action_by_uuid(self.context, - uuid)) - self.dbapi.destroy_action(uuid) - self.assertRaises(exception.ActionNotFound, - self.dbapi.get_action_by_uuid, self.context, uuid) - - def test_destroy_action_that_does_not_exist(self): - self.assertRaises(exception.ActionNotFound, - self.dbapi.destroy_action, 1234) - - def test_create_action_already_exists(self): - uuid = 
w_utils.generate_uuid() - self._create_test_action(id=1, uuid=uuid) - self.assertRaises(exception.ActionAlreadyExists, - self._create_test_action, - id=2, uuid=uuid) diff --git a/watcher/tests/db/test_action_plan.py b/watcher/tests/db/test_action_plan.py deleted file mode 100644 index 94e5757..0000000 --- a/watcher/tests/db/test_action_plan.py +++ /dev/null @@ -1,391 +0,0 @@ -# Copyright 2015 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Tests for manipulating ActionPlan via the DB API""" - -import freezegun -import six - -from watcher.common import exception -from watcher.common import utils as w_utils -from watcher.objects import action_plan as ap_objects -from watcher.tests.db import base -from watcher.tests.db import utils - - -class TestDbActionPlanFilters(base.DbTestCase): - - FAKE_OLDER_DATE = '2014-01-01T09:52:05.219414' - FAKE_OLD_DATE = '2015-01-01T09:52:05.219414' - FAKE_TODAY = '2016-02-24T09:52:05.219414' - - def setUp(self): - super(TestDbActionPlanFilters, self).setUp() - self.context.show_deleted = True - self._data_setup() - - def _data_setup(self): - self.audit_template_name = "Audit Template" - - self.audit_template = utils.create_test_audit_template( - name=self.audit_template_name, id=1, uuid=None) - self.audit = utils.create_test_audit( - audit_template_id=self.audit_template.id, id=1, uuid=None) - - with freezegun.freeze_time(self.FAKE_TODAY): - self.action_plan1 = 
utils.create_test_action_plan( - audit_id=self.audit.id, id=1, uuid=None) - with freezegun.freeze_time(self.FAKE_OLD_DATE): - self.action_plan2 = utils.create_test_action_plan( - audit_id=self.audit.id, id=2, uuid=None) - with freezegun.freeze_time(self.FAKE_OLDER_DATE): - self.action_plan3 = utils.create_test_action_plan( - audit_id=self.audit.id, id=3, uuid=None) - - def _soft_delete_action_plans(self): - with freezegun.freeze_time(self.FAKE_TODAY): - self.dbapi.soft_delete_action_plan(self.action_plan1.uuid) - with freezegun.freeze_time(self.FAKE_OLD_DATE): - self.dbapi.soft_delete_action_plan(self.action_plan2.uuid) - with freezegun.freeze_time(self.FAKE_OLDER_DATE): - self.dbapi.soft_delete_action_plan(self.action_plan3.uuid) - - def _update_action_plans(self): - with freezegun.freeze_time(self.FAKE_TODAY): - self.dbapi.update_action_plan( - self.action_plan1.uuid, - values={"state": ap_objects.State.SUCCEEDED}) - with freezegun.freeze_time(self.FAKE_OLD_DATE): - self.dbapi.update_action_plan( - self.action_plan2.uuid, - values={"state": ap_objects.State.SUCCEEDED}) - with freezegun.freeze_time(self.FAKE_OLDER_DATE): - self.dbapi.update_action_plan( - self.action_plan3.uuid, - values={"state": ap_objects.State.SUCCEEDED}) - - def test_get_action_plan_list_filter_deleted_true(self): - with freezegun.freeze_time(self.FAKE_TODAY): - self.dbapi.soft_delete_action_plan(self.action_plan1.uuid) - - res = self.dbapi.get_action_plan_list( - self.context, filters={'deleted': True}) - - self.assertEqual([self.action_plan1['id']], [r.id for r in res]) - - def test_get_action_plan_list_filter_deleted_false(self): - with freezegun.freeze_time(self.FAKE_TODAY): - self.dbapi.soft_delete_action_plan(self.action_plan1.uuid) - - res = self.dbapi.get_action_plan_list( - self.context, filters={'deleted': False}) - - self.assertEqual([self.action_plan2['id'], self.action_plan3['id']], - [r.id for r in res]) - - def test_get_action_plan_list_filter_deleted_at_eq(self): - 
self._soft_delete_action_plans() - - res = self.dbapi.get_action_plan_list( - self.context, filters={'deleted_at__eq': self.FAKE_TODAY}) - - self.assertEqual([self.action_plan1['id']], [r.id for r in res]) - - def test_get_action_plan_list_filter_deleted_at_lt(self): - self._soft_delete_action_plans() - - res = self.dbapi.get_action_plan_list( - self.context, filters={'deleted_at__lt': self.FAKE_TODAY}) - - self.assertEqual( - [self.action_plan2['id'], self.action_plan3['id']], - [r.id for r in res]) - - def test_get_action_plan_list_filter_deleted_at_lte(self): - self._soft_delete_action_plans() - - res = self.dbapi.get_action_plan_list( - self.context, filters={'deleted_at__lte': self.FAKE_OLD_DATE}) - - self.assertEqual( - [self.action_plan2['id'], self.action_plan3['id']], - [r.id for r in res]) - - def test_get_action_plan_list_filter_deleted_at_gt(self): - self._soft_delete_action_plans() - - res = self.dbapi.get_action_plan_list( - self.context, filters={'deleted_at__gt': self.FAKE_OLD_DATE}) - - self.assertEqual([self.action_plan1['id']], [r.id for r in res]) - - def test_get_action_plan_list_filter_deleted_at_gte(self): - self._soft_delete_action_plans() - - res = self.dbapi.get_action_plan_list( - self.context, filters={'deleted_at__gte': self.FAKE_OLD_DATE}) - - self.assertEqual( - [self.action_plan1['id'], self.action_plan2['id']], - [r.id for r in res]) - - # created_at # - - def test_get_action_plan_list_filter_created_at_eq(self): - res = self.dbapi.get_action_plan_list( - self.context, filters={'created_at__eq': self.FAKE_TODAY}) - - self.assertEqual([self.action_plan1['id']], [r.id for r in res]) - - def test_get_action_plan_list_filter_created_at_lt(self): - res = self.dbapi.get_action_plan_list( - self.context, filters={'created_at__lt': self.FAKE_TODAY}) - - self.assertEqual( - [self.action_plan2['id'], self.action_plan3['id']], - [r.id for r in res]) - - def test_get_action_plan_list_filter_created_at_lte(self): - res = 
self.dbapi.get_action_plan_list( - self.context, filters={'created_at__lte': self.FAKE_OLD_DATE}) - - self.assertEqual( - [self.action_plan2['id'], self.action_plan3['id']], - [r.id for r in res]) - - def test_get_action_plan_list_filter_created_at_gt(self): - res = self.dbapi.get_action_plan_list( - self.context, filters={'created_at__gt': self.FAKE_OLD_DATE}) - - self.assertEqual([self.action_plan1['id']], [r.id for r in res]) - - def test_get_action_plan_list_filter_created_at_gte(self): - res = self.dbapi.get_action_plan_list( - self.context, filters={'created_at__gte': self.FAKE_OLD_DATE}) - - self.assertEqual( - [self.action_plan1['id'], self.action_plan2['id']], - [r.id for r in res]) - - # updated_at # - - def test_get_action_plan_list_filter_updated_at_eq(self): - self._update_action_plans() - - res = self.dbapi.get_action_plan_list( - self.context, filters={'updated_at__eq': self.FAKE_TODAY}) - - self.assertEqual([self.action_plan1['id']], [r.id for r in res]) - - def test_get_action_plan_list_filter_updated_at_lt(self): - self._update_action_plans() - - res = self.dbapi.get_action_plan_list( - self.context, filters={'updated_at__lt': self.FAKE_TODAY}) - - self.assertEqual( - [self.action_plan2['id'], self.action_plan3['id']], - [r.id for r in res]) - - def test_get_action_plan_list_filter_updated_at_lte(self): - self._update_action_plans() - - res = self.dbapi.get_action_plan_list( - self.context, filters={'updated_at__lte': self.FAKE_OLD_DATE}) - - self.assertEqual( - [self.action_plan2['id'], self.action_plan3['id']], - [r.id for r in res]) - - def test_get_action_plan_list_filter_updated_at_gt(self): - self._update_action_plans() - - res = self.dbapi.get_action_plan_list( - self.context, filters={'updated_at__gt': self.FAKE_OLD_DATE}) - - self.assertEqual([self.action_plan1['id']], [r.id for r in res]) - - def test_get_action_plan_list_filter_updated_at_gte(self): - self._update_action_plans() - - res = self.dbapi.get_action_plan_list( - self.context, 
filters={'updated_at__gte': self.FAKE_OLD_DATE}) - - self.assertEqual( - [self.action_plan1['id'], self.action_plan2['id']], - [r.id for r in res]) - - -class DbActionPlanTestCase(base.DbTestCase): - - def _create_test_audit(self, **kwargs): - audit = utils.get_test_audit(**kwargs) - self.dbapi.create_audit(audit) - return audit - - def _create_test_action_plan(self, **kwargs): - action_plan = utils.get_test_action_plan(**kwargs) - self.dbapi.create_action_plan(action_plan) - return action_plan - - def test_get_action_plan_list(self): - uuids = [] - for _ in range(1, 4): - action_plan = utils.create_test_action_plan( - uuid=w_utils.generate_uuid()) - uuids.append(six.text_type(action_plan['uuid'])) - action_plans = self.dbapi.get_action_plan_list(self.context) - action_plan_uuids = [ap.uuid for ap in action_plans] - self.assertEqual(sorted(uuids), sorted(action_plan_uuids)) - for action_plan in action_plans: - self.assertIsNone(action_plan.audit) - self.assertIsNone(action_plan.strategy) - - def test_get_action_plan_list_eager(self): - _strategy = utils.get_test_strategy() - strategy = self.dbapi.create_strategy(_strategy) - _audit = utils.get_test_audit() - audit = self.dbapi.create_audit(_audit) - - uuids = [] - for _ in range(1, 4): - action_plan = utils.create_test_action_plan( - uuid=w_utils.generate_uuid()) - uuids.append(six.text_type(action_plan['uuid'])) - action_plans = self.dbapi.get_action_plan_list( - self.context, eager=True) - action_plan_map = {a.uuid: a for a in action_plans} - self.assertEqual(sorted(uuids), sorted(action_plan_map.keys())) - eager_action_plan = action_plan_map[action_plan.uuid] - self.assertEqual( - strategy.as_dict(), eager_action_plan.strategy.as_dict()) - self.assertEqual(audit.as_dict(), eager_action_plan.audit.as_dict()) - - def test_get_action_plan_list_with_filters(self): - audit = self._create_test_audit( - id=2, - audit_type='ONESHOT', - uuid=w_utils.generate_uuid(), - state=ap_objects.State.ONGOING) - action_plan1 = 
self._create_test_action_plan( - id=1, - uuid=w_utils.generate_uuid(), - audit_id=audit['id'], - state=ap_objects.State.RECOMMENDED) - action_plan2 = self._create_test_action_plan( - id=2, - uuid=w_utils.generate_uuid(), - audit_id=audit['id'], - state=ap_objects.State.ONGOING) - - res = self.dbapi.get_action_plan_list( - self.context, - filters={'state': ap_objects.State.RECOMMENDED}) - self.assertEqual([action_plan1['id']], [r.id for r in res]) - - res = self.dbapi.get_action_plan_list( - self.context, - filters={'state': ap_objects.State.ONGOING}) - self.assertEqual([action_plan2['id']], [r.id for r in res]) - - res = self.dbapi.get_action_plan_list( - self.context, - filters={'audit_uuid': audit['uuid']}) - - for r in res: - self.assertEqual(audit['id'], r.audit_id) - - self.dbapi.soft_delete_action_plan(action_plan1['uuid']) - res = self.dbapi.get_action_plan_list( - self.context, - filters={'audit_uuid': audit['uuid']}) - - self.assertEqual([action_plan2['id']], [r.id for r in res]) - self.assertNotEqual([action_plan1['id']], [r.id for r in res]) - - def test_get_action_plan_list_with_filter_by_uuid(self): - action_plan = self._create_test_action_plan() - res = self.dbapi.get_action_plan_list( - self.context, filters={'uuid': action_plan["uuid"]}) - - self.assertEqual(len(res), 1) - self.assertEqual(action_plan['uuid'], res[0].uuid) - - def test_get_action_plan_by_id(self): - action_plan = self._create_test_action_plan() - action_plan = self.dbapi.get_action_plan_by_id( - self.context, action_plan['id']) - self.assertEqual(action_plan['uuid'], action_plan.uuid) - - def test_get_action_plan_by_uuid(self): - action_plan = self._create_test_action_plan() - action_plan = self.dbapi.get_action_plan_by_uuid( - self.context, action_plan['uuid']) - self.assertEqual(action_plan['id'], action_plan.id) - - def test_get_action_plan_that_does_not_exist(self): - self.assertRaises(exception.ActionPlanNotFound, - self.dbapi.get_action_plan_by_id, self.context, 1234) - - def 
test_update_action_plan(self): - action_plan = self._create_test_action_plan() - res = self.dbapi.update_action_plan( - action_plan['id'], {'name': 'updated-model'}) - self.assertEqual('updated-model', res.name) - - def test_update_action_plan_that_does_not_exist(self): - self.assertRaises(exception.ActionPlanNotFound, - self.dbapi.update_action_plan, 1234, {'name': ''}) - - def test_update_action_plan_uuid(self): - action_plan = self._create_test_action_plan() - self.assertRaises(exception.Invalid, - self.dbapi.update_action_plan, action_plan['id'], - {'uuid': 'hello'}) - - def test_destroy_action_plan(self): - action_plan = self._create_test_action_plan() - self.dbapi.destroy_action_plan(action_plan['id']) - self.assertRaises(exception.ActionPlanNotFound, - self.dbapi.get_action_plan_by_id, - self.context, action_plan['id']) - - def test_destroy_action_plan_by_uuid(self): - uuid = w_utils.generate_uuid() - self._create_test_action_plan(uuid=uuid) - self.assertIsNotNone(self.dbapi.get_action_plan_by_uuid( - self.context, uuid)) - self.dbapi.destroy_action_plan(uuid) - self.assertRaises(exception.ActionPlanNotFound, - self.dbapi.get_action_plan_by_uuid, - self.context, uuid) - - def test_destroy_action_plan_that_does_not_exist(self): - self.assertRaises(exception.ActionPlanNotFound, - self.dbapi.destroy_action_plan, 1234) - - def test_destroy_action_plan_that_referenced_by_actions(self): - action_plan = self._create_test_action_plan() - action = utils.create_test_action(action_plan_id=action_plan['id']) - self.assertEqual(action_plan['id'], action.action_plan_id) - self.assertRaises(exception.ActionPlanReferenced, - self.dbapi.destroy_action_plan, action_plan['id']) - - def test_create_action_plan_already_exists(self): - uuid = w_utils.generate_uuid() - self._create_test_action_plan(id=1, uuid=uuid) - self.assertRaises(exception.ActionPlanAlreadyExists, - self._create_test_action_plan, - id=2, uuid=uuid) diff --git a/watcher/tests/db/test_audit.py 
b/watcher/tests/db/test_audit.py deleted file mode 100644 index 1b467ba..0000000 --- a/watcher/tests/db/test_audit.py +++ /dev/null @@ -1,391 +0,0 @@ -# Copyright 2015 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Tests for manipulating Audit via the DB API""" - -import freezegun -import six - -from watcher.common import exception -from watcher.common import utils as w_utils -from watcher import objects -from watcher.tests.db import base -from watcher.tests.db import utils - - -class TestDbAuditFilters(base.DbTestCase): - - FAKE_OLDER_DATE = '2014-01-01T09:52:05.219414' - FAKE_OLD_DATE = '2015-01-01T09:52:05.219414' - FAKE_TODAY = '2016-02-24T09:52:05.219414' - - def setUp(self): - super(TestDbAuditFilters, self).setUp() - self.context.show_deleted = True - self._data_setup() - - def _data_setup(self): - self.audit_template_name = "Audit Template" - - self.audit_template = utils.create_test_audit_template( - name=self.audit_template_name, id=1, uuid=None) - - with freezegun.freeze_time(self.FAKE_TODAY): - self.audit1 = utils.create_test_audit( - audit_template_id=self.audit_template.id, id=1, uuid=None) - with freezegun.freeze_time(self.FAKE_OLD_DATE): - self.audit2 = utils.create_test_audit( - audit_template_id=self.audit_template.id, id=2, uuid=None, - state=objects.audit.State.FAILED) - with freezegun.freeze_time(self.FAKE_OLDER_DATE): - self.audit3 = utils.create_test_audit( - 
audit_template_id=self.audit_template.id, id=3, uuid=None, - state=objects.audit.State.CANCELLED) - with freezegun.freeze_time(self.FAKE_OLDER_DATE): - self.audit4 = utils.create_test_audit( - audit_template_id=self.audit_template.id, id=4, uuid=None, - state=objects.audit.State.SUSPENDED) - - def _soft_delete_audits(self): - with freezegun.freeze_time(self.FAKE_TODAY): - self.dbapi.soft_delete_audit(self.audit1.uuid) - with freezegun.freeze_time(self.FAKE_OLD_DATE): - self.dbapi.soft_delete_audit(self.audit2.uuid) - with freezegun.freeze_time(self.FAKE_OLDER_DATE): - self.dbapi.soft_delete_audit(self.audit3.uuid) - - def _update_audits(self): - with freezegun.freeze_time(self.FAKE_TODAY): - self.dbapi.update_audit( - self.audit1.uuid, - values={"state": objects.audit.State.SUCCEEDED}) - with freezegun.freeze_time(self.FAKE_OLD_DATE): - self.dbapi.update_audit( - self.audit2.uuid, - values={"state": objects.audit.State.SUCCEEDED}) - with freezegun.freeze_time(self.FAKE_OLDER_DATE): - self.dbapi.update_audit( - self.audit3.uuid, - values={"state": objects.audit.State.SUCCEEDED}) - - def test_get_audit_list_filter_deleted_true(self): - with freezegun.freeze_time(self.FAKE_TODAY): - self.dbapi.soft_delete_audit(self.audit1.uuid) - - res = self.dbapi.get_audit_list( - self.context, filters={'deleted': True}) - - self.assertEqual([self.audit1['id']], [r.id for r in res]) - - def test_get_audit_list_filter_deleted_false(self): - with freezegun.freeze_time(self.FAKE_TODAY): - self.dbapi.soft_delete_audit(self.audit1.uuid) - - res = self.dbapi.get_audit_list( - self.context, filters={'deleted': False}) - - self.assertEqual( - [self.audit2['id'], self.audit3['id'], self.audit4['id']], - [r.id for r in res]) - - def test_get_audit_list_filter_deleted_at_eq(self): - self._soft_delete_audits() - - res = self.dbapi.get_audit_list( - self.context, filters={'deleted_at__eq': self.FAKE_TODAY}) - - self.assertEqual([self.audit1['id']], [r.id for r in res]) - - def 
test_get_audit_list_filter_deleted_at_lt(self): - self._soft_delete_audits() - - res = self.dbapi.get_audit_list( - self.context, filters={'deleted_at__lt': self.FAKE_TODAY}) - - self.assertEqual( - [self.audit2['id'], self.audit3['id']], - [r.id for r in res]) - - def test_get_audit_list_filter_deleted_at_lte(self): - self._soft_delete_audits() - - res = self.dbapi.get_audit_list( - self.context, filters={'deleted_at__lte': self.FAKE_OLD_DATE}) - - self.assertEqual( - [self.audit2['id'], self.audit3['id']], - [r.id for r in res]) - - def test_get_audit_list_filter_deleted_at_gt(self): - self._soft_delete_audits() - - res = self.dbapi.get_audit_list( - self.context, filters={'deleted_at__gt': self.FAKE_OLD_DATE}) - - self.assertEqual([self.audit1['id']], [r.id for r in res]) - - def test_get_audit_list_filter_deleted_at_gte(self): - self._soft_delete_audits() - - res = self.dbapi.get_audit_list( - self.context, filters={'deleted_at__gte': self.FAKE_OLD_DATE}) - - self.assertEqual( - [self.audit1['id'], self.audit2['id']], - [r.id for r in res]) - - # created_at # - - def test_get_audit_list_filter_created_at_eq(self): - res = self.dbapi.get_audit_list( - self.context, filters={'created_at__eq': self.FAKE_TODAY}) - - self.assertEqual([self.audit1['id']], [r.id for r in res]) - - def test_get_audit_list_filter_created_at_lt(self): - res = self.dbapi.get_audit_list( - self.context, filters={'created_at__lt': self.FAKE_TODAY}) - - self.assertEqual( - [self.audit2['id'], self.audit3['id'], self.audit4['id']], - [r.id for r in res]) - - def test_get_audit_list_filter_created_at_lte(self): - res = self.dbapi.get_audit_list( - self.context, filters={'created_at__lte': self.FAKE_OLD_DATE}) - - self.assertEqual( - [self.audit2['id'], self.audit3['id'], self.audit4['id']], - [r.id for r in res]) - - def test_get_audit_list_filter_created_at_gt(self): - res = self.dbapi.get_audit_list( - self.context, filters={'created_at__gt': self.FAKE_OLD_DATE}) - - 
self.assertEqual([self.audit1['id']], [r.id for r in res]) - - def test_get_audit_list_filter_created_at_gte(self): - res = self.dbapi.get_audit_list( - self.context, filters={'created_at__gte': self.FAKE_OLD_DATE}) - - self.assertEqual( - [self.audit1['id'], self.audit2['id']], - [r.id for r in res]) - - # updated_at # - - def test_get_audit_list_filter_updated_at_eq(self): - self._update_audits() - - res = self.dbapi.get_audit_list( - self.context, filters={'updated_at__eq': self.FAKE_TODAY}) - - self.assertEqual([self.audit1['id']], [r.id for r in res]) - - def test_get_audit_list_filter_updated_at_lt(self): - self._update_audits() - - res = self.dbapi.get_audit_list( - self.context, filters={'updated_at__lt': self.FAKE_TODAY}) - - self.assertEqual( - [self.audit2['id'], self.audit3['id']], - [r.id for r in res]) - - def test_get_audit_list_filter_updated_at_lte(self): - self._update_audits() - - res = self.dbapi.get_audit_list( - self.context, filters={'updated_at__lte': self.FAKE_OLD_DATE}) - - self.assertEqual( - [self.audit2['id'], self.audit3['id']], - [r.id for r in res]) - - def test_get_audit_list_filter_updated_at_gt(self): - self._update_audits() - - res = self.dbapi.get_audit_list( - self.context, filters={'updated_at__gt': self.FAKE_OLD_DATE}) - - self.assertEqual([self.audit1['id']], [r.id for r in res]) - - def test_get_audit_list_filter_updated_at_gte(self): - self._update_audits() - - res = self.dbapi.get_audit_list( - self.context, filters={'updated_at__gte': self.FAKE_OLD_DATE}) - - self.assertEqual( - [self.audit1['id'], self.audit2['id']], - [r.id for r in res]) - - def test_get_audit_list_filter_state_in(self): - res = self.dbapi.get_audit_list( - self.context, - filters={ - 'state__in': - objects.audit.AuditStateTransitionManager.INACTIVE_STATES - }) - - self.assertEqual( - [self.audit2['id'], self.audit3['id'], self.audit4['id']], - [r.id for r in res]) - - def test_get_audit_list_filter_state_notin(self): - res = 
self.dbapi.get_audit_list( - self.context, - filters={ - 'state__notin': - objects.audit.AuditStateTransitionManager.INACTIVE_STATES - }) - - self.assertEqual( - [self.audit1['id']], - [r.id for r in res]) - - -class DbAuditTestCase(base.DbTestCase): - - def _create_test_audit(self, **kwargs): - audit = utils.get_test_audit(**kwargs) - self.dbapi.create_audit(audit) - return audit - - def test_get_audit_list(self): - uuids = [] - for _ in range(1, 4): - audit = utils.create_test_audit(uuid=w_utils.generate_uuid()) - uuids.append(six.text_type(audit['uuid'])) - audits = self.dbapi.get_audit_list(self.context) - audit_uuids = [a.uuid for a in audits] - self.assertEqual(sorted(uuids), sorted(audit_uuids)) - for audit in audits: - self.assertIsNone(audit.goal) - self.assertIsNone(audit.strategy) - - def test_get_audit_list_eager(self): - _goal = utils.get_test_goal() - goal = self.dbapi.create_goal(_goal) - _strategy = utils.get_test_strategy() - strategy = self.dbapi.create_strategy(_strategy) - - uuids = [] - for i in range(1, 4): - audit = utils.create_test_audit( - id=i, uuid=w_utils.generate_uuid(), - goal_id=goal.id, strategy_id=strategy.id) - uuids.append(six.text_type(audit['uuid'])) - audits = self.dbapi.get_audit_list(self.context, eager=True) - audit_map = {a.uuid: a for a in audits} - self.assertEqual(sorted(uuids), sorted(audit_map.keys())) - eager_audit = audit_map[audit.uuid] - self.assertEqual(goal.as_dict(), eager_audit.goal.as_dict()) - self.assertEqual(strategy.as_dict(), eager_audit.strategy.as_dict()) - - def test_get_audit_list_with_filters(self): - audit1 = self._create_test_audit( - id=1, - audit_type=objects.audit.AuditType.ONESHOT.value, - uuid=w_utils.generate_uuid(), - state=objects.audit.State.ONGOING) - audit2 = self._create_test_audit( - id=2, - audit_type='CONTINUOUS', - uuid=w_utils.generate_uuid(), - state=objects.audit.State.PENDING) - - res = self.dbapi.get_audit_list( - self.context, - filters={'audit_type': 
objects.audit.AuditType.ONESHOT.value}) - self.assertEqual([audit1['id']], [r.id for r in res]) - - res = self.dbapi.get_audit_list(self.context, - filters={'audit_type': 'bad-type'}) - self.assertEqual([], [r.id for r in res]) - - res = self.dbapi.get_audit_list( - self.context, - filters={'state': objects.audit.State.ONGOING}) - self.assertEqual([audit1['id']], [r.id for r in res]) - - res = self.dbapi.get_audit_list( - self.context, - filters={'state': objects.audit.State.PENDING}) - self.assertEqual([audit2['id']], [r.id for r in res]) - - def test_get_audit_list_with_filter_by_uuid(self): - audit = self._create_test_audit() - res = self.dbapi.get_audit_list( - self.context, filters={'uuid': audit["uuid"]}) - - self.assertEqual(len(res), 1) - self.assertEqual(audit['uuid'], res[0].uuid) - - def test_get_audit_by_id(self): - audit = self._create_test_audit() - audit = self.dbapi.get_audit_by_id(self.context, audit['id']) - self.assertEqual(audit['uuid'], audit.uuid) - - def test_get_audit_by_uuid(self): - audit = self._create_test_audit() - audit = self.dbapi.get_audit_by_uuid(self.context, audit['uuid']) - self.assertEqual(audit['id'], audit.id) - - def test_get_audit_that_does_not_exist(self): - self.assertRaises(exception.AuditNotFound, - self.dbapi.get_audit_by_id, self.context, 1234) - - def test_update_audit(self): - audit = self._create_test_audit() - res = self.dbapi.update_audit(audit['id'], {'name': 'updated-model'}) - self.assertEqual('updated-model', res.name) - - def test_update_audit_that_does_not_exist(self): - self.assertRaises(exception.AuditNotFound, - self.dbapi.update_audit, 1234, {'name': ''}) - - def test_update_audit_uuid(self): - audit = self._create_test_audit() - self.assertRaises(exception.Invalid, - self.dbapi.update_audit, audit['id'], - {'uuid': 'hello'}) - - def test_destroy_audit(self): - audit = self._create_test_audit() - self.dbapi.destroy_audit(audit['id']) - self.assertRaises(exception.AuditNotFound, - 
self.dbapi.get_audit_by_id, - self.context, audit['id']) - - def test_destroy_audit_by_uuid(self): - audit = self._create_test_audit() - self.assertIsNotNone(self.dbapi.get_audit_by_uuid(self.context, - audit['uuid'])) - self.dbapi.destroy_audit(audit['uuid']) - self.assertRaises(exception.AuditNotFound, - self.dbapi.get_audit_by_uuid, self.context, - audit['uuid']) - - def test_destroy_audit_that_does_not_exist(self): - self.assertRaises(exception.AuditNotFound, - self.dbapi.destroy_audit, 1234) - - def test_destroy_audit_that_referenced_by_action_plans(self): - audit = self._create_test_audit() - action_plan = utils.create_test_action_plan(audit_id=audit['id']) - self.assertEqual(audit['id'], action_plan.audit_id) - self.assertRaises(exception.AuditReferenced, - self.dbapi.destroy_audit, audit['id']) diff --git a/watcher/tests/db/test_audit_template.py b/watcher/tests/db/test_audit_template.py deleted file mode 100644 index a885be4..0000000 --- a/watcher/tests/db/test_audit_template.py +++ /dev/null @@ -1,388 +0,0 @@ -# Copyright 2015 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""Tests for manipulating AuditTemplate via the DB API""" - -import freezegun -import six - -from watcher.common import exception -from watcher.common import utils as w_utils -from watcher.tests.db import base -from watcher.tests.db import utils - - -class TestDbAuditTemplateFilters(base.DbTestCase): - - FAKE_OLDER_DATE = '2014-01-01T09:52:05.219414' - FAKE_OLD_DATE = '2015-01-01T09:52:05.219414' - FAKE_TODAY = '2016-02-24T09:52:05.219414' - - def setUp(self): - super(TestDbAuditTemplateFilters, self).setUp() - self.context.show_deleted = True - self._data_setup() - - def _data_setup(self): - gen_name = lambda: "Audit Template %s" % w_utils.generate_uuid() - self.audit_template1_name = gen_name() - self.audit_template2_name = gen_name() - self.audit_template3_name = gen_name() - - with freezegun.freeze_time(self.FAKE_TODAY): - self.audit_template1 = utils.create_test_audit_template( - name=self.audit_template1_name, id=1, uuid=None) - with freezegun.freeze_time(self.FAKE_OLD_DATE): - self.audit_template2 = utils.create_test_audit_template( - name=self.audit_template2_name, id=2, uuid=None) - with freezegun.freeze_time(self.FAKE_OLDER_DATE): - self.audit_template3 = utils.create_test_audit_template( - name=self.audit_template3_name, id=3, uuid=None) - - def _soft_delete_audit_templates(self): - with freezegun.freeze_time(self.FAKE_TODAY): - self.dbapi.soft_delete_audit_template(self.audit_template1.uuid) - with freezegun.freeze_time(self.FAKE_OLD_DATE): - self.dbapi.soft_delete_audit_template(self.audit_template2.uuid) - with freezegun.freeze_time(self.FAKE_OLDER_DATE): - self.dbapi.soft_delete_audit_template(self.audit_template3.uuid) - - def _update_audit_templates(self): - with freezegun.freeze_time(self.FAKE_TODAY): - self.dbapi.update_audit_template( - self.audit_template1.uuid, values={"name": "audit_template1"}) - with freezegun.freeze_time(self.FAKE_OLD_DATE): - self.dbapi.update_audit_template( - self.audit_template2.uuid, values={"name": 
"audit_template2"}) - with freezegun.freeze_time(self.FAKE_OLDER_DATE): - self.dbapi.update_audit_template( - self.audit_template3.uuid, values={"name": "audit_template3"}) - - def test_get_audit_template_list_filter_deleted_true(self): - with freezegun.freeze_time(self.FAKE_TODAY): - self.dbapi.soft_delete_audit_template(self.audit_template1.uuid) - - res = self.dbapi.get_audit_template_list( - self.context, filters={'deleted': True}) - - self.assertEqual([self.audit_template1['id']], [r.id for r in res]) - - def test_get_audit_template_list_filter_deleted_false(self): - with freezegun.freeze_time(self.FAKE_TODAY): - self.dbapi.soft_delete_audit_template(self.audit_template1.uuid) - - res = self.dbapi.get_audit_template_list( - self.context, filters={'deleted': False}) - - self.assertEqual( - [self.audit_template2['id'], self.audit_template3['id']], - [r.id for r in res]) - - def test_get_audit_template_list_filter_deleted_at_eq(self): - self._soft_delete_audit_templates() - - res = self.dbapi.get_audit_template_list( - self.context, filters={'deleted_at__eq': self.FAKE_TODAY}) - - self.assertEqual([self.audit_template1['id']], [r.id for r in res]) - - def test_get_audit_template_list_filter_deleted_at_lt(self): - self._soft_delete_audit_templates() - - res = self.dbapi.get_audit_template_list( - self.context, filters={'deleted_at__lt': self.FAKE_TODAY}) - - self.assertEqual( - [self.audit_template2['id'], self.audit_template3['id']], - [r.id for r in res]) - - def test_get_audit_template_list_filter_deleted_at_lte(self): - self._soft_delete_audit_templates() - - res = self.dbapi.get_audit_template_list( - self.context, filters={'deleted_at__lte': self.FAKE_OLD_DATE}) - - self.assertEqual( - [self.audit_template2['id'], self.audit_template3['id']], - [r.id for r in res]) - - def test_get_audit_template_list_filter_deleted_at_gt(self): - self._soft_delete_audit_templates() - - res = self.dbapi.get_audit_template_list( - self.context, filters={'deleted_at__gt': 
self.FAKE_OLD_DATE}) - - self.assertEqual([self.audit_template1['id']], [r.id for r in res]) - - def test_get_audit_template_list_filter_deleted_at_gte(self): - self._soft_delete_audit_templates() - - res = self.dbapi.get_audit_template_list( - self.context, filters={'deleted_at__gte': self.FAKE_OLD_DATE}) - - self.assertEqual( - [self.audit_template1['id'], self.audit_template2['id']], - [r.id for r in res]) - - # created_at # - - def test_get_audit_template_list_filter_created_at_eq(self): - res = self.dbapi.get_audit_template_list( - self.context, filters={'created_at__eq': self.FAKE_TODAY}) - - self.assertEqual([self.audit_template1['id']], [r.id for r in res]) - - def test_get_audit_template_list_filter_created_at_lt(self): - res = self.dbapi.get_audit_template_list( - self.context, filters={'created_at__lt': self.FAKE_TODAY}) - - self.assertEqual( - [self.audit_template2['id'], self.audit_template3['id']], - [r.id for r in res]) - - def test_get_audit_template_list_filter_created_at_lte(self): - res = self.dbapi.get_audit_template_list( - self.context, filters={'created_at__lte': self.FAKE_OLD_DATE}) - - self.assertEqual( - [self.audit_template2['id'], self.audit_template3['id']], - [r.id for r in res]) - - def test_get_audit_template_list_filter_created_at_gt(self): - res = self.dbapi.get_audit_template_list( - self.context, filters={'created_at__gt': self.FAKE_OLD_DATE}) - - self.assertEqual([self.audit_template1['id']], [r.id for r in res]) - - def test_get_audit_template_list_filter_created_at_gte(self): - res = self.dbapi.get_audit_template_list( - self.context, filters={'created_at__gte': self.FAKE_OLD_DATE}) - - self.assertEqual( - [self.audit_template1['id'], self.audit_template2['id']], - [r.id for r in res]) - - # updated_at # - - def test_get_audit_template_list_filter_updated_at_eq(self): - self._update_audit_templates() - - res = self.dbapi.get_audit_template_list( - self.context, filters={'updated_at__eq': self.FAKE_TODAY}) - - 
self.assertEqual([self.audit_template1['id']], [r.id for r in res]) - - def test_get_audit_template_list_filter_updated_at_lt(self): - self._update_audit_templates() - - res = self.dbapi.get_audit_template_list( - self.context, filters={'updated_at__lt': self.FAKE_TODAY}) - - self.assertEqual( - [self.audit_template2['id'], self.audit_template3['id']], - [r.id for r in res]) - - def test_get_audit_template_list_filter_updated_at_lte(self): - self._update_audit_templates() - - res = self.dbapi.get_audit_template_list( - self.context, filters={'updated_at__lte': self.FAKE_OLD_DATE}) - - self.assertEqual( - [self.audit_template2['id'], self.audit_template3['id']], - [r.id for r in res]) - - def test_get_audit_template_list_filter_updated_at_gt(self): - self._update_audit_templates() - - res = self.dbapi.get_audit_template_list( - self.context, filters={'updated_at__gt': self.FAKE_OLD_DATE}) - - self.assertEqual([self.audit_template1['id']], [r.id for r in res]) - - def test_get_audit_template_list_filter_updated_at_gte(self): - self._update_audit_templates() - - res = self.dbapi.get_audit_template_list( - self.context, filters={'updated_at__gte': self.FAKE_OLD_DATE}) - - self.assertEqual( - [self.audit_template1['id'], self.audit_template2['id']], - [r.id for r in res]) - - -class DbAuditTemplateTestCase(base.DbTestCase): - - def _create_test_goal(self, **kwargs): - goal = utils.get_test_goal(**kwargs) - self.dbapi.create_goal(goal) - return goal - - def _create_test_audit_template(self, **kwargs): - audit_template = utils.get_test_audit_template(**kwargs) - self.dbapi.create_audit_template(audit_template) - return audit_template - - def test_get_audit_template_list(self): - uuids = [] - for i in range(1, 4): - audit_template = utils.create_test_audit_template( - id=i, - uuid=w_utils.generate_uuid(), - name='My Audit Template {0}'.format(i)) - uuids.append(six.text_type(audit_template['uuid'])) - audit_templates = self.dbapi.get_audit_template_list(self.context) - 
audit_template_uuids = [at.uuid for at in audit_templates] - self.assertEqual(sorted(uuids), sorted(audit_template_uuids)) - for audit_template in audit_templates: - self.assertIsNone(audit_template.goal) - self.assertIsNone(audit_template.strategy) - - def test_get_audit_template_list_eager(self): - _goal = utils.get_test_goal() - goal = self.dbapi.create_goal(_goal) - _strategy = utils.get_test_strategy() - strategy = self.dbapi.create_strategy(_strategy) - - uuids = [] - for i in range(1, 4): - audit_template = utils.create_test_audit_template( - id=i, uuid=w_utils.generate_uuid(), - name='My Audit Template {0}'.format(i), - goal_id=goal.id, strategy_id=strategy.id) - uuids.append(six.text_type(audit_template['uuid'])) - audit_templates = self.dbapi.get_audit_template_list( - self.context, eager=True) - audit_template_map = {a.uuid: a for a in audit_templates} - self.assertEqual(sorted(uuids), sorted(audit_template_map.keys())) - eager_audit_template = audit_template_map[audit_template.uuid] - self.assertEqual(goal.as_dict(), eager_audit_template.goal.as_dict()) - self.assertEqual( - strategy.as_dict(), eager_audit_template.strategy.as_dict()) - - def test_get_audit_template_list_with_filters(self): - goal = self._create_test_goal(name='DUMMY') - audit_template1 = self._create_test_audit_template( - id=1, - uuid=w_utils.generate_uuid(), - name='My Audit Template 1', - description='Description of my audit template 1', - goal_id=goal['id']) - audit_template2 = self._create_test_audit_template( - id=2, - uuid=w_utils.generate_uuid(), - name='My Audit Template 2', - description='Description of my audit template 2', - goal_id=goal['id']) - - res = self.dbapi.get_audit_template_list( - self.context, filters={'name': 'My Audit Template 1'}) - self.assertEqual([audit_template1['id']], [r.id for r in res]) - - res = self.dbapi.get_audit_template_list( - self.context, filters={'name': 'Does not exist'}) - self.assertEqual([], [r.id for r in res]) - - res = 
self.dbapi.get_audit_template_list( - self.context, - filters={'goal': 'DUMMY'}) - self.assertEqual([audit_template1['id'], audit_template2['id']], - [r.id for r in res]) - - res = self.dbapi.get_audit_template_list( - self.context, - filters={'name': 'My Audit Template 2'}) - self.assertEqual([audit_template2['id']], [r.id for r in res]) - - def test_get_audit_template_list_with_filter_by_uuid(self): - audit_template = self._create_test_audit_template() - res = self.dbapi.get_audit_template_list( - self.context, filters={'uuid': audit_template["uuid"]}) - - self.assertEqual(len(res), 1) - self.assertEqual(audit_template['uuid'], res[0].uuid) - - def test_get_audit_template_by_id(self): - audit_template = self._create_test_audit_template() - audit_template = self.dbapi.get_audit_template_by_id( - self.context, audit_template['id']) - self.assertEqual(audit_template['uuid'], audit_template.uuid) - - def test_get_audit_template_by_uuid(self): - audit_template = self._create_test_audit_template() - audit_template = self.dbapi.get_audit_template_by_uuid( - self.context, audit_template['uuid']) - self.assertEqual(audit_template['id'], audit_template.id) - - def test_get_audit_template_that_does_not_exist(self): - self.assertRaises(exception.AuditTemplateNotFound, - self.dbapi.get_audit_template_by_id, - self.context, 1234) - - def test_update_audit_template(self): - audit_template = self._create_test_audit_template() - res = self.dbapi.update_audit_template(audit_template['id'], - {'name': 'updated-model'}) - self.assertEqual('updated-model', res.name) - - def test_update_audit_template_that_does_not_exist(self): - self.assertRaises(exception.AuditTemplateNotFound, - self.dbapi.update_audit_template, 1234, {'name': ''}) - - def test_update_audit_template_uuid(self): - audit_template = self._create_test_audit_template() - self.assertRaises(exception.Invalid, - self.dbapi.update_audit_template, - audit_template['id'], - {'uuid': 'hello'}) - - def 
test_destroy_audit_template(self): - audit_template = self._create_test_audit_template() - self.dbapi.destroy_audit_template(audit_template['id']) - self.assertRaises(exception.AuditTemplateNotFound, - self.dbapi.get_audit_template_by_id, - self.context, audit_template['id']) - - def test_destroy_audit_template_by_uuid(self): - uuid = w_utils.generate_uuid() - self._create_test_audit_template(uuid=uuid) - self.assertIsNotNone(self.dbapi.get_audit_template_by_uuid( - self.context, uuid)) - self.dbapi.destroy_audit_template(uuid) - self.assertRaises(exception.AuditTemplateNotFound, - self.dbapi.get_audit_template_by_uuid, - self.context, uuid) - - def test_destroy_audit_template_that_does_not_exist(self): - self.assertRaises(exception.AuditTemplateNotFound, - self.dbapi.destroy_audit_template, 1234) - - def test_create_audit_template_already_exists(self): - uuid = w_utils.generate_uuid() - self._create_test_audit_template(id=1, uuid=uuid) - self.assertRaises(exception.AuditTemplateAlreadyExists, - self._create_test_audit_template, - id=2, uuid=uuid) - - def test_audit_template_create_same_name(self): - audit_template1 = utils.create_test_audit_template( - uuid=w_utils.generate_uuid(), - name='audit_template_name') - self.assertEqual(audit_template1['uuid'], audit_template1.uuid) - self.assertRaises( - exception.AuditTemplateAlreadyExists, - utils.create_test_audit_template, - uuid=w_utils.generate_uuid(), - name='audit_template_name') diff --git a/watcher/tests/db/test_efficacy_indicator.py b/watcher/tests/db/test_efficacy_indicator.py deleted file mode 100644 index 673678c..0000000 --- a/watcher/tests/db/test_efficacy_indicator.py +++ /dev/null @@ -1,410 +0,0 @@ -# Copyright 2015 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Tests for manipulating EfficacyIndicator via the DB API""" - -import freezegun -import six - -from watcher.common import exception -from watcher.common import utils as w_utils -from watcher import objects -from watcher.tests.db import base -from watcher.tests.db import utils - - -class TestDbEfficacyIndicatorFilters(base.DbTestCase): - - FAKE_OLDER_DATE = '2014-01-01T09:52:05.219414' - FAKE_OLD_DATE = '2015-01-01T09:52:05.219414' - FAKE_TODAY = '2016-02-24T09:52:05.219414' - - def setUp(self): - super(TestDbEfficacyIndicatorFilters, self).setUp() - self.context.show_deleted = True - self._data_setup() - - def _data_setup(self): - self.audit_template_name = "Audit Template" - - self.audit_template = utils.create_test_audit_template( - name=self.audit_template_name, id=1, uuid=None) - self.audit = utils.create_test_audit( - audit_template_id=self.audit_template.id, id=1, uuid=None) - self.action_plan = utils.create_test_action_plan( - audit_id=self.audit.id, id=1, uuid=None) - - with freezegun.freeze_time(self.FAKE_TODAY): - self.efficacy_indicator1 = utils.create_test_efficacy_indicator( - action_plan_id=self.action_plan.id, id=1, uuid=None, - name="efficacy_indicator1", description="Test Indicator 1") - with freezegun.freeze_time(self.FAKE_OLD_DATE): - self.efficacy_indicator2 = utils.create_test_efficacy_indicator( - action_plan_id=self.action_plan.id, id=2, uuid=None, - name="efficacy_indicator2", description="Test Indicator 2") - with freezegun.freeze_time(self.FAKE_OLDER_DATE): - self.efficacy_indicator3 = utils.create_test_efficacy_indicator( - 
action_plan_id=self.action_plan.id, id=3, uuid=None, - name="efficacy_indicator3", description="Test Indicator 3") - - def _soft_delete_efficacy_indicators(self): - with freezegun.freeze_time(self.FAKE_TODAY): - self.dbapi.soft_delete_efficacy_indicator( - self.efficacy_indicator1.uuid) - with freezegun.freeze_time(self.FAKE_OLD_DATE): - self.dbapi.soft_delete_efficacy_indicator( - self.efficacy_indicator2.uuid) - with freezegun.freeze_time(self.FAKE_OLDER_DATE): - self.dbapi.soft_delete_efficacy_indicator( - self.efficacy_indicator3.uuid) - - def _update_efficacy_indicators(self): - with freezegun.freeze_time(self.FAKE_TODAY): - self.dbapi.update_efficacy_indicator( - self.efficacy_indicator1.uuid, - values={"description": "New description 1"}) - with freezegun.freeze_time(self.FAKE_OLD_DATE): - self.dbapi.update_efficacy_indicator( - self.efficacy_indicator2.uuid, - values={"description": "New description 2"}) - with freezegun.freeze_time(self.FAKE_OLDER_DATE): - self.dbapi.update_efficacy_indicator( - self.efficacy_indicator3.uuid, - values={"description": "New description 3"}) - - def test_get_efficacy_indicator_filter_deleted_true(self): - with freezegun.freeze_time(self.FAKE_TODAY): - self.dbapi.soft_delete_efficacy_indicator( - self.efficacy_indicator1.uuid) - - res = self.dbapi.get_efficacy_indicator_list( - self.context, filters={'deleted': True}) - - self.assertEqual([self.efficacy_indicator1['id']], [r.id for r in res]) - - def test_get_efficacy_indicator_filter_deleted_false(self): - with freezegun.freeze_time(self.FAKE_TODAY): - self.dbapi.soft_delete_efficacy_indicator( - self.efficacy_indicator1.uuid) - - res = self.dbapi.get_efficacy_indicator_list( - self.context, filters={'deleted': False}) - - self.assertEqual([self.efficacy_indicator2['id'], - self.efficacy_indicator3['id']], - [r.id for r in res]) - - def test_get_efficacy_indicator_filter_deleted_at_eq(self): - self._soft_delete_efficacy_indicators() - - res = 
self.dbapi.get_efficacy_indicator_list( - self.context, filters={'deleted_at__eq': self.FAKE_TODAY}) - - self.assertEqual([self.efficacy_indicator1['id']], [r.id for r in res]) - - def test_get_efficacy_indicator_filter_deleted_at_lt(self): - self._soft_delete_efficacy_indicators() - - res = self.dbapi.get_efficacy_indicator_list( - self.context, filters={'deleted_at__lt': self.FAKE_TODAY}) - - self.assertEqual( - [self.efficacy_indicator2['id'], self.efficacy_indicator3['id']], - [r.id for r in res]) - - def test_get_efficacy_indicator_filter_deleted_at_lte(self): - self._soft_delete_efficacy_indicators() - - res = self.dbapi.get_efficacy_indicator_list( - self.context, filters={'deleted_at__lte': self.FAKE_OLD_DATE}) - - self.assertEqual( - [self.efficacy_indicator2['id'], self.efficacy_indicator3['id']], - [r.id for r in res]) - - def test_get_efficacy_indicator_filter_deleted_at_gt(self): - self._soft_delete_efficacy_indicators() - - res = self.dbapi.get_efficacy_indicator_list( - self.context, filters={'deleted_at__gt': self.FAKE_OLD_DATE}) - - self.assertEqual([self.efficacy_indicator1['id']], [r.id for r in res]) - - def test_get_efficacy_indicator_filter_deleted_at_gte(self): - self._soft_delete_efficacy_indicators() - - res = self.dbapi.get_efficacy_indicator_list( - self.context, filters={'deleted_at__gte': self.FAKE_OLD_DATE}) - - self.assertEqual( - [self.efficacy_indicator1['id'], self.efficacy_indicator2['id']], - [r.id for r in res]) - - # created_at # - - def test_get_efficacy_indicator_filter_created_at_eq(self): - res = self.dbapi.get_efficacy_indicator_list( - self.context, filters={'created_at__eq': self.FAKE_TODAY}) - - self.assertEqual([self.efficacy_indicator1['id']], [r.id for r in res]) - - def test_get_efficacy_indicator_filter_created_at_lt(self): - with freezegun.freeze_time(self.FAKE_TODAY): - res = self.dbapi.get_efficacy_indicator_list( - self.context, filters={'created_at__lt': self.FAKE_TODAY}) - - self.assertEqual( - 
[self.efficacy_indicator2['id'], self.efficacy_indicator3['id']], - [r.id for r in res]) - - def test_get_efficacy_indicator_filter_created_at_lte(self): - res = self.dbapi.get_efficacy_indicator_list( - self.context, filters={'created_at__lte': self.FAKE_OLD_DATE}) - - self.assertEqual( - [self.efficacy_indicator2['id'], self.efficacy_indicator3['id']], - [r.id for r in res]) - - def test_get_efficacy_indicator_filter_created_at_gt(self): - res = self.dbapi.get_efficacy_indicator_list( - self.context, filters={'created_at__gt': self.FAKE_OLD_DATE}) - - self.assertEqual([self.efficacy_indicator1['id']], [r.id for r in res]) - - def test_get_efficacy_indicator_filter_created_at_gte(self): - res = self.dbapi.get_efficacy_indicator_list( - self.context, filters={'created_at__gte': self.FAKE_OLD_DATE}) - - self.assertEqual( - [self.efficacy_indicator1['id'], self.efficacy_indicator2['id']], - [r.id for r in res]) - - # updated_at # - - def test_get_efficacy_indicator_filter_updated_at_eq(self): - self._update_efficacy_indicators() - - res = self.dbapi.get_efficacy_indicator_list( - self.context, filters={'updated_at__eq': self.FAKE_TODAY}) - - self.assertEqual([self.efficacy_indicator1['id']], [r.id for r in res]) - - def test_get_efficacy_indicator_filter_updated_at_lt(self): - self._update_efficacy_indicators() - - res = self.dbapi.get_efficacy_indicator_list( - self.context, filters={'updated_at__lt': self.FAKE_TODAY}) - - self.assertEqual( - [self.efficacy_indicator2['id'], self.efficacy_indicator3['id']], - [r.id for r in res]) - - def test_get_efficacy_indicator_filter_updated_at_lte(self): - self._update_efficacy_indicators() - - res = self.dbapi.get_efficacy_indicator_list( - self.context, filters={'updated_at__lte': self.FAKE_OLD_DATE}) - - self.assertEqual( - [self.efficacy_indicator2['id'], self.efficacy_indicator3['id']], - [r.id for r in res]) - - def test_get_efficacy_indicator_filter_updated_at_gt(self): - self._update_efficacy_indicators() - - res = 
self.dbapi.get_efficacy_indicator_list( - self.context, filters={'updated_at__gt': self.FAKE_OLD_DATE}) - - self.assertEqual([self.efficacy_indicator1['id']], [r.id for r in res]) - - def test_get_efficacy_indicator_filter_updated_at_gte(self): - self._update_efficacy_indicators() - - res = self.dbapi.get_efficacy_indicator_list( - self.context, filters={'updated_at__gte': self.FAKE_OLD_DATE}) - - self.assertEqual( - [self.efficacy_indicator1['id'], self.efficacy_indicator2['id']], - [r.id for r in res]) - - -class DbEfficacyIndicatorTestCase(base.DbTestCase): - - def _create_test_efficacy_indicator(self, **kwargs): - efficacy_indicator_dict = utils.get_test_efficacy_indicator(**kwargs) - efficacy_indicator = self.dbapi.create_efficacy_indicator( - efficacy_indicator_dict) - return efficacy_indicator - - def _create_test_action_plan(self, **kwargs): - action_plan_dict = utils.get_test_action_plan(**kwargs) - action_plan = self.dbapi.create_action_plan(action_plan_dict) - return action_plan - - def test_get_efficacy_indicator_list(self): - uuids = [] - action_plan = self._create_test_action_plan() - for id_ in range(1, 4): - efficacy_indicator = utils.create_test_efficacy_indicator( - action_plan_id=action_plan.id, id=id_, uuid=None, - name="efficacy_indicator", description="Test Indicator ") - uuids.append(six.text_type(efficacy_indicator['uuid'])) - efficacy_indicators = self.dbapi.get_efficacy_indicator_list( - self.context) - efficacy_indicator_uuids = [ei.uuid for ei in efficacy_indicators] - self.assertEqual(sorted(uuids), sorted(efficacy_indicator_uuids)) - for efficacy_indicator in efficacy_indicators: - self.assertIsNone(efficacy_indicator.action_plan) - - def test_get_efficacy_indicator_list_eager(self): - _action_plan = utils.get_test_action_plan() - action_plan = self.dbapi.create_action_plan(_action_plan) - - uuids = [] - for i in range(1, 4): - efficacy_indicator = utils.create_test_efficacy_indicator( - id=i, uuid=w_utils.generate_uuid(), - 
action_plan_id=action_plan.id) - uuids.append(six.text_type(efficacy_indicator['uuid'])) - efficacy_indicators = self.dbapi.get_efficacy_indicator_list( - self.context, eager=True) - efficacy_indicator_map = {a.uuid: a for a in efficacy_indicators} - self.assertEqual(sorted(uuids), sorted(efficacy_indicator_map.keys())) - eager_efficacy_indicator = efficacy_indicator_map[ - efficacy_indicator.uuid] - self.assertEqual( - action_plan.as_dict(), - eager_efficacy_indicator.action_plan.as_dict()) - - def test_get_efficacy_indicator_list_with_filters(self): - audit = utils.create_test_audit(uuid=w_utils.generate_uuid()) - action_plan = self._create_test_action_plan( - id=1, - uuid=w_utils.generate_uuid(), - audit_id=audit.id, - first_efficacy_indicator_id=None, - state=objects.action_plan.State.RECOMMENDED) - efficacy_indicator1 = self._create_test_efficacy_indicator( - id=1, - name='indicator_1', - uuid=w_utils.generate_uuid(), - action_plan_id=1, - description='Description efficacy indicator 1', - unit='%') - efficacy_indicator2 = self._create_test_efficacy_indicator( - id=2, - name='indicator_2', - uuid=w_utils.generate_uuid(), - action_plan_id=2, - description='Description efficacy indicator 2', - unit='%') - efficacy_indicator3 = self._create_test_efficacy_indicator( - id=3, - name='indicator_3', - uuid=w_utils.generate_uuid(), - action_plan_id=1, - description='Description efficacy indicator 3', - unit='%') - res = self.dbapi.get_efficacy_indicator_list( - self.context, filters={'name': 'indicator_3'}) - self.assertEqual([efficacy_indicator3['id']], [r.id for r in res]) - - res = self.dbapi.get_efficacy_indicator_list( - self.context, filters={'unit': 'kWh'}) - self.assertEqual([], [r.id for r in res]) - - res = self.dbapi.get_efficacy_indicator_list( - self.context, - filters={'action_plan_id': 2}) - self.assertEqual([efficacy_indicator2['id']], [r.id for r in res]) - - res = self.dbapi.get_efficacy_indicator_list( - self.context, - filters={'action_plan_uuid': 
action_plan['uuid']}) - self.assertEqual( - sorted([efficacy_indicator1['id'], efficacy_indicator3['id']]), - sorted([r.id for r in res])) - - def test_get_efficacy_indicator_list_with_filter_by_uuid(self): - efficacy_indicator = self._create_test_efficacy_indicator() - res = self.dbapi.get_efficacy_indicator_list( - self.context, filters={'uuid': efficacy_indicator.uuid}) - - self.assertEqual(len(res), 1) - self.assertEqual(efficacy_indicator.uuid, res[0].uuid) - - def test_get_efficacy_indicator_by_id(self): - efficacy_indicator = self._create_test_efficacy_indicator() - efficacy_indicator = self.dbapi.get_efficacy_indicator_by_id( - self.context, efficacy_indicator.id) - self.assertEqual(efficacy_indicator.uuid, efficacy_indicator.uuid) - - def test_get_efficacy_indicator_by_uuid(self): - efficacy_indicator = self._create_test_efficacy_indicator() - efficacy_indicator = self.dbapi.get_efficacy_indicator_by_uuid( - self.context, efficacy_indicator.uuid) - self.assertEqual(efficacy_indicator['id'], efficacy_indicator.id) - - def test_get_efficacy_indicator_that_does_not_exist(self): - self.assertRaises( - exception.EfficacyIndicatorNotFound, - self.dbapi.get_efficacy_indicator_by_id, self.context, 1234) - - def test_update_efficacy_indicator(self): - efficacy_indicator = self._create_test_efficacy_indicator() - res = self.dbapi.update_efficacy_indicator( - efficacy_indicator.id, - {'state': objects.action_plan.State.CANCELLED}) - self.assertEqual('CANCELLED', res.state) - - def test_update_efficacy_indicator_that_does_not_exist(self): - self.assertRaises( - exception.EfficacyIndicatorNotFound, - self.dbapi.update_efficacy_indicator, 1234, {'state': ''}) - - def test_update_efficacy_indicator_uuid(self): - efficacy_indicator = self._create_test_efficacy_indicator() - self.assertRaises( - exception.Invalid, - self.dbapi.update_efficacy_indicator, efficacy_indicator.id, - {'uuid': 'hello'}) - - def test_destroy_efficacy_indicator(self): - efficacy_indicator = 
self._create_test_efficacy_indicator() - self.dbapi.destroy_efficacy_indicator(efficacy_indicator['id']) - self.assertRaises(exception.EfficacyIndicatorNotFound, - self.dbapi.get_efficacy_indicator_by_id, - self.context, efficacy_indicator['id']) - - def test_destroy_efficacy_indicator_by_uuid(self): - uuid = w_utils.generate_uuid() - self._create_test_efficacy_indicator(uuid=uuid) - self.assertIsNotNone(self.dbapi.get_efficacy_indicator_by_uuid( - self.context, uuid)) - self.dbapi.destroy_efficacy_indicator(uuid) - self.assertRaises( - exception.EfficacyIndicatorNotFound, - self.dbapi.get_efficacy_indicator_by_uuid, self.context, uuid) - - def test_destroy_efficacy_indicator_that_does_not_exist(self): - self.assertRaises(exception.EfficacyIndicatorNotFound, - self.dbapi.destroy_efficacy_indicator, 1234) - - def test_create_efficacy_indicator_already_exists(self): - uuid = w_utils.generate_uuid() - self._create_test_efficacy_indicator(id=1, uuid=uuid) - self.assertRaises(exception.EfficacyIndicatorAlreadyExists, - self._create_test_efficacy_indicator, - id=2, uuid=uuid) diff --git a/watcher/tests/db/test_goal.py b/watcher/tests/db/test_goal.py deleted file mode 100644 index cae9449..0000000 --- a/watcher/tests/db/test_goal.py +++ /dev/null @@ -1,327 +0,0 @@ -# Copyright 2015 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""Tests for manipulating Goal via the DB API""" - -import freezegun -import six - -from watcher.common import exception -from watcher.common import utils as w_utils -from watcher.tests.db import base -from watcher.tests.db import utils - - -class TestDbGoalFilters(base.DbTestCase): - - FAKE_OLDER_DATE = '2014-01-01T09:52:05.219414' - FAKE_OLD_DATE = '2015-01-01T09:52:05.219414' - FAKE_TODAY = '2016-02-24T09:52:05.219414' - - def setUp(self): - super(TestDbGoalFilters, self).setUp() - self.context.show_deleted = True - self._data_setup() - - def _data_setup(self): - with freezegun.freeze_time(self.FAKE_TODAY): - self.goal1 = utils.create_test_goal( - id=1, uuid=w_utils.generate_uuid(), name="GOAL_1", - display_name="Goal 1") - with freezegun.freeze_time(self.FAKE_OLD_DATE): - self.goal2 = utils.create_test_goal( - id=2, uuid=w_utils.generate_uuid(), - name="GOAL_2", display_name="Goal 2") - with freezegun.freeze_time(self.FAKE_OLDER_DATE): - self.goal3 = utils.create_test_goal( - id=3, uuid=w_utils.generate_uuid(), - name="GOAL_3", display_name="Goal 3") - - def _soft_delete_goals(self): - with freezegun.freeze_time(self.FAKE_TODAY): - self.dbapi.soft_delete_goal(self.goal1.id) - with freezegun.freeze_time(self.FAKE_OLD_DATE): - self.dbapi.soft_delete_goal(self.goal2.id) - with freezegun.freeze_time(self.FAKE_OLDER_DATE): - self.dbapi.soft_delete_goal(self.goal3.id) - - def _update_goals(self): - with freezegun.freeze_time(self.FAKE_TODAY): - self.dbapi.update_goal( - self.goal1.uuid, values={"display_name": "goal1"}) - with freezegun.freeze_time(self.FAKE_OLD_DATE): - self.dbapi.update_goal( - self.goal2.uuid, values={"display_name": "goal2"}) - with freezegun.freeze_time(self.FAKE_OLDER_DATE): - self.dbapi.update_goal( - self.goal3.uuid, values={"display_name": "goal3"}) - - def test_get_goal_list_filter_deleted_true(self): - with freezegun.freeze_time(self.FAKE_TODAY): - self.dbapi.soft_delete_goal(self.goal1.id) - - res = self.dbapi.get_goal_list( - 
self.context, filters={'deleted': True}) - - self.assertEqual([self.goal1.uuid], [r.uuid for r in res]) - - def test_get_goal_list_filter_deleted_false(self): - with freezegun.freeze_time(self.FAKE_TODAY): - self.dbapi.soft_delete_goal(self.goal1.id) - - res = self.dbapi.get_goal_list( - self.context, filters={'deleted': False}) - - self.assertEqual( - set([self.goal2.uuid, self.goal3.uuid]), - set([r.uuid for r in res])) - - def test_get_goal_list_filter_deleted_at_eq(self): - self._soft_delete_goals() - - res = self.dbapi.get_goal_list( - self.context, filters={'deleted_at__eq': self.FAKE_TODAY}) - - self.assertEqual([self.goal1.uuid], [r.uuid for r in res]) - - def test_get_goal_list_filter_deleted_at_lt(self): - self._soft_delete_goals() - - res = self.dbapi.get_goal_list( - self.context, filters={'deleted_at__lt': self.FAKE_TODAY}) - - self.assertEqual( - set([self.goal2.uuid, self.goal3.uuid]), - set([r.uuid for r in res])) - - def test_get_goal_list_filter_deleted_at_lte(self): - self._soft_delete_goals() - - res = self.dbapi.get_goal_list( - self.context, filters={'deleted_at__lte': self.FAKE_OLD_DATE}) - - self.assertEqual( - set([self.goal2.uuid, self.goal3.uuid]), - set([r.uuid for r in res])) - - def test_get_goal_list_filter_deleted_at_gt(self): - self._soft_delete_goals() - - res = self.dbapi.get_goal_list( - self.context, filters={'deleted_at__gt': self.FAKE_OLD_DATE}) - - self.assertEqual([self.goal1.uuid], [r.uuid for r in res]) - - def test_get_goal_list_filter_deleted_at_gte(self): - self._soft_delete_goals() - - res = self.dbapi.get_goal_list( - self.context, filters={'deleted_at__gte': self.FAKE_OLD_DATE}) - - self.assertEqual( - set([self.goal1.uuid, self.goal2.uuid]), - set([r.uuid for r in res])) - - # created_at # - - def test_get_goal_list_filter_created_at_eq(self): - res = self.dbapi.get_goal_list( - self.context, filters={'created_at__eq': self.FAKE_TODAY}) - - self.assertEqual([self.goal1.uuid], [r.uuid for r in res]) - - def 
test_get_goal_list_filter_created_at_lt(self): - res = self.dbapi.get_goal_list( - self.context, filters={'created_at__lt': self.FAKE_TODAY}) - - self.assertEqual( - set([self.goal2.uuid, self.goal3.uuid]), - set([r.uuid for r in res])) - - def test_get_goal_list_filter_created_at_lte(self): - res = self.dbapi.get_goal_list( - self.context, filters={'created_at__lte': self.FAKE_OLD_DATE}) - - self.assertEqual( - set([self.goal2.uuid, self.goal3.uuid]), - set([r.uuid for r in res])) - - def test_get_goal_list_filter_created_at_gt(self): - res = self.dbapi.get_goal_list( - self.context, filters={'created_at__gt': self.FAKE_OLD_DATE}) - - self.assertEqual([self.goal1.uuid], [r.uuid for r in res]) - - def test_get_goal_list_filter_created_at_gte(self): - res = self.dbapi.get_goal_list( - self.context, filters={'created_at__gte': self.FAKE_OLD_DATE}) - - self.assertEqual( - set([self.goal1.uuid, self.goal2.uuid]), - set([r.uuid for r in res])) - - # updated_at # - - def test_get_goal_list_filter_updated_at_eq(self): - self._update_goals() - - res = self.dbapi.get_goal_list( - self.context, filters={'updated_at__eq': self.FAKE_TODAY}) - - self.assertEqual([self.goal1.uuid], [r.uuid for r in res]) - - def test_get_goal_list_filter_updated_at_lt(self): - self._update_goals() - - res = self.dbapi.get_goal_list( - self.context, filters={'updated_at__lt': self.FAKE_TODAY}) - - self.assertEqual( - set([self.goal2.uuid, self.goal3.uuid]), - set([r.uuid for r in res])) - - def test_get_goal_list_filter_updated_at_lte(self): - self._update_goals() - - res = self.dbapi.get_goal_list( - self.context, filters={'updated_at__lte': self.FAKE_OLD_DATE}) - - self.assertEqual( - set([self.goal2.uuid, self.goal3.uuid]), - set([r.uuid for r in res])) - - def test_get_goal_list_filter_updated_at_gt(self): - self._update_goals() - - res = self.dbapi.get_goal_list( - self.context, filters={'updated_at__gt': self.FAKE_OLD_DATE}) - - self.assertEqual([self.goal1.uuid], [r.uuid for r in res]) - - 
def test_get_goal_list_filter_updated_at_gte(self): - self._update_goals() - - res = self.dbapi.get_goal_list( - self.context, filters={'updated_at__gte': self.FAKE_OLD_DATE}) - - self.assertEqual( - set([self.goal1.uuid, self.goal2.uuid]), - set([r.uuid for r in res])) - - -class DbGoalTestCase(base.DbTestCase): - - def _create_test_goal(self, **kwargs): - goal = utils.get_test_goal(**kwargs) - self.dbapi.create_goal(goal) - return goal - - def test_get_goal_list(self): - uuids = [] - for i in range(1, 4): - goal = utils.create_test_goal( - id=i, - uuid=w_utils.generate_uuid(), - name="GOAL_%s" % i, - display_name='My Goal %s' % i) - uuids.append(six.text_type(goal['uuid'])) - goals = self.dbapi.get_goal_list(self.context) - goal_uuids = [g.uuid for g in goals] - self.assertEqual(sorted(uuids), sorted(goal_uuids)) - - def test_get_goal_list_with_filters(self): - goal1 = self._create_test_goal( - id=1, - uuid=w_utils.generate_uuid(), - name="GOAL_1", - display_name='Goal 1', - ) - goal2 = self._create_test_goal( - id=2, - uuid=w_utils.generate_uuid(), - name="GOAL_2", - display_name='Goal 2', - ) - - res = self.dbapi.get_goal_list(self.context, - filters={'display_name': 'Goal 1'}) - self.assertEqual([goal1['uuid']], [r.uuid for r in res]) - - res = self.dbapi.get_goal_list(self.context, - filters={'display_name': 'Goal 3'}) - self.assertEqual([], [r.uuid for r in res]) - - res = self.dbapi.get_goal_list( - self.context, filters={'name': 'GOAL_1'}) - self.assertEqual([goal1['uuid']], [r.uuid for r in res]) - - res = self.dbapi.get_goal_list( - self.context, - filters={'display_name': 'Goal 2'}) - self.assertEqual([goal2['uuid']], [r.uuid for r in res]) - - def test_get_goal_by_uuid(self): - efficacy_spec = [{"unit": "%", "name": "dummy", - "schema": "Range(min=0, max=100, min_included=True, " - "max_included=True, msg=None)", - "description": "Dummy indicator"}] - created_goal = self._create_test_goal( - efficacy_specification=efficacy_spec) - goal = 
self.dbapi.get_goal_by_uuid(self.context, created_goal['uuid']) - self.assertEqual(goal.uuid, created_goal['uuid']) - - def test_get_goal_that_does_not_exist(self): - random_uuid = w_utils.generate_uuid() - self.assertRaises(exception.GoalNotFound, - self.dbapi.get_goal_by_uuid, - self.context, random_uuid) - - def test_update_goal(self): - goal = self._create_test_goal() - res = self.dbapi.update_goal(goal['uuid'], - {'display_name': 'updated-model'}) - self.assertEqual('updated-model', res.display_name) - - def test_update_goal_id(self): - goal = self._create_test_goal() - self.assertRaises(exception.Invalid, - self.dbapi.update_goal, goal['uuid'], - {'uuid': 'NEW_GOAL'}) - - def test_update_goal_that_does_not_exist(self): - random_uuid = w_utils.generate_uuid() - self.assertRaises(exception.GoalNotFound, - self.dbapi.update_goal, - random_uuid, - {'display_name': ''}) - - def test_destroy_goal(self): - goal = self._create_test_goal() - self.dbapi.destroy_goal(goal['uuid']) - self.assertRaises(exception.GoalNotFound, - self.dbapi.get_goal_by_uuid, - self.context, goal['uuid']) - - def test_destroy_goal_that_does_not_exist(self): - random_uuid = w_utils.generate_uuid() - self.assertRaises(exception.GoalNotFound, - self.dbapi.destroy_goal, random_uuid) - - def test_create_goal_already_exists(self): - goal_uuid = w_utils.generate_uuid() - self._create_test_goal(uuid=goal_uuid) - self.assertRaises(exception.GoalAlreadyExists, - self._create_test_goal, - uuid=goal_uuid) diff --git a/watcher/tests/db/test_purge.py b/watcher/tests/db/test_purge.py deleted file mode 100644 index 5a0dde7..0000000 --- a/watcher/tests/db/test_purge.py +++ /dev/null @@ -1,502 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from oslo_utils import uuidutils - -import freezegun -import mock - -from watcher.common import context as watcher_context -from watcher.common import utils -from watcher.db import purge -from watcher.db.sqlalchemy import api as dbapi -from watcher.tests.db import base -from watcher.tests.objects import utils as obj_utils - - -class TestPurgeCommand(base.DbTestCase): - - def setUp(self): - super(TestPurgeCommand, self).setUp() - self.cmd = purge.PurgeCommand() - token_info = { - 'token': { - 'project': { - 'id': 'fake_project' - }, - 'user': { - 'id': 'fake_user' - } - } - } - self.context = watcher_context.RequestContext( - auth_token_info=token_info, - project_id='fake_project', - user_id='fake_user', - show_deleted=True, - ) - - self.fake_today = '2016-02-24T09:52:05.219414+00:00' - self.expired_date = '2016-01-24T09:52:05.219414+00:00' - - self.m_input = mock.Mock() - p = mock.patch("watcher.db.purge.input", self.m_input) - self.m_input.return_value = 'y' - p.start() - self.addCleanup(p.stop) - - self._id_generator = None - self._data_setup() - - def _generate_id(self): - if self._id_generator is None: - self._id_generator = self._get_id_generator() - return next(self._id_generator) - - def _get_id_generator(self): - seed = 1 - while True: - yield seed - seed += 1 - - def generate_unique_name(self, prefix): - return "%s%s" % (prefix, uuidutils.generate_uuid()) - - def _data_setup(self): - # All the 1's are soft_deleted and are expired - # All the 2's are soft_deleted but are not expired - # All the 3's are *not* soft_deleted - - # Number of days 
we want to keep in DB (no purge for them) - self.cmd.age_in_days = 10 - self.cmd.max_number = None - self.cmd.orphans = True - - goal1_name = "GOAL_1" - goal2_name = "GOAL_2" - goal3_name = "GOAL_3" - - strategy1_name = "strategy_1" - strategy2_name = "strategy_2" - strategy3_name = "strategy_3" - - self.audit_template1_name = self.generate_unique_name( - prefix="Audit Template 1 ") - self.audit_template2_name = self.generate_unique_name( - prefix="Audit Template 2 ") - self.audit_template3_name = self.generate_unique_name( - prefix="Audit Template 3 ") - - with freezegun.freeze_time(self.expired_date): - self.goal1 = obj_utils.create_test_goal( - self.context, id=self._generate_id(), - uuid=utils.generate_uuid(), - name=goal1_name, display_name=goal1_name.lower()) - self.goal2 = obj_utils.create_test_goal( - self.context, id=self._generate_id(), - uuid=utils.generate_uuid(), - name=goal2_name, display_name=goal2_name.lower()) - self.goal3 = obj_utils.create_test_goal( - self.context, id=self._generate_id(), - uuid=utils.generate_uuid(), - name=goal3_name, display_name=goal3_name.lower()) - self.goal1.soft_delete() - - with freezegun.freeze_time(self.expired_date): - self.strategy1 = obj_utils.create_test_strategy( - self.context, id=self._generate_id(), - uuid=utils.generate_uuid(), - name=strategy1_name, display_name=strategy1_name.lower(), - goal_id=self.goal1.id) - self.strategy2 = obj_utils.create_test_strategy( - self.context, id=self._generate_id(), - uuid=utils.generate_uuid(), - name=strategy2_name, display_name=strategy2_name.lower(), - goal_id=self.goal2.id) - self.strategy3 = obj_utils.create_test_strategy( - self.context, id=self._generate_id(), - uuid=utils.generate_uuid(), - name=strategy3_name, display_name=strategy3_name.lower(), - goal_id=self.goal3.id) - self.strategy1.soft_delete() - - with freezegun.freeze_time(self.expired_date): - self.audit_template1 = obj_utils.create_test_audit_template( - self.context, name=self.audit_template1_name, - 
id=self._generate_id(), - uuid=utils.generate_uuid(), goal_id=self.goal1.id, - strategy_id=self.strategy1.id) - self.audit_template2 = obj_utils.create_test_audit_template( - self.context, name=self.audit_template2_name, - id=self._generate_id(), - uuid=utils.generate_uuid(), goal_id=self.goal2.id, - strategy_id=self.strategy2.id) - self.audit_template3 = obj_utils.create_test_audit_template( - self.context, name=self.audit_template3_name, - id=self._generate_id(), - uuid=utils.generate_uuid(), goal_id=self.goal3.id, - strategy_id=self.strategy3.id) - self.audit_template1.soft_delete() - - with freezegun.freeze_time(self.expired_date): - self.audit1 = obj_utils.create_test_audit( - self.context, id=self._generate_id(), - uuid=utils.generate_uuid(), - goal_id=self.goal1.id, strategy_id=self.strategy1.id) - self.audit2 = obj_utils.create_test_audit( - self.context, id=self._generate_id(), - uuid=utils.generate_uuid(), - goal_id=self.goal2.id, strategy_id=self.strategy2.id) - self.audit3 = obj_utils.create_test_audit( - self.context, id=self._generate_id(), - uuid=utils.generate_uuid(), - goal_id=self.goal3.id, strategy_id=self.strategy3.id) - self.audit1.soft_delete() - - with freezegun.freeze_time(self.expired_date): - self.action_plan1 = obj_utils.create_test_action_plan( - self.context, audit_id=self.audit1.id, - id=self._generate_id(), uuid=utils.generate_uuid(), - strategy_id=self.strategy1.id) - self.action_plan2 = obj_utils.create_test_action_plan( - self.context, audit_id=self.audit2.id, - id=self._generate_id(), - strategy_id=self.strategy2.id, - uuid=utils.generate_uuid()) - self.action_plan3 = obj_utils.create_test_action_plan( - self.context, audit_id=self.audit3.id, - id=self._generate_id(), uuid=utils.generate_uuid(), - strategy_id=self.strategy3.id) - - self.action1 = obj_utils.create_test_action( - self.context, action_plan_id=self.action_plan1.id, - id=self._generate_id(), - uuid=utils.generate_uuid()) - self.action2 = obj_utils.create_test_action( - 
self.context, action_plan_id=self.action_plan2.id, - id=self._generate_id(), uuid=utils.generate_uuid()) - self.action3 = obj_utils.create_test_action( - self.context, action_plan_id=self.action_plan3.id, - id=self._generate_id(), uuid=utils.generate_uuid()) - self.action_plan1.soft_delete() - - @mock.patch.object(dbapi.Connection, "destroy_action") - @mock.patch.object(dbapi.Connection, "destroy_action_plan") - @mock.patch.object(dbapi.Connection, "destroy_audit") - @mock.patch.object(dbapi.Connection, "destroy_audit_template") - @mock.patch.object(dbapi.Connection, "destroy_strategy") - @mock.patch.object(dbapi.Connection, "destroy_goal") - def test_execute_max_number_exceeded(self, - m_destroy_goal, - m_destroy_strategy, - m_destroy_audit_template, - m_destroy_audit, - m_destroy_action_plan, - m_destroy_action): - self.cmd.age_in_days = None - self.cmd.max_number = 10 - - with freezegun.freeze_time(self.fake_today): - self.goal2.soft_delete() - self.strategy2.soft_delete() - self.audit_template2.soft_delete() - self.audit2.soft_delete() - self.action_plan2.soft_delete() - - with freezegun.freeze_time(self.fake_today): - self.cmd.execute() - - # The 1's and the 2's are purgeable (due to age of day set to 0), - # but max_number = 10, and because of no Db integrity violation, we - # should be able to purge only 6 objects. 
- self.assertEqual(m_destroy_goal.call_count, 1) - self.assertEqual(m_destroy_strategy.call_count, 1) - self.assertEqual(m_destroy_audit_template.call_count, 1) - self.assertEqual(m_destroy_audit.call_count, 1) - self.assertEqual(m_destroy_action_plan.call_count, 1) - self.assertEqual(m_destroy_action.call_count, 1) - - def test_find_deleted_entries(self): - self.cmd.age_in_days = None - - with freezegun.freeze_time(self.fake_today): - objects_map = self.cmd.find_objects_to_delete() - - self.assertEqual(len(objects_map.goals), 1) - self.assertEqual(len(objects_map.strategies), 1) - self.assertEqual(len(objects_map.audit_templates), 1) - self.assertEqual(len(objects_map.audits), 1) - self.assertEqual(len(objects_map.action_plans), 1) - self.assertEqual(len(objects_map.actions), 1) - - def test_find_deleted_and_expired_entries(self): - with freezegun.freeze_time(self.fake_today): - self.goal2.soft_delete() - self.strategy2.soft_delete() - self.audit_template2.soft_delete() - self.audit2.soft_delete() - self.action_plan2.soft_delete() - - with freezegun.freeze_time(self.fake_today): - objects_map = self.cmd.find_objects_to_delete() - - # The 1's are purgeable (due to age of day set to 10) - self.assertEqual(len(objects_map.goals), 1) - self.assertEqual(len(objects_map.strategies), 1) - self.assertEqual(len(objects_map.audit_templates), 1) - self.assertEqual(len(objects_map.audits), 1) - self.assertEqual(len(objects_map.action_plans), 1) - self.assertEqual(len(objects_map.actions), 1) - - def test_find_deleted_and_nonexpired_related_entries(self): - with freezegun.freeze_time(self.fake_today): - # orphan audit template - audit_template4 = obj_utils.create_test_audit_template( - self.context, goal_id=self.goal2.id, - name=self.generate_unique_name(prefix="Audit Template 4 "), - strategy_id=self.strategy1.id, id=self._generate_id(), - uuid=utils.generate_uuid()) - audit4 = obj_utils.create_test_audit( - self.context, audit_template_id=audit_template4.id, - 
strategy_id=self.strategy1.id, id=self._generate_id(), - uuid=utils.generate_uuid()) - action_plan4 = obj_utils.create_test_action_plan( - self.context, - id=self._generate_id(), uuid=utils.generate_uuid(), - audit_id=audit4.id, strategy_id=self.strategy1.id) - action4 = obj_utils.create_test_action( - self.context, action_plan_id=action_plan4.id, - id=self._generate_id(), - uuid=utils.generate_uuid()) - - audit_template5 = obj_utils.create_test_audit_template( - self.context, goal_id=self.goal1.id, - name=self.generate_unique_name(prefix="Audit Template 5 "), - strategy_id=None, id=self._generate_id(), - uuid=utils.generate_uuid()) - audit5 = obj_utils.create_test_audit( - self.context, audit_template_id=audit_template5.id, - strategy_id=self.strategy1.id, id=self._generate_id(), - uuid=utils.generate_uuid()) - action_plan5 = obj_utils.create_test_action_plan( - self.context, - id=self._generate_id(), uuid=utils.generate_uuid(), - audit_id=audit5.id, strategy_id=self.strategy1.id) - action5 = obj_utils.create_test_action( - self.context, action_plan_id=action_plan5.id, - id=self._generate_id(), - uuid=utils.generate_uuid()) - - self.goal2.soft_delete() - self.strategy2.soft_delete() - self.audit_template2.soft_delete() - self.audit2.soft_delete() - self.action_plan2.soft_delete() - - # All the 4's should be purged as well because they are orphans - # even though they were not deleted - - # All the 5's should be purged as well even though they are not - # expired because their related audit template is itself expired - audit_template5.soft_delete() - audit5.soft_delete() - action_plan5.soft_delete() - - with freezegun.freeze_time(self.fake_today): - objects_map = self.cmd.find_objects_to_delete() - - self.assertEqual(len(objects_map.goals), 1) - self.assertEqual(len(objects_map.strategies), 1) - self.assertEqual(len(objects_map.audit_templates), 3) - self.assertEqual(len(objects_map.audits), 3) - self.assertEqual(len(objects_map.action_plans), 3) - 
self.assertEqual(len(objects_map.actions), 3) - self.assertEqual( - set([self.action1.id, action4.id, action5.id]), - set([entry.id for entry in objects_map.actions])) - - @mock.patch.object(dbapi.Connection, "destroy_action") - @mock.patch.object(dbapi.Connection, "destroy_action_plan") - @mock.patch.object(dbapi.Connection, "destroy_audit") - @mock.patch.object(dbapi.Connection, "destroy_audit_template") - @mock.patch.object(dbapi.Connection, "destroy_strategy") - @mock.patch.object(dbapi.Connection, "destroy_goal") - def test_purge_command(self, m_destroy_goal, m_destroy_strategy, - m_destroy_audit_template, m_destroy_audit, - m_destroy_action_plan, m_destroy_action): - with freezegun.freeze_time(self.fake_today): - self.cmd.execute() - - m_destroy_audit_template.assert_called_once_with( - self.audit_template1.uuid) - m_destroy_audit.assert_called_with( - self.audit1.uuid) - m_destroy_action_plan.assert_called_with( - self.action_plan1.uuid) - m_destroy_action.assert_called_with( - self.action1.uuid) - - @mock.patch.object(dbapi.Connection, "destroy_action") - @mock.patch.object(dbapi.Connection, "destroy_action_plan") - @mock.patch.object(dbapi.Connection, "destroy_audit") - @mock.patch.object(dbapi.Connection, "destroy_audit_template") - @mock.patch.object(dbapi.Connection, "destroy_strategy") - @mock.patch.object(dbapi.Connection, "destroy_goal") - def test_purge_command_with_nonexpired_related_entries( - self, m_destroy_goal, m_destroy_strategy, - m_destroy_audit_template, m_destroy_audit, - m_destroy_action_plan, m_destroy_action): - with freezegun.freeze_time(self.fake_today): - # orphan audit template - audit_template4 = obj_utils.create_test_audit_template( - self.context, goal_id=self.goal2.id, - name=self.generate_unique_name(prefix="Audit Template 4 "), - strategy_id=None, id=self._generate_id(), - uuid=utils.generate_uuid()) - audit4 = obj_utils.create_test_audit( - self.context, - id=self._generate_id(), uuid=utils.generate_uuid(), - 
audit_template_id=audit_template4.id) - action_plan4 = obj_utils.create_test_action_plan( - self.context, - id=self._generate_id(), uuid=utils.generate_uuid(), - audit_id=audit4.id, strategy_id=self.strategy1.id) - action4 = obj_utils.create_test_action( - self.context, action_plan_id=action_plan4.id, - id=self._generate_id(), - uuid=utils.generate_uuid()) - - audit_template5 = obj_utils.create_test_audit_template( - self.context, goal_id=self.goal1.id, - name=self.generate_unique_name(prefix="Audit Template 5 "), - strategy_id=None, id=self._generate_id(), - uuid=utils.generate_uuid()) - audit5 = obj_utils.create_test_audit( - self.context, audit_template_id=audit_template5.id, - strategy_id=self.strategy1.id, id=self._generate_id(), - uuid=utils.generate_uuid()) - action_plan5 = obj_utils.create_test_action_plan( - self.context, - id=self._generate_id(), uuid=utils.generate_uuid(), - audit_id=audit5.id, strategy_id=self.strategy1.id) - action5 = obj_utils.create_test_action( - self.context, action_plan_id=action_plan5.id, - id=self._generate_id(), - uuid=utils.generate_uuid()) - - self.goal2.soft_delete() - self.strategy2.soft_delete() - self.audit_template2.soft_delete() - self.audit2.soft_delete() - self.action_plan2.soft_delete() - - # All the 4's should be purged as well because they are orphans - # even though they were not deleted - - # All the 5's should be purged as well even though they are not - # expired because their related audit template is itself expired - audit_template5.soft_delete() - audit5.soft_delete() - action_plan5.soft_delete() - - with freezegun.freeze_time(self.fake_today): - self.cmd.execute() - - self.assertEqual(m_destroy_goal.call_count, 1) - self.assertEqual(m_destroy_strategy.call_count, 1) - self.assertEqual(m_destroy_audit_template.call_count, 3) - self.assertEqual(m_destroy_audit.call_count, 3) - self.assertEqual(m_destroy_action_plan.call_count, 3) - self.assertEqual(m_destroy_action.call_count, 3) - - 
m_destroy_audit_template.assert_any_call(self.audit_template1.uuid) - m_destroy_audit.assert_any_call(self.audit1.uuid) - m_destroy_audit.assert_any_call(audit4.uuid) - m_destroy_action_plan.assert_any_call(self.action_plan1.uuid) - m_destroy_action_plan.assert_any_call(action_plan4.uuid) - m_destroy_action_plan.assert_any_call(action_plan5.uuid) - m_destroy_action.assert_any_call(self.action1.uuid) - m_destroy_action.assert_any_call(action4.uuid) - m_destroy_action.assert_any_call(action5.uuid) - - @mock.patch.object(dbapi.Connection, "destroy_action") - @mock.patch.object(dbapi.Connection, "destroy_action_plan") - @mock.patch.object(dbapi.Connection, "destroy_audit") - @mock.patch.object(dbapi.Connection, "destroy_audit_template") - @mock.patch.object(dbapi.Connection, "destroy_strategy") - @mock.patch.object(dbapi.Connection, "destroy_goal") - def test_purge_command_with_strategy_uuid( - self, m_destroy_goal, m_destroy_strategy, - m_destroy_audit_template, m_destroy_audit, - m_destroy_action_plan, m_destroy_action): - self.cmd.exclude_orphans = False - self.cmd.uuid = self.strategy1.uuid - - with freezegun.freeze_time(self.fake_today): - self.cmd.execute() - - self.assertEqual(m_destroy_goal.call_count, 0) - self.assertEqual(m_destroy_strategy.call_count, 1) - self.assertEqual(m_destroy_audit_template.call_count, 1) - self.assertEqual(m_destroy_audit.call_count, 1) - self.assertEqual(m_destroy_action_plan.call_count, 1) - self.assertEqual(m_destroy_action.call_count, 1) - - @mock.patch.object(dbapi.Connection, "destroy_action") - @mock.patch.object(dbapi.Connection, "destroy_action_plan") - @mock.patch.object(dbapi.Connection, "destroy_audit") - @mock.patch.object(dbapi.Connection, "destroy_audit_template") - @mock.patch.object(dbapi.Connection, "destroy_strategy") - @mock.patch.object(dbapi.Connection, "destroy_goal") - def test_purge_command_with_audit_template_not_expired( - self, m_destroy_goal, m_destroy_strategy, - m_destroy_audit_template, 
m_destroy_audit, - m_destroy_action_plan, m_destroy_action): - self.cmd.exclude_orphans = True - self.cmd.uuid = self.audit_template2.uuid - - with freezegun.freeze_time(self.fake_today): - self.cmd.execute() - - self.assertEqual(m_destroy_goal.call_count, 0) - self.assertEqual(m_destroy_strategy.call_count, 0) - self.assertEqual(m_destroy_audit_template.call_count, 0) - self.assertEqual(m_destroy_audit.call_count, 0) - self.assertEqual(m_destroy_action_plan.call_count, 0) - self.assertEqual(m_destroy_action.call_count, 0) - - @mock.patch.object(dbapi.Connection, "destroy_action") - @mock.patch.object(dbapi.Connection, "destroy_action_plan") - @mock.patch.object(dbapi.Connection, "destroy_audit") - @mock.patch.object(dbapi.Connection, "destroy_audit_template") - @mock.patch.object(dbapi.Connection, "destroy_strategy") - @mock.patch.object(dbapi.Connection, "destroy_goal") - def test_purge_command_with_audit_template_not_soft_deleted( - self, m_destroy_goal, m_destroy_strategy, - m_destroy_audit_template, m_destroy_audit, - m_destroy_action_plan, m_destroy_action): - self.cmd.exclude_orphans = False - self.cmd.uuid = self.audit_template3.uuid - - with freezegun.freeze_time(self.fake_today): - self.cmd.execute() - - self.assertEqual(m_destroy_goal.call_count, 0) - self.assertEqual(m_destroy_strategy.call_count, 0) - self.assertEqual(m_destroy_audit_template.call_count, 0) - self.assertEqual(m_destroy_audit.call_count, 0) - self.assertEqual(m_destroy_action_plan.call_count, 0) - self.assertEqual(m_destroy_action.call_count, 0) diff --git a/watcher/tests/db/test_scoring_engine.py b/watcher/tests/db/test_scoring_engine.py deleted file mode 100644 index 02da05e..0000000 --- a/watcher/tests/db/test_scoring_engine.py +++ /dev/null @@ -1,337 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 Intel -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -"""Tests for manipulating ScoringEngine via the DB API""" - -import freezegun -import six - -from watcher.common import exception -from watcher.common import utils as w_utils -from watcher.tests.db import base -from watcher.tests.db import utils - - -class TestDbScoringEngineFilters(base.DbTestCase): - - FAKE_OLDER_DATE = '2014-01-01T09:52:05.219414' - FAKE_OLD_DATE = '2015-01-01T09:52:05.219414' - FAKE_TODAY = '2016-02-24T09:52:05.219414' - - def setUp(self): - super(TestDbScoringEngineFilters, self).setUp() - self.context.show_deleted = True - self._data_setup() - - def _data_setup(self): - with freezegun.freeze_time(self.FAKE_TODAY): - self.scoring_engine1 = utils.create_test_scoring_engine( - id=1, uuid='e8370ede-4f39-11e6-9ffa-08002722cb22', - name="se-1", description="Scoring Engine 1", metainfo="a1=b1") - with freezegun.freeze_time(self.FAKE_OLD_DATE): - self.scoring_engine2 = utils.create_test_scoring_engine( - id=2, uuid='e8370ede-4f39-11e6-9ffa-08002722cb23', - name="se-2", description="Scoring Engine 2", metainfo="a2=b2") - with freezegun.freeze_time(self.FAKE_OLDER_DATE): - self.scoring_engine3 = utils.create_test_scoring_engine( - id=3, uuid='e8370ede-4f39-11e6-9ffa-08002722cb24', - name="se-3", description="Scoring Engine 3", metainfo="a3=b3") - - def _soft_delete_scoring_engines(self): - with freezegun.freeze_time(self.FAKE_TODAY): - self.dbapi.soft_delete_scoring_engine(self.scoring_engine1.id) - with freezegun.freeze_time(self.FAKE_OLD_DATE): - self.dbapi.soft_delete_scoring_engine(self.scoring_engine2.id) - with 
freezegun.freeze_time(self.FAKE_OLDER_DATE): - self.dbapi.soft_delete_scoring_engine(self.scoring_engine3.id) - - def _update_scoring_engines(self): - with freezegun.freeze_time(self.FAKE_TODAY): - self.dbapi.update_scoring_engine( - self.scoring_engine1.id, - values={"description": "scoring_engine1"}) - with freezegun.freeze_time(self.FAKE_OLD_DATE): - self.dbapi.update_scoring_engine( - self.scoring_engine2.id, - values={"description": "scoring_engine2"}) - with freezegun.freeze_time(self.FAKE_OLDER_DATE): - self.dbapi.update_scoring_engine( - self.scoring_engine3.id, - values={"description": "scoring_engine3"}) - - def test_get_scoring_engine_list_filter_deleted_true(self): - with freezegun.freeze_time(self.FAKE_TODAY): - self.dbapi.soft_delete_scoring_engine(self.scoring_engine1.id) - - res = self.dbapi.get_scoring_engine_list( - self.context, filters={'deleted': True}) - - self.assertEqual([self.scoring_engine1['id']], [r.id for r in res]) - - def test_get_scoring_engine_list_filter_deleted_false(self): - with freezegun.freeze_time(self.FAKE_TODAY): - self.dbapi.soft_delete_scoring_engine(self.scoring_engine1.id) - - res = self.dbapi.get_scoring_engine_list( - self.context, filters={'deleted': False}) - - self.assertEqual( - set([self.scoring_engine2['id'], self.scoring_engine3['id']]), - set([r.id for r in res])) - - def test_get_scoring_engine_list_filter_deleted_at_eq(self): - self._soft_delete_scoring_engines() - - res = self.dbapi.get_scoring_engine_list( - self.context, filters={'deleted_at__eq': self.FAKE_TODAY}) - - self.assertEqual([self.scoring_engine1['id']], [r.id for r in res]) - - def test_get_scoring_engine_list_filter_deleted_at_lt(self): - self._soft_delete_scoring_engines() - - res = self.dbapi.get_scoring_engine_list( - self.context, filters={'deleted_at__lt': self.FAKE_TODAY}) - - self.assertEqual( - set([self.scoring_engine2['id'], self.scoring_engine3['id']]), - set([r.id for r in res])) - - def 
test_get_scoring_engine_list_filter_deleted_at_lte(self): - self._soft_delete_scoring_engines() - - res = self.dbapi.get_scoring_engine_list( - self.context, filters={'deleted_at__lte': self.FAKE_OLD_DATE}) - - self.assertEqual( - set([self.scoring_engine2['id'], self.scoring_engine3['id']]), - set([r.id for r in res])) - - def test_get_scoring_engine_list_filter_deleted_at_gt(self): - self._soft_delete_scoring_engines() - - res = self.dbapi.get_scoring_engine_list( - self.context, filters={'deleted_at__gt': self.FAKE_OLD_DATE}) - - self.assertEqual([self.scoring_engine1['id']], [r.id for r in res]) - - def test_get_scoring_engine_list_filter_deleted_at_gte(self): - self._soft_delete_scoring_engines() - - res = self.dbapi.get_scoring_engine_list( - self.context, filters={'deleted_at__gte': self.FAKE_OLD_DATE}) - - self.assertEqual( - set([self.scoring_engine1['id'], self.scoring_engine2['id']]), - set([r.id for r in res])) - - # created_at # - - def test_get_scoring_engine_list_filter_created_at_eq(self): - res = self.dbapi.get_scoring_engine_list( - self.context, filters={'created_at__eq': self.FAKE_TODAY}) - - self.assertEqual([self.scoring_engine1['id']], [r.id for r in res]) - - def test_get_scoring_engine_list_filter_created_at_lt(self): - res = self.dbapi.get_scoring_engine_list( - self.context, filters={'created_at__lt': self.FAKE_TODAY}) - - self.assertEqual( - set([self.scoring_engine2['id'], self.scoring_engine3['id']]), - set([r.id for r in res])) - - def test_get_scoring_engine_list_filter_created_at_lte(self): - res = self.dbapi.get_scoring_engine_list( - self.context, filters={'created_at__lte': self.FAKE_OLD_DATE}) - - self.assertEqual( - set([self.scoring_engine2['id'], self.scoring_engine3['id']]), - set([r.id for r in res])) - - def test_get_scoring_engine_list_filter_created_at_gt(self): - res = self.dbapi.get_scoring_engine_list( - self.context, filters={'created_at__gt': self.FAKE_OLD_DATE}) - - self.assertEqual([self.scoring_engine1['id']], 
[r.id for r in res]) - - def test_get_scoring_engine_list_filter_created_at_gte(self): - res = self.dbapi.get_scoring_engine_list( - self.context, filters={'created_at__gte': self.FAKE_OLD_DATE}) - - self.assertEqual( - set([self.scoring_engine1['id'], self.scoring_engine2['id']]), - set([r.id for r in res])) - - # updated_at # - - def test_get_scoring_engine_list_filter_updated_at_eq(self): - self._update_scoring_engines() - - res = self.dbapi.get_scoring_engine_list( - self.context, filters={'updated_at__eq': self.FAKE_TODAY}) - - self.assertEqual([self.scoring_engine1['id']], [r.id for r in res]) - - def test_get_scoring_engine_list_filter_updated_at_lt(self): - self._update_scoring_engines() - - res = self.dbapi.get_scoring_engine_list( - self.context, filters={'updated_at__lt': self.FAKE_TODAY}) - - self.assertEqual( - set([self.scoring_engine2['id'], self.scoring_engine3['id']]), - set([r.id for r in res])) - - def test_get_scoring_engine_list_filter_updated_at_lte(self): - self._update_scoring_engines() - - res = self.dbapi.get_scoring_engine_list( - self.context, filters={'updated_at__lte': self.FAKE_OLD_DATE}) - - self.assertEqual( - set([self.scoring_engine2['id'], self.scoring_engine3['id']]), - set([r.id for r in res])) - - def test_get_scoring_engine_list_filter_updated_at_gt(self): - self._update_scoring_engines() - - res = self.dbapi.get_scoring_engine_list( - self.context, filters={'updated_at__gt': self.FAKE_OLD_DATE}) - - self.assertEqual([self.scoring_engine1['id']], [r.id for r in res]) - - def test_get_scoring_engine_list_filter_updated_at_gte(self): - self._update_scoring_engines() - - res = self.dbapi.get_scoring_engine_list( - self.context, filters={'updated_at__gte': self.FAKE_OLD_DATE}) - - self.assertEqual( - set([self.scoring_engine1['id'], self.scoring_engine2['id']]), - set([r.id for r in res])) - - -class DbScoringEngineTestCase(base.DbTestCase): - - def _create_test_scoring_engine(self, **kwargs): - scoring_engine = 
utils.get_test_scoring_engine(**kwargs) - self.dbapi.create_scoring_engine(scoring_engine) - return scoring_engine - - def test_get_scoring_engine_list(self): - names = [] - for i in range(1, 4): - scoring_engine = utils.create_test_scoring_engine( - id=i, - uuid=w_utils.generate_uuid(), - name="SE_ID_%s" % i, - description='My ScoringEngine {0}'.format(i), - metainfo='a{0}=b{0}'.format(i)) - names.append(six.text_type(scoring_engine['name'])) - scoring_engines = self.dbapi.get_scoring_engine_list(self.context) - scoring_engines_names = [se.name for se in scoring_engines] - self.assertEqual(sorted(names), sorted(scoring_engines_names)) - - def test_get_scoring_engine_list_with_filters(self): - scoring_engine1 = self._create_test_scoring_engine( - id=1, - uuid=w_utils.generate_uuid(), - name="SE_ID_1", - description='ScoringEngine 1', - metainfo="a1=b1", - ) - scoring_engine2 = self._create_test_scoring_engine( - id=2, - uuid=w_utils.generate_uuid(), - name="SE_ID_2", - description='ScoringEngine 2', - metainfo="a2=b2", - ) - - res = self.dbapi.get_scoring_engine_list( - self.context, filters={'description': 'ScoringEngine 1'}) - self.assertEqual([scoring_engine1['name']], [r.name for r in res]) - - res = self.dbapi.get_scoring_engine_list( - self.context, filters={'description': 'ScoringEngine 3'}) - self.assertEqual([], [r.name for r in res]) - - res = self.dbapi.get_scoring_engine_list( - self.context, - filters={'description': 'ScoringEngine 2'}) - self.assertEqual([scoring_engine2['name']], [r.name for r in res]) - - def test_get_scoring_engine_by_id(self): - created_scoring_engine = self._create_test_scoring_engine() - scoring_engine = self.dbapi.get_scoring_engine_by_id( - self.context, created_scoring_engine['id']) - self.assertEqual(scoring_engine.id, created_scoring_engine['id']) - - def test_get_scoring_engine_by_uuid(self): - created_scoring_engine = self._create_test_scoring_engine() - scoring_engine = self.dbapi.get_scoring_engine_by_uuid( - 
self.context, created_scoring_engine['uuid']) - self.assertEqual(scoring_engine.uuid, created_scoring_engine['uuid']) - - def test_get_scoring_engine_by_name(self): - created_scoring_engine = self._create_test_scoring_engine() - scoring_engine = self.dbapi.get_scoring_engine_by_name( - self.context, created_scoring_engine['name']) - self.assertEqual(scoring_engine.name, created_scoring_engine['name']) - - def test_get_scoring_engine_that_does_not_exist(self): - self.assertRaises(exception.ScoringEngineNotFound, - self.dbapi.get_scoring_engine_by_id, - self.context, 404) - - def test_update_scoring_engine(self): - scoring_engine = self._create_test_scoring_engine() - res = self.dbapi.update_scoring_engine( - scoring_engine['id'], {'description': 'updated-model'}) - self.assertEqual('updated-model', res.description) - - def test_update_scoring_engine_id(self): - scoring_engine = self._create_test_scoring_engine() - self.assertRaises(exception.Invalid, - self.dbapi.update_scoring_engine, - scoring_engine['id'], - {'uuid': w_utils.generate_uuid()}) - - def test_update_scoring_engine_that_does_not_exist(self): - self.assertRaises(exception.ScoringEngineNotFound, - self.dbapi.update_scoring_engine, - 404, - {'description': ''}) - - def test_destroy_scoring_engine(self): - scoring_engine = self._create_test_scoring_engine() - self.dbapi.destroy_scoring_engine(scoring_engine['id']) - self.assertRaises(exception.ScoringEngineNotFound, - self.dbapi.get_scoring_engine_by_id, - self.context, scoring_engine['id']) - - def test_destroy_scoring_engine_that_does_not_exist(self): - self.assertRaises(exception.ScoringEngineNotFound, - self.dbapi.destroy_scoring_engine, 404) - - def test_create_scoring_engine_already_exists(self): - scoring_engine_id = "SE_ID" - self._create_test_scoring_engine(name=scoring_engine_id) - self.assertRaises(exception.ScoringEngineAlreadyExists, - self._create_test_scoring_engine, - name=scoring_engine_id) diff --git a/watcher/tests/db/test_service.py 
b/watcher/tests/db/test_service.py deleted file mode 100644 index cda5470..0000000 --- a/watcher/tests/db/test_service.py +++ /dev/null @@ -1,302 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 Servionica -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -"""Tests for manipulating Service via the DB API""" - -import freezegun - -from oslo_utils import timeutils - -from watcher.common import exception -from watcher.tests.db import base -from watcher.tests.db import utils - - -class TestDbServiceFilters(base.DbTestCase): - - FAKE_OLDER_DATE = '2014-01-01T09:52:05.219414' - FAKE_OLD_DATE = '2015-01-01T09:52:05.219414' - FAKE_TODAY = '2016-02-24T09:52:05.219414' - - def setUp(self): - super(TestDbServiceFilters, self).setUp() - self.context.show_deleted = True - self._data_setup() - - def _data_setup(self): - service1_name = "SERVICE_ID_1" - service2_name = "SERVICE_ID_2" - service3_name = "SERVICE_ID_3" - - with freezegun.freeze_time(self.FAKE_TODAY): - self.service1 = utils.create_test_service( - id=1, name=service1_name, host="controller", - last_seen_up=timeutils.parse_isotime("2016-09-22T08:32:05")) - with freezegun.freeze_time(self.FAKE_OLD_DATE): - self.service2 = utils.create_test_service( - id=2, name=service2_name, host="controller", - last_seen_up=timeutils.parse_isotime("2016-09-22T08:32:05")) - with freezegun.freeze_time(self.FAKE_OLDER_DATE): - self.service3 = utils.create_test_service( - id=3, name=service3_name, host="controller", - 
last_seen_up=timeutils.parse_isotime("2016-09-22T08:32:05")) - - def _soft_delete_services(self): - with freezegun.freeze_time(self.FAKE_TODAY): - self.dbapi.soft_delete_service(self.service1.id) - with freezegun.freeze_time(self.FAKE_OLD_DATE): - self.dbapi.soft_delete_service(self.service2.id) - with freezegun.freeze_time(self.FAKE_OLDER_DATE): - self.dbapi.soft_delete_service(self.service3.id) - - def _update_services(self): - with freezegun.freeze_time(self.FAKE_TODAY): - self.dbapi.update_service( - self.service1.id, values={"host": "controller1"}) - with freezegun.freeze_time(self.FAKE_OLD_DATE): - self.dbapi.update_service( - self.service2.id, values={"host": "controller2"}) - with freezegun.freeze_time(self.FAKE_OLDER_DATE): - self.dbapi.update_service( - self.service3.id, values={"host": "controller3"}) - - def test_get_service_list_filter_deleted_true(self): - with freezegun.freeze_time(self.FAKE_TODAY): - self.dbapi.soft_delete_service(self.service1.id) - - res = self.dbapi.get_service_list( - self.context, filters={'deleted': True}) - - self.assertEqual([self.service1['name']], [r.name for r in res]) - - def test_get_service_list_filter_deleted_false(self): - with freezegun.freeze_time(self.FAKE_TODAY): - self.dbapi.soft_delete_service(self.service1.id) - - res = self.dbapi.get_service_list( - self.context, filters={'deleted': False}) - - self.assertEqual( - set([self.service2['name'], self.service3['name']]), - set([r.name for r in res])) - - def test_get_service_list_filter_deleted_at_eq(self): - self._soft_delete_services() - - res = self.dbapi.get_service_list( - self.context, filters={'deleted_at__eq': self.FAKE_TODAY}) - - self.assertEqual([self.service1['id']], [r.id for r in res]) - - def test_get_service_list_filter_deleted_at_lt(self): - self._soft_delete_services() - - res = self.dbapi.get_service_list( - self.context, filters={'deleted_at__lt': self.FAKE_TODAY}) - - self.assertEqual( - set([self.service2['id'], self.service3['id']]), - 
set([r.id for r in res])) - - def test_get_service_list_filter_deleted_at_lte(self): - self._soft_delete_services() - - res = self.dbapi.get_service_list( - self.context, filters={'deleted_at__lte': self.FAKE_OLD_DATE}) - - self.assertEqual( - set([self.service2['id'], self.service3['id']]), - set([r.id for r in res])) - - def test_get_service_list_filter_deleted_at_gt(self): - self._soft_delete_services() - - res = self.dbapi.get_service_list( - self.context, filters={'deleted_at__gt': self.FAKE_OLD_DATE}) - - self.assertEqual([self.service1['id']], [r.id for r in res]) - - def test_get_service_list_filter_deleted_at_gte(self): - self._soft_delete_services() - - res = self.dbapi.get_service_list( - self.context, filters={'deleted_at__gte': self.FAKE_OLD_DATE}) - - self.assertEqual( - set([self.service1['id'], self.service2['id']]), - set([r.id for r in res])) - - # created_at # - - def test_get_service_list_filter_created_at_eq(self): - res = self.dbapi.get_service_list( - self.context, filters={'created_at__eq': self.FAKE_TODAY}) - - self.assertEqual([self.service1['id']], [r.id for r in res]) - - def test_get_service_list_filter_created_at_lt(self): - res = self.dbapi.get_service_list( - self.context, filters={'created_at__lt': self.FAKE_TODAY}) - - self.assertEqual( - set([self.service2['id'], self.service3['id']]), - set([r.id for r in res])) - - def test_get_service_list_filter_created_at_lte(self): - res = self.dbapi.get_service_list( - self.context, filters={'created_at__lte': self.FAKE_OLD_DATE}) - - self.assertEqual( - set([self.service2['id'], self.service3['id']]), - set([r.id for r in res])) - - def test_get_service_list_filter_created_at_gt(self): - res = self.dbapi.get_service_list( - self.context, filters={'created_at__gt': self.FAKE_OLD_DATE}) - - self.assertEqual([self.service1['id']], [r.id for r in res]) - - def test_get_service_list_filter_created_at_gte(self): - res = self.dbapi.get_service_list( - self.context, filters={'created_at__gte': 
self.FAKE_OLD_DATE}) - - self.assertEqual( - set([self.service1['id'], self.service2['id']]), - set([r.id for r in res])) - - # updated_at # - - def test_get_service_list_filter_updated_at_eq(self): - self._update_services() - - res = self.dbapi.get_service_list( - self.context, filters={'updated_at__eq': self.FAKE_TODAY}) - - self.assertEqual([self.service1['id']], [r.id for r in res]) - - def test_get_service_list_filter_updated_at_lt(self): - self._update_services() - - res = self.dbapi.get_service_list( - self.context, filters={'updated_at__lt': self.FAKE_TODAY}) - - self.assertEqual( - set([self.service2['id'], self.service3['id']]), - set([r.id for r in res])) - - def test_get_service_list_filter_updated_at_lte(self): - self._update_services() - - res = self.dbapi.get_service_list( - self.context, filters={'updated_at__lte': self.FAKE_OLD_DATE}) - - self.assertEqual( - set([self.service2['id'], self.service3['id']]), - set([r.id for r in res])) - - def test_get_service_list_filter_updated_at_gt(self): - self._update_services() - - res = self.dbapi.get_service_list( - self.context, filters={'updated_at__gt': self.FAKE_OLD_DATE}) - - self.assertEqual([self.service1['id']], [r.id for r in res]) - - def test_get_service_list_filter_updated_at_gte(self): - self._update_services() - - res = self.dbapi.get_service_list( - self.context, filters={'updated_at__gte': self.FAKE_OLD_DATE}) - - self.assertEqual( - set([self.service1['id'], self.service2['id']]), - set([r.id for r in res])) - - -class DbServiceTestCase(base.DbTestCase): - - def _create_test_service(self, **kwargs): - service = utils.get_test_service(**kwargs) - self.dbapi.create_service(service) - return service - - def test_get_service_list(self): - ids = [] - for i in range(1, 4): - service = utils.create_test_service( - id=i, - name="SERVICE_ID_%s" % i, - host="controller_{0}".format(i)) - ids.append(service['id']) - services = self.dbapi.get_service_list(self.context) - service_ids = [s.id for s in 
services] - self.assertEqual(sorted(ids), sorted(service_ids)) - - def test_get_service_list_with_filters(self): - service1 = self._create_test_service( - id=1, - name="SERVICE_ID_1", - host="controller_1", - ) - service2 = self._create_test_service( - id=2, - name="SERVICE_ID_2", - host="controller_2", - ) - - res = self.dbapi.get_service_list( - self.context, filters={'host': 'controller_1'}) - self.assertEqual([service1['id']], [r.id for r in res]) - - res = self.dbapi.get_service_list( - self.context, filters={'host': 'controller_3'}) - self.assertEqual([], [r.id for r in res]) - - res = self.dbapi.get_service_list( - self.context, - filters={'host': 'controller_2'}) - self.assertEqual([service2['id']], [r.id for r in res]) - - def test_get_service_by_name(self): - created_service = self._create_test_service() - service = self.dbapi.get_service_by_name( - self.context, created_service['name']) - self.assertEqual(service.name, created_service['name']) - - def test_get_service_that_does_not_exist(self): - self.assertRaises(exception.ServiceNotFound, - self.dbapi.get_service_by_id, - self.context, 404) - - def test_update_service(self): - service = self._create_test_service() - res = self.dbapi.update_service( - service['id'], {'host': 'controller_test'}) - self.assertEqual('controller_test', res.host) - - def test_update_service_that_does_not_exist(self): - self.assertRaises(exception.ServiceNotFound, - self.dbapi.update_service, - 405, - {'name': ''}) - - def test_create_service_already_exists(self): - service_id = "STRATEGY_ID" - self._create_test_service(name=service_id) - self.assertRaises(exception.ServiceAlreadyExists, - self._create_test_service, - name=service_id) diff --git a/watcher/tests/db/test_strategy.py b/watcher/tests/db/test_strategy.py deleted file mode 100644 index 081fa79..0000000 --- a/watcher/tests/db/test_strategy.py +++ /dev/null @@ -1,364 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Licensed under the Apache 
License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -"""Tests for manipulating Strategy via the DB API""" - -import freezegun -import six - -from watcher.common import exception -from watcher.common import utils as w_utils -from watcher.tests.db import base -from watcher.tests.db import utils - - -class TestDbStrategyFilters(base.DbTestCase): - - FAKE_OLDER_DATE = '2014-01-01T09:52:05.219414' - FAKE_OLD_DATE = '2015-01-01T09:52:05.219414' - FAKE_TODAY = '2016-02-24T09:52:05.219414' - - def setUp(self): - super(TestDbStrategyFilters, self).setUp() - self.context.show_deleted = True - self._data_setup() - - def _data_setup(self): - strategy1_name = "STRATEGY_ID_1" - strategy2_name = "STRATEGY_ID_2" - strategy3_name = "STRATEGY_ID_3" - - self.goal1 = utils.create_test_goal( - id=1, uuid=w_utils.generate_uuid(), - name="GOAL_ID", display_name="Goal") - self.goal2 = utils.create_test_goal( - id=2, uuid=w_utils.generate_uuid(), - name="DUMMY", display_name="Dummy") - - with freezegun.freeze_time(self.FAKE_TODAY): - self.strategy1 = utils.create_test_strategy( - id=1, uuid=w_utils.generate_uuid(), - name=strategy1_name, display_name="Strategy 1", - goal_id=self.goal1.id) - with freezegun.freeze_time(self.FAKE_OLD_DATE): - self.strategy2 = utils.create_test_strategy( - id=2, uuid=w_utils.generate_uuid(), - name=strategy2_name, display_name="Strategy 2", - goal_id=self.goal1.id) - with freezegun.freeze_time(self.FAKE_OLDER_DATE): - self.strategy3 = utils.create_test_strategy( - id=3, 
uuid=w_utils.generate_uuid(), - name=strategy3_name, display_name="Strategy 3", - goal_id=self.goal2.id) - - def _soft_delete_strategys(self): - with freezegun.freeze_time(self.FAKE_TODAY): - self.dbapi.soft_delete_strategy(self.strategy1.id) - with freezegun.freeze_time(self.FAKE_OLD_DATE): - self.dbapi.soft_delete_strategy(self.strategy2.id) - with freezegun.freeze_time(self.FAKE_OLDER_DATE): - self.dbapi.soft_delete_strategy(self.strategy3.id) - - def _update_strategies(self): - with freezegun.freeze_time(self.FAKE_TODAY): - self.dbapi.update_strategy( - self.strategy1.id, values={"display_name": "strategy1"}) - with freezegun.freeze_time(self.FAKE_OLD_DATE): - self.dbapi.update_strategy( - self.strategy2.id, values={"display_name": "strategy2"}) - with freezegun.freeze_time(self.FAKE_OLDER_DATE): - self.dbapi.update_strategy( - self.strategy3.id, values={"display_name": "strategy3"}) - - def test_get_strategy_list_filter_deleted_true(self): - with freezegun.freeze_time(self.FAKE_TODAY): - self.dbapi.soft_delete_strategy(self.strategy1.id) - - res = self.dbapi.get_strategy_list( - self.context, filters={'deleted': True}) - - self.assertEqual([self.strategy1['uuid']], [r.uuid for r in res]) - - def test_get_strategy_list_filter_deleted_false(self): - with freezegun.freeze_time(self.FAKE_TODAY): - self.dbapi.soft_delete_strategy(self.strategy1.id) - - res = self.dbapi.get_strategy_list( - self.context, filters={'deleted': False}) - - self.assertEqual( - set([self.strategy2['uuid'], self.strategy3['uuid']]), - set([r.uuid for r in res])) - - def test_get_strategy_list_filter_deleted_at_eq(self): - self._soft_delete_strategys() - - res = self.dbapi.get_strategy_list( - self.context, filters={'deleted_at__eq': self.FAKE_TODAY}) - - self.assertEqual([self.strategy1['uuid']], [r.uuid for r in res]) - - def test_get_strategy_list_filter_deleted_at_lt(self): - self._soft_delete_strategys() - - res = self.dbapi.get_strategy_list( - self.context, filters={'deleted_at__lt': 
self.FAKE_TODAY}) - - self.assertEqual( - set([self.strategy2['uuid'], self.strategy3['uuid']]), - set([r.uuid for r in res])) - - def test_get_strategy_list_filter_deleted_at_lte(self): - self._soft_delete_strategys() - - res = self.dbapi.get_strategy_list( - self.context, filters={'deleted_at__lte': self.FAKE_OLD_DATE}) - - self.assertEqual( - set([self.strategy2['uuid'], self.strategy3['uuid']]), - set([r.uuid for r in res])) - - def test_get_strategy_list_filter_deleted_at_gt(self): - self._soft_delete_strategys() - - res = self.dbapi.get_strategy_list( - self.context, filters={'deleted_at__gt': self.FAKE_OLD_DATE}) - - self.assertEqual([self.strategy1['uuid']], [r.uuid for r in res]) - - def test_get_strategy_list_filter_deleted_at_gte(self): - self._soft_delete_strategys() - - res = self.dbapi.get_strategy_list( - self.context, filters={'deleted_at__gte': self.FAKE_OLD_DATE}) - - self.assertEqual( - set([self.strategy1['uuid'], self.strategy2['uuid']]), - set([r.uuid for r in res])) - - # created_at # - - def test_get_strategy_list_filter_created_at_eq(self): - res = self.dbapi.get_strategy_list( - self.context, filters={'created_at__eq': self.FAKE_TODAY}) - - self.assertEqual([self.strategy1['uuid']], [r.uuid for r in res]) - - def test_get_strategy_list_filter_created_at_lt(self): - res = self.dbapi.get_strategy_list( - self.context, filters={'created_at__lt': self.FAKE_TODAY}) - - self.assertEqual( - set([self.strategy2['uuid'], self.strategy3['uuid']]), - set([r.uuid for r in res])) - - def test_get_strategy_list_filter_created_at_lte(self): - res = self.dbapi.get_strategy_list( - self.context, filters={'created_at__lte': self.FAKE_OLD_DATE}) - - self.assertEqual( - set([self.strategy2['uuid'], self.strategy3['uuid']]), - set([r.uuid for r in res])) - - def test_get_strategy_list_filter_created_at_gt(self): - res = self.dbapi.get_strategy_list( - self.context, filters={'created_at__gt': self.FAKE_OLD_DATE}) - - self.assertEqual([self.strategy1['uuid']], 
[r.uuid for r in res]) - - def test_get_strategy_list_filter_created_at_gte(self): - res = self.dbapi.get_strategy_list( - self.context, filters={'created_at__gte': self.FAKE_OLD_DATE}) - - self.assertEqual( - set([self.strategy1['uuid'], self.strategy2['uuid']]), - set([r.uuid for r in res])) - - # updated_at # - - def test_get_strategy_list_filter_updated_at_eq(self): - self._update_strategies() - - res = self.dbapi.get_strategy_list( - self.context, filters={'updated_at__eq': self.FAKE_TODAY}) - - self.assertEqual([self.strategy1['uuid']], [r.uuid for r in res]) - - def test_get_strategy_list_filter_updated_at_lt(self): - self._update_strategies() - - res = self.dbapi.get_strategy_list( - self.context, filters={'updated_at__lt': self.FAKE_TODAY}) - - self.assertEqual( - set([self.strategy2['uuid'], self.strategy3['uuid']]), - set([r.uuid for r in res])) - - def test_get_strategy_list_filter_updated_at_lte(self): - self._update_strategies() - - res = self.dbapi.get_strategy_list( - self.context, filters={'updated_at__lte': self.FAKE_OLD_DATE}) - - self.assertEqual( - set([self.strategy2['uuid'], self.strategy3['uuid']]), - set([r.uuid for r in res])) - - def test_get_strategy_list_filter_updated_at_gt(self): - self._update_strategies() - - res = self.dbapi.get_strategy_list( - self.context, filters={'updated_at__gt': self.FAKE_OLD_DATE}) - - self.assertEqual([self.strategy1['uuid']], [r.uuid for r in res]) - - def test_get_strategy_list_filter_updated_at_gte(self): - self._update_strategies() - - res = self.dbapi.get_strategy_list( - self.context, filters={'updated_at__gte': self.FAKE_OLD_DATE}) - - self.assertEqual( - set([self.strategy1['uuid'], self.strategy2['uuid']]), - set([r.uuid for r in res])) - - -class DbStrategyTestCase(base.DbTestCase): - - def _create_test_strategy(self, **kwargs): - strategy = utils.get_test_strategy(**kwargs) - self.dbapi.create_strategy(strategy) - return strategy - - def test_get_strategy_list(self): - uuids = [] - for i in 
range(1, 4): - strategy = utils.create_test_strategy( - id=i, - uuid=w_utils.generate_uuid(), - name="STRATEGY_ID_%s" % i, - display_name='My Strategy {0}'.format(i)) - uuids.append(six.text_type(strategy['uuid'])) - strategies = self.dbapi.get_strategy_list(self.context) - strategy_uuids = [s.uuid for s in strategies] - self.assertEqual(sorted(uuids), sorted(strategy_uuids)) - for strategy in strategies: - self.assertIsNone(strategy.goal) - - def test_get_strategy_list_eager(self): - _goal = utils.get_test_goal() - goal = self.dbapi.create_goal(_goal) - uuids = [] - for i in range(1, 4): - strategy = utils.create_test_strategy( - id=i, - uuid=w_utils.generate_uuid(), - name="STRATEGY_ID_%s" % i, - display_name='My Strategy {0}'.format(i), - goal_id=goal.id) - uuids.append(six.text_type(strategy['uuid'])) - strategys = self.dbapi.get_strategy_list(self.context, eager=True) - strategy_map = {a.uuid: a for a in strategys} - self.assertEqual(sorted(uuids), sorted(strategy_map.keys())) - eager_strategy = strategy_map[strategy.uuid] - self.assertEqual(goal.as_dict(), eager_strategy.goal.as_dict()) - - def test_get_strategy_list_with_filters(self): - strategy1 = self._create_test_strategy( - id=1, - uuid=w_utils.generate_uuid(), - name="STRATEGY_ID_1", - display_name='Strategy 1', - ) - strategy2 = self._create_test_strategy( - id=2, - uuid=w_utils.generate_uuid(), - name="STRATEGY_ID_2", - display_name='Strategy 2', - ) - - res = self.dbapi.get_strategy_list( - self.context, filters={'display_name': 'Strategy 1'}) - self.assertEqual([strategy1['uuid']], [r.uuid for r in res]) - - res = self.dbapi.get_strategy_list( - self.context, filters={'display_name': 'Strategy 3'}) - self.assertEqual([], [r.uuid for r in res]) - - res = self.dbapi.get_strategy_list( - self.context, - filters={'goal_id': 1}) - self.assertEqual([strategy1['uuid'], strategy2['uuid']], - [r.uuid for r in res]) - - res = self.dbapi.get_strategy_list( - self.context, - filters={'display_name': 'Strategy 
2'}) - self.assertEqual([strategy2['uuid']], [r.uuid for r in res]) - - def test_get_strategy_by_uuid(self): - created_strategy = self._create_test_strategy() - strategy = self.dbapi.get_strategy_by_uuid( - self.context, created_strategy['uuid']) - self.assertEqual(strategy.uuid, created_strategy['uuid']) - - def test_get_strategy_by_name(self): - created_strategy = self._create_test_strategy() - strategy = self.dbapi.get_strategy_by_name( - self.context, created_strategy['name']) - self.assertEqual(strategy.name, created_strategy['name']) - - def test_get_strategy_that_does_not_exist(self): - self.assertRaises(exception.StrategyNotFound, - self.dbapi.get_strategy_by_id, - self.context, 404) - - def test_update_strategy(self): - strategy = self._create_test_strategy() - res = self.dbapi.update_strategy( - strategy['uuid'], {'display_name': 'updated-model'}) - self.assertEqual('updated-model', res.display_name) - - def test_update_goal_id(self): - strategy = self._create_test_strategy() - self.assertRaises(exception.Invalid, - self.dbapi.update_strategy, strategy['uuid'], - {'uuid': 'new_strategy_id'}) - - def test_update_strategy_that_does_not_exist(self): - self.assertRaises(exception.StrategyNotFound, - self.dbapi.update_strategy, - 404, - {'display_name': ''}) - - def test_destroy_strategy(self): - strategy = self._create_test_strategy() - self.dbapi.destroy_strategy(strategy['uuid']) - self.assertRaises(exception.StrategyNotFound, - self.dbapi.get_strategy_by_id, - self.context, strategy['uuid']) - - def test_destroy_strategy_that_does_not_exist(self): - self.assertRaises(exception.StrategyNotFound, - self.dbapi.destroy_strategy, 404) - - def test_create_strategy_already_exists(self): - strategy_id = "STRATEGY_ID" - self._create_test_strategy(name=strategy_id) - self.assertRaises(exception.StrategyAlreadyExists, - self._create_test_strategy, - name=strategy_id) diff --git a/watcher/tests/db/utils.py b/watcher/tests/db/utils.py deleted file mode 100644 index 
65b88c6..0000000 --- a/watcher/tests/db/utils.py +++ /dev/null @@ -1,333 +0,0 @@ -# Copyright 2015 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Watcher test utilities.""" - -from oslo_utils import timeutils - -from watcher.db import api as db_api -from watcher.db.sqlalchemy import models -from watcher import objects - - -def id_generator(): - id_ = 1 - while True: - yield id_ - id_ += 1 - - -def _load_relationships(model, db_data): - rel_data = {} - relationships = db_api.get_instance()._get_relationships(model) - for name, relationship in relationships.items(): - related_model = relationship.argument - if not db_data.get(name): - rel_data[name] = None - else: - rel_data[name] = related_model(**db_data.get(name)) - - return rel_data - - -def get_test_audit_template(**kwargs): - audit_template_data = { - 'id': kwargs.get('id', 1), - 'uuid': kwargs.get('uuid', 'e74c40e0-d825-11e2-a28f-0800200c9a66'), - 'goal_id': kwargs.get('goal_id', 1), - 'strategy_id': kwargs.get('strategy_id', None), - 'name': kwargs.get('name', 'My Audit Template'), - 'description': kwargs.get('description', 'Desc. Of My Audit Template'), - 'scope': kwargs.get('scope', []), - 'created_at': kwargs.get('created_at'), - 'updated_at': kwargs.get('updated_at'), - 'deleted_at': kwargs.get('deleted_at'), - } - - # ObjectField doesn't allow None nor dict, so if we want to simulate a - # non-eager object loading, the field should not be referenced at all. 
- audit_template_data.update( - _load_relationships(models.AuditTemplate, kwargs)) - - return audit_template_data - - -def create_test_audit_template(**kwargs): - """Create test audit template entry in DB and return AuditTemplate DB object. - - Function to be used to create test AuditTemplate objects in the database. - :param kwargs: kwargsargs with overriding values for audit template's - attributes. - :returns: Test AuditTemplate DB object. - """ - audit_template = get_test_audit_template(**kwargs) - # Let DB generate ID if it isn't specified explicitly - if 'id' not in kwargs: - del audit_template['id'] - dbapi = db_api.get_instance() - return dbapi.create_audit_template(audit_template) - - -def get_test_audit(**kwargs): - audit_data = { - 'id': kwargs.get('id', 1), - 'uuid': kwargs.get('uuid', '10a47dd1-4874-4298-91cf-eff046dbdb8d'), - 'audit_type': kwargs.get('audit_type', 'ONESHOT'), - 'state': kwargs.get('state', objects.audit.State.PENDING), - 'created_at': kwargs.get('created_at'), - 'updated_at': kwargs.get('updated_at'), - 'deleted_at': kwargs.get('deleted_at'), - 'parameters': kwargs.get('parameters', {}), - 'interval': kwargs.get('interval', '3600'), - 'goal_id': kwargs.get('goal_id', 1), - 'strategy_id': kwargs.get('strategy_id', None), - 'scope': kwargs.get('scope', []), - 'auto_trigger': kwargs.get('auto_trigger', False), - 'next_run_time': kwargs.get('next_run_time') - } - # ObjectField doesn't allow None nor dict, so if we want to simulate a - # non-eager object loading, the field should not be referenced at all. - audit_data.update(_load_relationships(models.Audit, kwargs)) - - return audit_data - - -def create_test_audit(**kwargs): - """Create test audit entry in DB and return Audit DB object. - - Function to be used to create test Audit objects in the database. - :param kwargs: kwargsargs with overriding values for audit's attributes. - :returns: Test Audit DB object. 
- """ - audit = get_test_audit(**kwargs) - # Let DB generate ID if it isn't specified explicitly - if 'id' not in kwargs: - del audit['id'] - dbapi = db_api.get_instance() - return dbapi.create_audit(audit) - - -def get_test_action(**kwargs): - action_data = { - 'id': kwargs.get('id', 1), - 'uuid': kwargs.get('uuid', '10a47dd1-4874-4298-91cf-eff046dbdb8d'), - 'action_plan_id': kwargs.get('action_plan_id', 1), - 'action_type': kwargs.get('action_type', 'nop'), - 'input_parameters': - kwargs.get('input_parameters', - {'key1': 'val1', - 'key2': 'val2', - 'resource_id': - '10a47dd1-4874-4298-91cf-eff046dbdb8d'}), - 'state': kwargs.get('state', objects.action_plan.State.PENDING), - 'parents': kwargs.get('parents', []), - 'created_at': kwargs.get('created_at'), - 'updated_at': kwargs.get('updated_at'), - 'deleted_at': kwargs.get('deleted_at'), - } - - # ObjectField doesn't allow None nor dict, so if we want to simulate a - # non-eager object loading, the field should not be referenced at all. - action_data.update(_load_relationships(models.Action, kwargs)) - - return action_data - - -def create_test_action(**kwargs): - """Create test action entry in DB and return Action DB object. - - Function to be used to create test Action objects in the database. - :param kwargs: kwargsargs with overriding values for action's attributes. - :returns: Test Action DB object. 
- """ - action = get_test_action(**kwargs) - # Let DB generate ID if it isn't specified explicitly - if 'id' not in kwargs: - del action['id'] - dbapi = db_api.get_instance() - return dbapi.create_action(action) - - -def get_test_action_plan(**kwargs): - action_plan_data = { - 'id': kwargs.get('id', 1), - 'uuid': kwargs.get('uuid', '76be87bd-3422-43f9-93a0-e85a577e3061'), - 'state': kwargs.get('state', objects.action_plan.State.ONGOING), - 'audit_id': kwargs.get('audit_id', 1), - 'strategy_id': kwargs.get('strategy_id', 1), - 'global_efficacy': kwargs.get('global_efficacy', {}), - 'created_at': kwargs.get('created_at'), - 'updated_at': kwargs.get('updated_at'), - 'deleted_at': kwargs.get('deleted_at'), - } - - # ObjectField doesn't allow None nor dict, so if we want to simulate a - # non-eager object loading, the field should not be referenced at all. - action_plan_data.update(_load_relationships(models.ActionPlan, kwargs)) - - return action_plan_data - - -def create_test_action_plan(**kwargs): - """Create test action plan entry in DB and return Action Plan DB object. - - Function to be used to create test Action objects in the database. - :param kwargs: kwargsargs with overriding values for action's attributes. - :returns: Test Action DB object. 
- """ - action = get_test_action_plan(**kwargs) - # Let DB generate ID if it isn't specified explicitly - if 'id' not in kwargs: - del action['id'] - dbapi = db_api.get_instance() - return dbapi.create_action_plan(action) - - -def get_test_goal(**kwargs): - return { - 'id': kwargs.get('id', 1), - 'uuid': kwargs.get('uuid', 'f7ad87ae-4298-91cf-93a0-f35a852e3652'), - 'name': kwargs.get('name', 'TEST'), - 'display_name': kwargs.get('display_name', 'test goal'), - 'created_at': kwargs.get('created_at'), - 'updated_at': kwargs.get('updated_at'), - 'deleted_at': kwargs.get('deleted_at'), - 'efficacy_specification': kwargs.get('efficacy_specification', []), - } - - -def create_test_goal(**kwargs): - """Create test goal entry in DB and return Goal DB object. - - Function to be used to create test Goal objects in the database. - :param kwargs: kwargs which override default goal values of its attributes. - :returns: Test Goal DB object. - """ - goal = get_test_goal(**kwargs) - dbapi = db_api.get_instance() - return dbapi.create_goal(goal) - - -def get_test_scoring_engine(**kwargs): - return { - 'id': kwargs.get('id', 1), - 'uuid': kwargs.get('uuid', 'e8370ede-4f39-11e6-9ffa-08002722cb21'), - 'name': kwargs.get('name', 'test-se-01'), - 'description': kwargs.get('description', 'test scoring engine 01'), - 'metainfo': kwargs.get('metainfo', 'test_attr=test_val'), - 'created_at': kwargs.get('created_at'), - 'updated_at': kwargs.get('updated_at'), - 'deleted_at': kwargs.get('deleted_at'), - } - - -def create_test_scoring_engine(**kwargs): - """Create test scoring engine in DB and return ScoringEngine DB object. - - Function to be used to create test ScoringEngine objects in the database. - :param kwargs: kwargs with overriding values for SE'sattributes. - :returns: Test ScoringEngine DB object. 
- """ - scoring_engine = get_test_scoring_engine(**kwargs) - dbapi = db_api.get_instance() - return dbapi.create_scoring_engine(scoring_engine) - - -def get_test_strategy(**kwargs): - strategy_data = { - 'id': kwargs.get('id', 1), - 'uuid': kwargs.get('uuid', 'cb3d0b58-4415-4d90-b75b-1e96878730e3'), - 'name': kwargs.get('name', 'TEST'), - 'display_name': kwargs.get('display_name', 'test strategy'), - 'goal_id': kwargs.get('goal_id', 1), - 'created_at': kwargs.get('created_at'), - 'updated_at': kwargs.get('updated_at'), - 'deleted_at': kwargs.get('deleted_at'), - 'parameters_spec': kwargs.get('parameters_spec', {}), - } - - # ObjectField doesn't allow None nor dict, so if we want to simulate a - # non-eager object loading, the field should not be referenced at all. - strategy_data.update(_load_relationships(models.Strategy, kwargs)) - - return strategy_data - - -def get_test_service(**kwargs): - return { - 'id': kwargs.get('id', 1), - 'name': kwargs.get('name', 'watcher-service'), - 'host': kwargs.get('host', 'controller'), - 'last_seen_up': kwargs.get( - 'last_seen_up', - timeutils.parse_isotime('2016-09-22T08:32:06').replace(tzinfo=None) - ), - 'created_at': kwargs.get('created_at'), - 'updated_at': kwargs.get('updated_at'), - 'deleted_at': kwargs.get('deleted_at'), - } - - -def create_test_service(**kwargs): - """Create test service entry in DB and return Service DB object. - - Function to be used to create test Service objects in the database. - :param kwargs: kwargs with overriding values for service's attributes. - :returns: Test Service DB object. - """ - service = get_test_service(**kwargs) - dbapi = db_api.get_instance() - return dbapi.create_service(service) - - -def create_test_strategy(**kwargs): - """Create test strategy entry in DB and return Strategy DB object. - - Function to be used to create test Strategy objects in the database. - :param kwargs: kwargs with overriding values for strategy's attributes. - :returns: Test Strategy DB object. 
- """ - strategy = get_test_strategy(**kwargs) - dbapi = db_api.get_instance() - return dbapi.create_strategy(strategy) - - -def get_test_efficacy_indicator(**kwargs): - return { - 'id': kwargs.get('id', 1), - 'uuid': kwargs.get('uuid', '202cfcf9-811c-411a-8a35-d8351f64eb24'), - 'name': kwargs.get('name', 'test_indicator'), - 'description': kwargs.get('description', 'Test indicator'), - 'unit': kwargs.get('unit', '%'), - 'value': kwargs.get('value', 0), - 'action_plan_id': kwargs.get('action_plan_id', 1), - 'created_at': kwargs.get('created_at'), - 'updated_at': kwargs.get('updated_at'), - 'deleted_at': kwargs.get('deleted_at'), - } - - -def create_test_efficacy_indicator(**kwargs): - """Create and return a test efficacy indicator entry in DB. - - Function to be used to create test EfficacyIndicator objects in the DB. - :param kwargs: kwargs for overriding the values of the attributes - :returns: Test EfficacyIndicator DB object. - """ - efficacy_indicator = get_test_efficacy_indicator(**kwargs) - # Let DB generate ID if it isn't specified explicitly - if 'id' not in kwargs: - del efficacy_indicator['id'] - dbapi = db_api.get_instance() - return dbapi.create_efficacy_indicator(efficacy_indicator) diff --git a/watcher/tests/decision_engine/__init__.py b/watcher/tests/decision_engine/__init__.py deleted file mode 100644 index 2327bf1..0000000 --- a/watcher/tests/decision_engine/__init__.py +++ /dev/null @@ -1 +0,0 @@ -__author__ = 'Jean-Emile DARTOIS ' diff --git a/watcher/tests/decision_engine/audit/__init__.py b/watcher/tests/decision_engine/audit/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/tests/decision_engine/audit/test_audit_handlers.py b/watcher/tests/decision_engine/audit/test_audit_handlers.py deleted file mode 100644 index ed5ca96..0000000 --- a/watcher/tests/decision_engine/audit/test_audit_handlers.py +++ /dev/null @@ -1,363 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Licensed under the Apache 
License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import datetime - -import mock -from oslo_utils import uuidutils - -from apscheduler import job - -from watcher.applier import rpcapi -from watcher.common import exception -from watcher.common import scheduling -from watcher.db.sqlalchemy import api as sq_api -from watcher.decision_engine.audit import continuous -from watcher.decision_engine.audit import oneshot -from watcher.decision_engine.model.collector import manager -from watcher.decision_engine.strategy.strategies import dummy_strategy -from watcher import notifications -from watcher import objects -from watcher.tests.db import base -from watcher.tests.decision_engine.model import faker_cluster_state as faker -from watcher.tests.objects import utils as obj_utils - - -class TestOneShotAuditHandler(base.DbTestCase): - - def setUp(self): - super(TestOneShotAuditHandler, self).setUp() - p_audit_notifications = mock.patch.object( - notifications, 'audit', autospec=True) - self.m_audit_notifications = p_audit_notifications.start() - self.addCleanup(p_audit_notifications.stop) - - self.goal = obj_utils.create_test_goal( - self.context, id=1, name=dummy_strategy.DummyStrategy.get_name()) - self.strategy = obj_utils.create_test_strategy( - self.context, name=dummy_strategy.DummyStrategy.get_name(), - goal_id=self.goal.id) - audit_template = obj_utils.create_test_audit_template( - self.context, strategy_id=self.strategy.id) - self.audit = obj_utils.create_test_audit( - self.context, - 
uuid=uuidutils.generate_uuid(), - goal_id=self.goal.id, - strategy_id=self.strategy.id, - audit_template_id=audit_template.id, - goal=self.goal) - - @mock.patch.object(manager.CollectorManager, "get_cluster_model_collector") - def test_trigger_audit_without_errors(self, m_collector): - m_collector.return_value = faker.FakerModelCollector() - audit_handler = oneshot.OneShotAuditHandler() - audit_handler.execute(self.audit, self.context) - - expected_calls = [ - mock.call(self.context, self.audit, - action=objects.fields.NotificationAction.STRATEGY, - phase=objects.fields.NotificationPhase.START), - mock.call(self.context, self.audit, - action=objects.fields.NotificationAction.STRATEGY, - phase=objects.fields.NotificationPhase.END), - mock.call(self.context, self.audit, - action=objects.fields.NotificationAction.PLANNER, - phase=objects.fields.NotificationPhase.START), - mock.call(self.context, self.audit, - action=objects.fields.NotificationAction.PLANNER, - phase=objects.fields.NotificationPhase.END)] - - self.assertEqual( - expected_calls, - self.m_audit_notifications.send_action_notification.call_args_list) - - @mock.patch.object(dummy_strategy.DummyStrategy, "do_execute") - @mock.patch.object(manager.CollectorManager, "get_cluster_model_collector") - def test_trigger_audit_with_error(self, m_collector, m_do_execute): - m_collector.return_value = faker.FakerModelCollector() - m_do_execute.side_effect = Exception - audit_handler = oneshot.OneShotAuditHandler() - audit_handler.execute(self.audit, self.context) - - expected_calls = [ - mock.call(self.context, self.audit, - action=objects.fields.NotificationAction.STRATEGY, - phase=objects.fields.NotificationPhase.START), - mock.call(self.context, self.audit, - action=objects.fields.NotificationAction.STRATEGY, - priority=objects.fields.NotificationPriority.ERROR, - phase=objects.fields.NotificationPhase.ERROR)] - - self.assertEqual( - expected_calls, - 
self.m_audit_notifications.send_action_notification.call_args_list) - - @mock.patch.object(manager.CollectorManager, "get_cluster_model_collector") - def test_trigger_audit_state_succeeded(self, m_collector): - m_collector.return_value = faker.FakerModelCollector() - audit_handler = oneshot.OneShotAuditHandler() - audit_handler.execute(self.audit, self.context) - audit = objects.audit.Audit.get_by_uuid(self.context, self.audit.uuid) - self.assertEqual(objects.audit.State.SUCCEEDED, audit.state) - - expected_calls = [ - mock.call(self.context, self.audit, - action=objects.fields.NotificationAction.STRATEGY, - phase=objects.fields.NotificationPhase.START), - mock.call(self.context, self.audit, - action=objects.fields.NotificationAction.STRATEGY, - phase=objects.fields.NotificationPhase.END), - mock.call(self.context, self.audit, - action=objects.fields.NotificationAction.PLANNER, - phase=objects.fields.NotificationPhase.START), - mock.call(self.context, self.audit, - action=objects.fields.NotificationAction.PLANNER, - phase=objects.fields.NotificationPhase.END)] - - self.assertEqual( - expected_calls, - self.m_audit_notifications.send_action_notification.call_args_list) - - @mock.patch.object(manager.CollectorManager, "get_cluster_model_collector") - def test_trigger_audit_send_notification(self, m_collector): - m_collector.return_value = faker.FakerModelCollector() - audit_handler = oneshot.OneShotAuditHandler() - audit_handler.execute(self.audit, self.context) - - expected_calls = [ - mock.call(self.context, self.audit, - action=objects.fields.NotificationAction.STRATEGY, - phase=objects.fields.NotificationPhase.START), - mock.call(self.context, self.audit, - action=objects.fields.NotificationAction.STRATEGY, - phase=objects.fields.NotificationPhase.END), - mock.call(self.context, self.audit, - action=objects.fields.NotificationAction.PLANNER, - phase=objects.fields.NotificationPhase.START), - mock.call(self.context, self.audit, - 
action=objects.fields.NotificationAction.PLANNER, - phase=objects.fields.NotificationPhase.END)] - - self.assertEqual( - expected_calls, - self.m_audit_notifications.send_action_notification.call_args_list) - - -class TestAutoTriggerActionPlan(base.DbTestCase): - - def setUp(self): - super(TestAutoTriggerActionPlan, self).setUp() - self.goal = obj_utils.create_test_goal( - self.context, id=1, name=dummy_strategy.DummyStrategy.get_name()) - self.strategy = obj_utils.create_test_strategy( - self.context, name=dummy_strategy.DummyStrategy.get_name(), - goal_id=self.goal.id) - audit_template = obj_utils.create_test_audit_template( - self.context) - self.audit = obj_utils.create_test_audit( - self.context, - id=0, - uuid=uuidutils.generate_uuid(), - audit_template_id=audit_template.id, - goal_id=self.goal.id, - audit_type=objects.audit.AuditType.CONTINUOUS.value, - goal=self.goal, - auto_trigger=True) - self.ongoing_action_plan = obj_utils.create_test_action_plan( - self.context, - uuid=uuidutils.generate_uuid(), - audit_id=self.audit.id, - strategy_id=self.strategy.id, - audit=self.audit, - strategy=self.strategy, - ) - self.recommended_action_plan = obj_utils.create_test_action_plan( - self.context, - uuid=uuidutils.generate_uuid(), - state=objects.action_plan.State.ONGOING, - audit_id=self.audit.id, - strategy_id=self.strategy.id, - audit=self.audit, - strategy=self.strategy, - ) - - @mock.patch.object(oneshot.OneShotAuditHandler, 'do_execute') - @mock.patch.object(objects.action_plan.ActionPlan, 'list') - def test_trigger_audit_with_actionplan_ongoing(self, mock_list, - mock_do_execute): - mock_list.return_value = [self.ongoing_action_plan] - audit_handler = oneshot.OneShotAuditHandler() - audit_handler.execute(self.audit, self.context) - self.assertFalse(mock_do_execute.called) - - @mock.patch.object(rpcapi.ApplierAPI, 'launch_action_plan') - @mock.patch.object(objects.action_plan.ActionPlan, 'list') - @mock.patch.object(objects.audit.Audit, 'get_by_id') - def 
test_trigger_action_plan_without_ongoing(self, mock_get_by_id, - mock_list, mock_applier): - mock_get_by_id.return_value = self.audit - mock_list.return_value = [] - auto_trigger_handler = oneshot.OneShotAuditHandler() - with mock.patch.object(auto_trigger_handler, - 'do_schedule') as m_schedule: - m_schedule().uuid = self.recommended_action_plan.uuid - auto_trigger_handler.post_execute(self.audit, mock.MagicMock(), - self.context) - mock_applier.assert_called_once_with(self.context, - self.recommended_action_plan.uuid) - - -class TestContinuousAuditHandler(base.DbTestCase): - - def setUp(self): - super(TestContinuousAuditHandler, self).setUp() - self.goal = obj_utils.create_test_goal( - self.context, id=1, name=dummy_strategy.DummyStrategy.get_name()) - audit_template = obj_utils.create_test_audit_template( - self.context) - self.audits = [ - obj_utils.create_test_audit( - self.context, - id=id_, - uuid=uuidutils.generate_uuid(), - audit_template_id=audit_template.id, - goal_id=self.goal.id, - audit_type=objects.audit.AuditType.CONTINUOUS.value, - goal=self.goal) - for id_ in range(2, 4)] - - @mock.patch.object(objects.service.Service, 'list') - @mock.patch.object(sq_api, 'get_engine') - @mock.patch.object(scheduling.BackgroundSchedulerService, 'add_job') - @mock.patch.object(scheduling.BackgroundSchedulerService, 'get_jobs') - @mock.patch.object(objects.audit.Audit, 'list') - def test_launch_audits_periodically_with_interval( - self, mock_list, mock_jobs, m_add_job, m_engine, m_service): - audit_handler = continuous.ContinuousAuditHandler() - mock_list.return_value = self.audits - self.audits[0].next_run_time = (datetime.datetime.now() - - datetime.timedelta(seconds=1800)) - mock_jobs.return_value = mock.MagicMock() - m_engine.return_value = mock.MagicMock() - m_add_job.return_value = mock.MagicMock() - - audit_handler.launch_audits_periodically() - m_service.assert_called() - m_engine.assert_called() - m_add_job.assert_called() - mock_jobs.assert_called() - 
self.assertIsNotNone(self.audits[0].next_run_time) - self.assertIsNone(self.audits[1].next_run_time) - - @mock.patch.object(objects.service.Service, 'list') - @mock.patch.object(sq_api, 'get_engine') - @mock.patch.object(scheduling.BackgroundSchedulerService, 'add_job') - @mock.patch.object(scheduling.BackgroundSchedulerService, 'get_jobs') - @mock.patch.object(objects.audit.Audit, 'list') - def test_launch_audits_periodically_with_cron( - self, mock_list, mock_jobs, m_add_job, m_engine, m_service): - audit_handler = continuous.ContinuousAuditHandler() - mock_list.return_value = self.audits - self.audits[0].interval = "*/5 * * * *" - mock_jobs.return_value = mock.MagicMock() - m_engine.return_value = mock.MagicMock() - m_add_job.return_value = mock.MagicMock() - - audit_handler.launch_audits_periodically() - m_service.assert_called() - m_engine.assert_called() - m_add_job.assert_called() - mock_jobs.assert_called() - self.assertIsNotNone(self.audits[0].next_run_time) - self.assertIsNone(self.audits[1].next_run_time) - - @mock.patch.object(continuous.ContinuousAuditHandler, '_next_cron_time') - @mock.patch.object(objects.service.Service, 'list') - @mock.patch.object(sq_api, 'get_engine') - @mock.patch.object(scheduling.BackgroundSchedulerService, 'add_job') - @mock.patch.object(scheduling.BackgroundSchedulerService, 'get_jobs') - @mock.patch.object(objects.audit.Audit, 'list') - def test_launch_audits_periodically_with_invalid_cron( - self, mock_list, mock_jobs, m_add_job, m_engine, m_service, - mock_cron): - audit_handler = continuous.ContinuousAuditHandler() - mock_list.return_value = self.audits - self.audits[0].interval = "*/5* * * *" - mock_cron.side_effect = exception.CronFormatIsInvalid - mock_jobs.return_value = mock.MagicMock() - m_engine.return_value = mock.MagicMock() - m_add_job.return_value = mock.MagicMock() - - self.assertRaises(exception.CronFormatIsInvalid, - audit_handler.launch_audits_periodically) - - @mock.patch.object(objects.service.Service, 
'list') - @mock.patch.object(sq_api, 'get_engine') - @mock.patch.object(scheduling.BackgroundSchedulerService, 'add_job') - @mock.patch.object(scheduling.BackgroundSchedulerService, 'get_jobs') - @mock.patch.object(objects.audit.Audit, 'list') - def test_launch_multiply_audits_periodically(self, mock_list, - mock_jobs, m_add_job, - m_engine, m_service): - audit_handler = continuous.ContinuousAuditHandler() - mock_list.return_value = self.audits - mock_jobs.return_value = mock.MagicMock() - m_engine.return_value = mock.MagicMock() - m_service.return_value = mock.MagicMock() - calls = [mock.call(audit_handler.execute_audit, 'interval', - args=[mock.ANY, mock.ANY], - seconds=3600, - name='execute_audit', - next_run_time=mock.ANY) for _ in self.audits] - audit_handler.launch_audits_periodically() - m_add_job.assert_has_calls(calls) - - @mock.patch.object(objects.service.Service, 'list') - @mock.patch.object(sq_api, 'get_engine') - @mock.patch.object(scheduling.BackgroundSchedulerService, 'add_job') - @mock.patch.object(scheduling.BackgroundSchedulerService, 'get_jobs') - @mock.patch.object(objects.audit.Audit, 'list') - def test_period_audit_not_called_when_deleted(self, mock_list, - mock_jobs, m_add_job, - m_engine, m_service): - audit_handler = continuous.ContinuousAuditHandler() - mock_list.return_value = self.audits - mock_jobs.return_value = mock.MagicMock() - m_service.return_value = mock.MagicMock() - m_engine.return_value = mock.MagicMock() - self.audits[1].state = objects.audit.State.CANCELLED - self.audits[0].state = objects.audit.State.SUSPENDED - - ap_jobs = [job.Job(mock.MagicMock(), name='execute_audit', - func=audit_handler.execute_audit, - args=(self.audits[0], mock.MagicMock()), - kwargs={}), - job.Job(mock.MagicMock(), name='execute_audit', - func=audit_handler.execute_audit, - args=(self.audits[1], mock.MagicMock()), - kwargs={}) - ] - mock_jobs.return_value = ap_jobs - audit_handler.launch_audits_periodically() - - 
audit_handler.update_audit_state(self.audits[1], - objects.audit.State.CANCELLED) - audit_handler.update_audit_state(self.audits[0], - objects.audit.State.SUSPENDED) - is_inactive = audit_handler._is_audit_inactive(self.audits[1]) - self.assertTrue(is_inactive) - is_inactive = audit_handler._is_audit_inactive(self.audits[0]) - self.assertTrue(is_inactive) diff --git a/watcher/tests/decision_engine/cluster/__init__.py b/watcher/tests/decision_engine/cluster/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/tests/decision_engine/cluster/test_cluster_data_model_collector.py b/watcher/tests/decision_engine/cluster/test_cluster_data_model_collector.py deleted file mode 100644 index f0d8433..0000000 --- a/watcher/tests/decision_engine/cluster/test_cluster_data_model_collector.py +++ /dev/null @@ -1,54 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import mock - -from watcher.decision_engine.model.collector import base -from watcher.decision_engine.model import model_root -from watcher.tests import base as test_base - - -class DummyClusterDataModelCollector(base.BaseClusterDataModelCollector): - - @property - def notification_endpoints(self): - return [] - - def execute(self): - model = model_root.ModelRoot() - # Do something here... 
- return model - - -class TestClusterDataModelCollector(test_base.TestCase): - - def test_is_singleton(self): - m_config = mock.Mock() - inst1 = DummyClusterDataModelCollector(config=m_config) - inst2 = DummyClusterDataModelCollector(config=m_config) - - self.assertIs(inst1, inst2) - - def test_in_memory_model_is_copied(self): - m_config = mock.Mock() - collector = DummyClusterDataModelCollector(config=m_config) - collector.synchronize() - - self.assertIs( - collector._cluster_data_model, collector.cluster_data_model) - self.assertIsNot( - collector.cluster_data_model, - collector.get_latest_cluster_data_model()) diff --git a/watcher/tests/decision_engine/cluster/test_nova_cdmc.py b/watcher/tests/decision_engine/cluster/test_nova_cdmc.py deleted file mode 100644 index a685766..0000000 --- a/watcher/tests/decision_engine/cluster/test_nova_cdmc.py +++ /dev/null @@ -1,86 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Authors: Vincent FRANCOISE -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import mock - -from watcher.common import nova_helper -from watcher.common import utils -from watcher.decision_engine.model.collector import nova -from watcher.tests import base -from watcher.tests import conf_fixture - - -class TestNovaClusterDataModelCollector(base.TestCase): - - def setUp(self): - super(TestNovaClusterDataModelCollector, self).setUp() - self.useFixture(conf_fixture.ConfReloadFixture()) - - @mock.patch('keystoneclient.v3.client.Client', mock.Mock()) - @mock.patch.object(nova_helper, 'NovaHelper') - def test_nova_cdmc_execute(self, m_nova_helper_cls): - m_nova_helper = mock.Mock(name="nova_helper") - m_nova_helper_cls.return_value = m_nova_helper - m_nova_helper.get_service.return_value = mock.Mock( - host="test_hostname") - - fake_compute_node = mock.Mock( - id=1337, - service={'id': 123}, - hypervisor_hostname='test_hostname', - memory_mb=333, - free_disk_gb=222, - local_gb=111, - vcpus=4, - state='TEST_STATE', - status='TEST_STATUS', - ) - fake_instance = mock.Mock( - id='ef500f7e-dac8-470f-960c-169486fce71b', - human_id='fake_instance', - flavor={'ram': 333, 'disk': 222, 'vcpus': 4, 'id': 1}, - metadata={'hi': 'hello'}, - ) - setattr(fake_instance, 'OS-EXT-STS:vm_state', 'VM_STATE') - setattr(fake_instance, 'OS-EXT-SRV-ATTR:host', 'test_hostname') - m_nova_helper.get_compute_node_list.return_value = [fake_compute_node] - # m_nova_helper.get_instances_by_node.return_value = [fake_instance] - m_nova_helper.get_instance_list.return_value = [fake_instance] - - m_nova_helper.get_flavor.return_value = utils.Struct(**{ - 'ram': 333, 'disk': 222, 'vcpus': 4}) - - m_config = mock.Mock() - m_osc = mock.Mock() - - nova_cdmc = nova.NovaClusterDataModelCollector( - config=m_config, osc=m_osc) - - model = nova_cdmc.execute() - - compute_nodes = model.get_all_compute_nodes() - instances = model.get_all_instances() - - self.assertEqual(1, len(compute_nodes)) - self.assertEqual(1, len(instances)) - - node = list(compute_nodes.values())[0] - instance = 
list(instances.values())[0] - - self.assertEqual(node.uuid, 'test_hostname') - self.assertEqual(instance.uuid, 'ef500f7e-dac8-470f-960c-169486fce71b') diff --git a/watcher/tests/decision_engine/event_consumer/__init__.py b/watcher/tests/decision_engine/event_consumer/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/tests/decision_engine/fake_goals.py b/watcher/tests/decision_engine/fake_goals.py deleted file mode 100644 index 435253d..0000000 --- a/watcher/tests/decision_engine/fake_goals.py +++ /dev/null @@ -1,83 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import voluptuous - -from watcher.decision_engine.goal import base as base_goal -from watcher.decision_engine.goal.efficacy import base as efficacy_base -from watcher.decision_engine.goal.efficacy import indicators -from watcher.decision_engine.goal.efficacy import specs - - -class FakeGoal(base_goal.Goal): - - NAME = NotImplemented - DISPLAY_NAME = NotImplemented - - @classmethod - def get_name(cls): - return cls.NAME - - @classmethod - def get_display_name(cls): - return cls.DISPLAY_NAME - - @classmethod - def get_translatable_display_name(cls): - return cls.DISPLAY_NAME - - @classmethod - def get_efficacy_specification(cls): - """The efficacy spec for the current goal""" - return specs.Unclassified() - - -class DummyIndicator(indicators.IndicatorSpecification): - def __init__(self): - super(DummyIndicator, self).__init__( - name="dummy", - description="Dummy indicator", - unit="%", - ) - - @property - def schema(self): - return voluptuous.Schema( - voluptuous.Range(min=0, max=100), required=True) - - -class DummySpec1(efficacy_base.EfficacySpecification): - - def get_indicators_specifications(self): - return [DummyIndicator()] - - def get_global_efficacy_indicator(self, indicators_map): - return None - - -class FakeDummy1(FakeGoal): - NAME = "dummy_1" - DISPLAY_NAME = "Dummy 1" - - @classmethod - def get_efficacy_specification(cls): - """The efficacy spec for the current goal""" - return DummySpec1() - - -class FakeDummy2(FakeGoal): - NAME = "dummy_2" - DISPLAY_NAME = "Dummy 2" diff --git a/watcher/tests/decision_engine/fake_strategies.py b/watcher/tests/decision_engine/fake_strategies.py deleted file mode 100644 index 002290b..0000000 --- a/watcher/tests/decision_engine/fake_strategies.py +++ /dev/null @@ -1,85 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from oslo_config import cfg - -from watcher.decision_engine.strategy.strategies import base as base_strategy - - -class FakeStrategy(base_strategy.BaseStrategy): - - NAME = NotImplemented - DISPLAY_NAME = NotImplemented - GOAL_NAME = NotImplemented - - @classmethod - def get_name(cls): - return cls.NAME - - @classmethod - def get_display_name(cls): - return cls.DISPLAY_NAME - - @classmethod - def get_translatable_display_name(cls): - return cls.DISPLAY_NAME - - @classmethod - def get_goal_name(cls): - return cls.GOAL_NAME - - @classmethod - def get_config_opts(cls): - return [] - - def pre_execute(self): - pass - - def do_execute(self): - pass - - def post_execute(self): - pass - - -class FakeDummy1Strategy1(FakeStrategy): - GOAL_NAME = "dummy_1" - NAME = "strategy_1" - DISPLAY_NAME = "Strategy 1" - - @classmethod - def get_config_opts(cls): - return [ - cfg.StrOpt('test_opt', help="Option used for testing."), - ] - - -class FakeDummy1Strategy2(FakeStrategy): - GOAL_NAME = "dummy_1" - NAME = "strategy_2" - DISPLAY_NAME = "Strategy 2" - - -class FakeDummy2Strategy3(FakeStrategy): - GOAL_NAME = "dummy_2" - NAME = "strategy_3" - DISPLAY_NAME = "Strategy 3" - - -class FakeDummy2Strategy4(FakeStrategy): - GOAL_NAME = "dummy_2" - NAME = "strategy_4" - DISPLAY_NAME = "Strategy 4" diff --git a/watcher/tests/decision_engine/loading/__init__.py b/watcher/tests/decision_engine/loading/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/tests/decision_engine/loading/test_collector_loader.py 
b/watcher/tests/decision_engine/loading/test_collector_loader.py deleted file mode 100644 index 049c348..0000000 --- a/watcher/tests/decision_engine/loading/test_collector_loader.py +++ /dev/null @@ -1,82 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import mock -from stevedore import driver as drivermanager -from stevedore import extension as stevedore_extension - -from watcher.common import clients -from watcher.common import exception -from watcher.decision_engine.loading import default as default_loading -from watcher.tests import base -from watcher.tests import conf_fixture -from watcher.tests.decision_engine.model import faker_cluster_state - - -class TestClusterDataModelCollectorLoader(base.TestCase): - - def setUp(self): - super(TestClusterDataModelCollectorLoader, self).setUp() - self.useFixture(conf_fixture.ConfReloadFixture()) - self.collector_loader = ( - default_loading.ClusterDataModelCollectorLoader()) - - def test_load_collector_with_empty_model(self): - self.assertRaises( - exception.LoadingError, self.collector_loader.load, None) - - def test_collector_loader(self): - fake_driver = "fake" - # Set up the fake Stevedore extensions - fake_driver_call = drivermanager.DriverManager.make_test_instance( - extension=stevedore_extension.Extension( - name=fake_driver, - entry_point="%s:%s" % ( - faker_cluster_state.FakerModelCollector.__module__, - 
faker_cluster_state.FakerModelCollector.__name__), - plugin=faker_cluster_state.FakerModelCollector, - obj=None, - ), - namespace="watcher_cluster_data_model_collectors", - ) - - with mock.patch.object(drivermanager, - "DriverManager") as m_driver_manager: - m_driver_manager.return_value = fake_driver_call - loaded_collector = self.collector_loader.load("fake") - - self.assertIsInstance( - loaded_collector, faker_cluster_state.FakerModelCollector) - - -class TestLoadClusterDataModelCollectors(base.TestCase): - - collector_loader = default_loading.ClusterDataModelCollectorLoader() - - scenarios = [ - (collector_name, - {"collector_name": collector_name, "collector_cls": collector_cls}) - for collector_name, collector_cls - in collector_loader.list_available().items()] - - def setUp(self): - super(TestLoadClusterDataModelCollectors, self).setUp() - self.useFixture(conf_fixture.ConfReloadFixture()) - - @mock.patch.object(clients, 'OpenStackClients', mock.Mock()) - def test_load_cluster_data_model_collectors(self): - collector = self.collector_loader.load(self.collector_name) - self.assertIsNotNone(collector) diff --git a/watcher/tests/decision_engine/loading/test_default_planner_loader.py b/watcher/tests/decision_engine/loading/test_default_planner_loader.py deleted file mode 100644 index 0354da9..0000000 --- a/watcher/tests/decision_engine/loading/test_default_planner_loader.py +++ /dev/null @@ -1,31 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -from watcher.decision_engine.loading import default -from watcher.decision_engine.planner import base as planner -from watcher.tests import base - - -class TestDefaultPlannerLoader(base.TestCase): - def setUp(self): - super(TestDefaultPlannerLoader, self).setUp() - self.loader = default.DefaultPlannerLoader() - - def test_endpoints(self): - for endpoint in self.loader.list_available(): - loaded = self.loader.load(endpoint) - self.assertIsNotNone(loaded) - self.assertIsInstance(loaded, planner.BasePlanner) diff --git a/watcher/tests/decision_engine/loading/test_default_strategy_loader.py b/watcher/tests/decision_engine/loading/test_default_strategy_loader.py deleted file mode 100644 index 5bb3c58..0000000 --- a/watcher/tests/decision_engine/loading/test_default_strategy_loader.py +++ /dev/null @@ -1,78 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import mock -from stevedore import extension - -from watcher.common import exception -from watcher.decision_engine.loading import default as default_loading -from watcher.decision_engine.strategy.strategies import dummy_strategy -from watcher.tests import base - - -class TestDefaultStrategyLoader(base.TestCase): - - def setUp(self): - super(TestDefaultStrategyLoader, self).setUp() - self.strategy_loader = default_loading.DefaultStrategyLoader() - - def test_load_strategy_with_empty_model(self): - self.assertRaises( - exception.LoadingError, self.strategy_loader.load, None) - - def test_strategy_loader(self): - dummy_strategy_name = "dummy" - # Set up the fake Stevedore extensions - fake_extmanager_call = extension.ExtensionManager.make_test_instance( - extensions=[extension.Extension( - name=dummy_strategy_name, - entry_point="%s:%s" % ( - dummy_strategy.DummyStrategy.__module__, - dummy_strategy.DummyStrategy.__name__), - plugin=dummy_strategy.DummyStrategy, - obj=None, - )], - namespace="watcher_strategies", - ) - - with mock.patch.object(extension, "ExtensionManager") as m_ext_manager: - m_ext_manager.return_value = fake_extmanager_call - loaded_strategy = self.strategy_loader.load( - "dummy") - - self.assertEqual("dummy", loaded_strategy.name) - self.assertEqual("Dummy strategy", loaded_strategy.display_name) - - def test_load_dummy_strategy(self): - strategy_loader = default_loading.DefaultStrategyLoader() - loaded_strategy = strategy_loader.load("dummy") - self.assertIsInstance(loaded_strategy, dummy_strategy.DummyStrategy) - - -class TestLoadStrategiesWithDefaultStrategyLoader(base.TestCase): - - strategy_loader = default_loading.DefaultStrategyLoader() - - scenarios = [ - (strategy_name, - {"strategy_name": strategy_name, "strategy_cls": strategy_cls}) - for strategy_name, strategy_cls - in strategy_loader.list_available().items()] - - def test_load_strategies(self): - strategy = self.strategy_loader.load(self.strategy_name) - 
self.assertIsNotNone(strategy) - self.assertEqual(self.strategy_name, strategy.name) diff --git a/watcher/tests/decision_engine/loading/test_goal_loader.py b/watcher/tests/decision_engine/loading/test_goal_loader.py deleted file mode 100644 index 13ab618..0000000 --- a/watcher/tests/decision_engine/loading/test_goal_loader.py +++ /dev/null @@ -1,78 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import mock -from stevedore import extension - -from watcher.common import exception -from watcher.decision_engine.goal import goals -from watcher.decision_engine.loading import default as default_loading -from watcher.tests import base - - -class TestDefaultGoalLoader(base.TestCase): - - def setUp(self): - super(TestDefaultGoalLoader, self).setUp() - self.goal_loader = default_loading.DefaultGoalLoader() - - def test_load_goal_with_empty_model(self): - self.assertRaises( - exception.LoadingError, self.goal_loader.load, None) - - def test_goal_loader(self): - dummy_goal_name = "dummy" - # Set up the fake Stevedore extensions - fake_extmanager_call = extension.ExtensionManager.make_test_instance( - extensions=[extension.Extension( - name=dummy_goal_name, - entry_point="%s:%s" % ( - goals.Dummy.__module__, - goals.Dummy.__name__), - plugin=goals.Dummy, - obj=None, - )], - namespace="watcher_goals", - ) - - with mock.patch.object(extension, "ExtensionManager") as m_ext_manager: - m_ext_manager.return_value = 
fake_extmanager_call - loaded_goal = self.goal_loader.load("dummy") - - self.assertEqual("dummy", loaded_goal.name) - self.assertEqual("Dummy goal", loaded_goal.display_name) - - def test_load_dummy_goal(self): - goal_loader = default_loading.DefaultGoalLoader() - loaded_goal = goal_loader.load("dummy") - self.assertIsInstance(loaded_goal, goals.Dummy) - - -class TestLoadGoalsWithDefaultGoalLoader(base.TestCase): - - goal_loader = default_loading.DefaultGoalLoader() - - # test matrix (1 test execution per goal entry point) - scenarios = [ - (goal_name, - {"goal_name": goal_name, "goal_cls": goal_cls}) - for goal_name, goal_cls - in goal_loader.list_available().items()] - - def test_load_goals(self): - goal = self.goal_loader.load(self.goal_name) - self.assertIsNotNone(goal) - self.assertEqual(self.goal_name, goal.name) diff --git a/watcher/tests/decision_engine/messaging/__init__.py b/watcher/tests/decision_engine/messaging/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/tests/decision_engine/messaging/test_audit_endpoint.py b/watcher/tests/decision_engine/messaging/test_audit_endpoint.py deleted file mode 100644 index 2a72cba..0000000 --- a/watcher/tests/decision_engine/messaging/test_audit_endpoint.py +++ /dev/null @@ -1,68 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import mock - -from watcher.decision_engine.audit import continuous as continuous_handler -from watcher.decision_engine.audit import oneshot as oneshot_handler -from watcher.decision_engine.messaging import audit_endpoint -from watcher.decision_engine.model.collector import manager -from watcher.tests.db import base -from watcher.tests.decision_engine.model import faker_cluster_state -from watcher.tests.objects import utils as obj_utils - - -class TestAuditEndpoint(base.DbTestCase): - def setUp(self): - super(TestAuditEndpoint, self).setUp() - self.goal = obj_utils.create_test_goal(self.context) - self.audit_template = obj_utils.create_test_audit_template( - self.context) - self.audit = obj_utils.create_test_audit( - self.context, - audit_template_id=self.audit_template.id) - - @mock.patch.object(continuous_handler.ContinuousAuditHandler, 'start') - @mock.patch.object(manager.CollectorManager, "get_cluster_model_collector") - def test_do_trigger_audit(self, mock_collector, mock_handler): - mock_collector.return_value = faker_cluster_state.FakerModelCollector() - - audit_handler = oneshot_handler.OneShotAuditHandler - endpoint = audit_endpoint.AuditEndpoint(audit_handler) - - with mock.patch.object(oneshot_handler.OneShotAuditHandler, - 'execute') as mock_call: - mock_call.return_value = 0 - endpoint.do_trigger_audit(self.context, self.audit.uuid) - - self.assertEqual(mock_call.call_count, 1) - - @mock.patch.object(continuous_handler.ContinuousAuditHandler, 'start') - @mock.patch.object(manager.CollectorManager, "get_cluster_model_collector") - def test_trigger_audit(self, mock_collector, mock_handler): - mock_collector.return_value = faker_cluster_state.FakerModelCollector() - - audit_handler = oneshot_handler.OneShotAuditHandler - endpoint = audit_endpoint.AuditEndpoint(audit_handler) - - with mock.patch.object(endpoint.executor, 'submit') as mock_call: - mock_execute = mock.call(endpoint.do_trigger_audit, - self.context, - self.audit.uuid) - 
endpoint.trigger_audit(self.context, self.audit.uuid) - - mock_call.assert_has_calls([mock_execute]) - self.assertEqual(mock_call.call_count, 1) diff --git a/watcher/tests/decision_engine/model/__init__.py b/watcher/tests/decision_engine/model/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/tests/decision_engine/model/ceilometer_metrics.py b/watcher/tests/decision_engine/model/ceilometer_metrics.py deleted file mode 100644 index 9c5d336..0000000 --- a/watcher/tests/decision_engine/model/ceilometer_metrics.py +++ /dev/null @@ -1,295 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Authors: Jean-Emile DARTOIS -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import oslo_utils - - -class FakeCeilometerMetrics(object): - def __init__(self): - self.emptytype = "" - - def empty_one_metric(self, emptytype): - self.emptytype = emptytype - - def mock_get_statistics(self, resource_id, meter_name, period, - aggregate='avg'): - result = 0 - if meter_name == "hardware.cpu.util": - result = self.get_usage_node_cpu(resource_id) - elif meter_name == "compute.node.cpu.percent": - result = self.get_usage_node_cpu(resource_id) - elif meter_name == "hardware.memory.used": - result = self.get_usage_node_ram(resource_id) - elif meter_name == "cpu_util": - result = self.get_average_usage_instance_cpu(resource_id) - elif meter_name == "memory.resident": - result = self.get_average_usage_instance_memory(resource_id) - elif meter_name == "hardware.ipmi.node.outlet_temperature": - result = self.get_average_outlet_temperature(resource_id) - elif meter_name == "hardware.ipmi.node.airflow": - result = self.get_average_airflow(resource_id) - elif meter_name == "hardware.ipmi.node.temperature": - result = self.get_average_inlet_t(resource_id) - elif meter_name == "hardware.ipmi.node.power": - result = self.get_average_power(resource_id) - return result - - def mock_get_statistics_wb(self, resource_id, meter_name, period, - aggregate='avg'): - result = 0.0 - if meter_name == "cpu_util": - result = self.get_average_usage_instance_cpu_wb(resource_id) - return result - - def mock_get_statistics_nn(self, resource_id, meter_name, period, - aggregate='avg'): - result = 0.0 - if meter_name == "cpu_l3_cache" and period == 100: - result = self.get_average_l3_cache_current(resource_id) - if meter_name == "cpu_l3_cache" and period == 200: - result = self.get_average_l3_cache_previous(resource_id) - return result - - @staticmethod - def get_average_l3_cache_current(uuid): - """The average l3 cache used by instance""" - mock = {} - mock['73b09e16-35b7-4922-804e-e8f5d9b740fc'] = 35 * oslo_utils.units.Ki - mock['cae81432-1631-4d4e-b29c-6f3acdcde906'] = 30 * 
oslo_utils.units.Ki - mock['INSTANCE_3'] = 40 * oslo_utils.units.Ki - mock['INSTANCE_4'] = 35 * oslo_utils.units.Ki - if uuid not in mock.keys(): - mock[uuid] = 25 * oslo_utils.units.Ki - return mock[str(uuid)] - - @staticmethod - def get_average_l3_cache_previous(uuid): - """The average l3 cache used by instance""" - mock = {} - mock['73b09e16-35b7-4922-804e-e8f5d9b740fc'] = 34.5 * ( - oslo_utils.units.Ki) - mock['cae81432-1631-4d4e-b29c-6f3acdcde906'] = 30.5 * ( - oslo_utils.units.Ki) - mock['INSTANCE_3'] = 60 * oslo_utils.units.Ki - mock['INSTANCE_4'] = 22.5 * oslo_utils.units.Ki - if uuid not in mock.keys(): - mock[uuid] = 25 * oslo_utils.units.Ki - return mock[str(uuid)] - - @staticmethod - def get_average_outlet_temperature(uuid): - """The average outlet temperature for host""" - mock = {} - mock['Node_0'] = 30 - # use a big value to make sure it exceeds threshold - mock['Node_1'] = 100 - if uuid not in mock.keys(): - mock[uuid] = 100 - return float(mock[str(uuid)]) - - @staticmethod - def get_usage_node_ram(uuid): - mock = {} - # Ceilometer returns hardware.memory.used samples in KB. 
- mock['Node_0'] = 7 * oslo_utils.units.Ki - mock['Node_1'] = 5 * oslo_utils.units.Ki - mock['Node_2'] = 29 * oslo_utils.units.Ki - mock['Node_3'] = 8 * oslo_utils.units.Ki - mock['Node_4'] = 4 * oslo_utils.units.Ki - - if uuid not in mock.keys(): - # mock[uuid] = random.randint(1, 4) - mock[uuid] = 8 - - return float(mock[str(uuid)]) - - @staticmethod - def get_average_airflow(uuid): - """The average outlet temperature for host""" - mock = {} - mock['Node_0'] = 400 - # use a big value to make sure it exceeds threshold - mock['Node_1'] = 100 - if uuid not in mock.keys(): - mock[uuid] = 200 - return mock[str(uuid)] - - @staticmethod - def get_average_inlet_t(uuid): - """The average outlet temperature for host""" - mock = {} - mock['Node_0'] = 24 - mock['Node_1'] = 26 - if uuid not in mock.keys(): - mock[uuid] = 28 - return mock[str(uuid)] - - @staticmethod - def get_average_power(uuid): - """The average outlet temperature for host""" - mock = {} - mock['Node_0'] = 260 - mock['Node_1'] = 240 - if uuid not in mock.keys(): - mock[uuid] = 200 - return mock[str(uuid)] - - @staticmethod - def get_usage_node_cpu(uuid): - """The last VM CPU usage values to average - - :param uuid:00 - :return: - """ - # query influxdb stream - - # compute in stream - - # Normalize - mock = {} - # node 0 - mock['Node_0_hostname_0'] = 7 - mock['Node_1_hostname_1'] = 7 - # node 1 - mock['Node_2_hostname_2'] = 80 - # node 2 - mock['Node_3_hostname_3'] = 5 - mock['Node_4_hostname_4'] = 5 - mock['Node_5_hostname_5'] = 10 - - # node 3 - mock['Node_6_hostname_6'] = 8 - mock['Node_19_hostname_19'] = 10 - # node 4 - mock['INSTANCE_7_hostname_7'] = 4 - - mock['Node_0'] = 7 - mock['Node_1'] = 5 - mock['Node_2'] = 10 - mock['Node_3'] = 4 - mock['Node_4'] = 2 - - if uuid not in mock.keys(): - # mock[uuid] = random.randint(1, 4) - mock[uuid] = 8 - - return float(mock[str(uuid)]) - - @staticmethod - def get_average_usage_instance_cpu_wb(uuid): - """The last VM CPU usage values to average - - :param uuid:00 
- :return: - """ - # query influxdb stream - - # compute in stream - - # Normalize - mock = {} - # node 0 - mock['INSTANCE_1'] = 80 - mock['73b09e16-35b7-4922-804e-e8f5d9b740fc'] = 50 - # node 1 - mock['INSTANCE_3'] = 20 - mock['INSTANCE_4'] = 10 - return float(mock[str(uuid)]) - - @staticmethod - def get_average_usage_instance_cpu(uuid): - """The last VM CPU usage values to average - - :param uuid:00 - :return: - """ - # query influxdb stream - - # compute in stream - - # Normalize - mock = {} - # node 0 - mock['INSTANCE_0'] = 7 - mock['INSTANCE_1'] = 7 - # node 1 - mock['INSTANCE_2'] = 10 - # node 2 - mock['INSTANCE_3'] = 5 - mock['INSTANCE_4'] = 5 - mock['INSTANCE_5'] = 10 - - # node 3 - mock['INSTANCE_6'] = 8 - - # node 4 - mock['INSTANCE_7'] = 4 - if uuid not in mock.keys(): - # mock[uuid] = random.randint(1, 4) - mock[uuid] = 8 - - return mock[str(uuid)] - - @staticmethod - def get_average_usage_instance_memory(uuid): - mock = {} - # node 0 - mock['INSTANCE_0'] = 2 - mock['INSTANCE_1'] = 5 - # node 1 - mock['INSTANCE_2'] = 5 - # node 2 - mock['INSTANCE_3'] = 8 - mock['INSTANCE_4'] = 5 - mock['INSTANCE_5'] = 16 - - # node 3 - mock['INSTANCE_6'] = 8 - - # node 4 - mock['INSTANCE_7'] = 4 - if uuid not in mock.keys(): - # mock[uuid] = random.randint(1, 4) - mock[uuid] = 10 - - return mock[str(uuid)] - - @staticmethod - def get_average_usage_instance_disk(uuid): - mock = {} - # node 0 - mock['INSTANCE_0'] = 2 - mock['INSTANCE_1'] = 2 - # node 1 - mock['INSTANCE_2'] = 2 - # node 2 - mock['INSTANCE_3'] = 10 - mock['INSTANCE_4'] = 15 - mock['INSTANCE_5'] = 20 - - # node 3 - mock['INSTANCE_6'] = 8 - - # node 4 - mock['INSTANCE_7'] = 4 - - if uuid not in mock.keys(): - # mock[uuid] = random.randint(1, 4) - mock[uuid] = 4 - - return mock[str(uuid)] diff --git a/watcher/tests/decision_engine/model/data/scenario_1.xml b/watcher/tests/decision_engine/model/data/scenario_1.xml deleted file mode 100644 index 7476af2..0000000 --- 
a/watcher/tests/decision_engine/model/data/scenario_1.xml +++ /dev/null @@ -1,47 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/watcher/tests/decision_engine/model/data/scenario_1_with_metrics.xml b/watcher/tests/decision_engine/model/data/scenario_1_with_metrics.xml deleted file mode 100644 index d8b80af..0000000 --- a/watcher/tests/decision_engine/model/data/scenario_1_with_metrics.xml +++ /dev/null @@ -1,8 +0,0 @@ - - - - - - - - diff --git a/watcher/tests/decision_engine/model/data/scenario_2_with_metrics.xml b/watcher/tests/decision_engine/model/data/scenario_2_with_metrics.xml deleted file mode 100644 index 668ef19..0000000 --- a/watcher/tests/decision_engine/model/data/scenario_2_with_metrics.xml +++ /dev/null @@ -1,13 +0,0 @@ - - - - - - - - - - - - - diff --git a/watcher/tests/decision_engine/model/data/scenario_3_with_2_nodes.xml b/watcher/tests/decision_engine/model/data/scenario_3_with_2_nodes.xml deleted file mode 100644 index 189d81e..0000000 --- a/watcher/tests/decision_engine/model/data/scenario_3_with_2_nodes.xml +++ /dev/null @@ -1,8 +0,0 @@ - - - - - - - - diff --git a/watcher/tests/decision_engine/model/data/scenario_3_with_metrics.xml b/watcher/tests/decision_engine/model/data/scenario_3_with_metrics.xml deleted file mode 100644 index 5c60799..0000000 --- a/watcher/tests/decision_engine/model/data/scenario_3_with_metrics.xml +++ /dev/null @@ -1,9 +0,0 @@ - - - - - - - - - diff --git a/watcher/tests/decision_engine/model/data/scenario_4_with_1_node_no_instance.xml b/watcher/tests/decision_engine/model/data/scenario_4_with_1_node_no_instance.xml deleted file mode 100644 index 963beca..0000000 --- a/watcher/tests/decision_engine/model/data/scenario_4_with_1_node_no_instance.xml +++ /dev/null @@ -1,3 +0,0 @@ - - - diff --git a/watcher/tests/decision_engine/model/data/scenario_5_with_instance_disk_0.xml b/watcher/tests/decision_engine/model/data/scenario_5_with_instance_disk_0.xml 
deleted file mode 100644 index a8bffab..0000000 --- a/watcher/tests/decision_engine/model/data/scenario_5_with_instance_disk_0.xml +++ /dev/null @@ -1,5 +0,0 @@ - - - - - diff --git a/watcher/tests/decision_engine/model/data/scenario_6_with_2_nodes.xml b/watcher/tests/decision_engine/model/data/scenario_6_with_2_nodes.xml deleted file mode 100644 index c12eaba..0000000 --- a/watcher/tests/decision_engine/model/data/scenario_6_with_2_nodes.xml +++ /dev/null @@ -1,10 +0,0 @@ - - - - - - - - - - diff --git a/watcher/tests/decision_engine/model/data/scenario_7_with_2_nodes.xml b/watcher/tests/decision_engine/model/data/scenario_7_with_2_nodes.xml deleted file mode 100644 index cf86c00..0000000 --- a/watcher/tests/decision_engine/model/data/scenario_7_with_2_nodes.xml +++ /dev/null @@ -1,10 +0,0 @@ - - - - - - - - - - diff --git a/watcher/tests/decision_engine/model/data/scenario_8_with_4_nodes.xml b/watcher/tests/decision_engine/model/data/scenario_8_with_4_nodes.xml deleted file mode 100644 index a646c6e..0000000 --- a/watcher/tests/decision_engine/model/data/scenario_8_with_4_nodes.xml +++ /dev/null @@ -1,16 +0,0 @@ - - - - - - - - - - - - - - - - diff --git a/watcher/tests/decision_engine/model/data/scenario_9_with_3_active_plus_1_disabled_nodes.xml b/watcher/tests/decision_engine/model/data/scenario_9_with_3_active_plus_1_disabled_nodes.xml deleted file mode 100644 index d1d3f94..0000000 --- a/watcher/tests/decision_engine/model/data/scenario_9_with_3_active_plus_1_disabled_nodes.xml +++ /dev/null @@ -1,16 +0,0 @@ - - - - - - - - - - - - - - - - diff --git a/watcher/tests/decision_engine/model/data/storage_scenario_1.xml b/watcher/tests/decision_engine/model/data/storage_scenario_1.xml deleted file mode 100644 index af2e416..0000000 --- a/watcher/tests/decision_engine/model/data/storage_scenario_1.xml +++ /dev/null @@ -1,23 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - diff --git a/watcher/tests/decision_engine/model/faker_cluster_and_metrics.py 
b/watcher/tests/decision_engine/model/faker_cluster_and_metrics.py deleted file mode 100644 index 3c40d66..0000000 --- a/watcher/tests/decision_engine/model/faker_cluster_and_metrics.py +++ /dev/null @@ -1,242 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Authors: Vojtech CIMA -# Bruno GRAZIOLI -# Sean MURPHY -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os - -import mock - -from watcher.decision_engine.model.collector import base -from watcher.decision_engine.model import model_root as modelroot - - -class FakerModelCollector(base.BaseClusterDataModelCollector): - - def __init__(self, config=None, osc=None): - if config is None: - config = mock.Mock() - super(FakerModelCollector, self).__init__(config) - - @property - def notification_endpoints(self): - return [] - - def execute(self): - return self.generate_scenario_1() - - def load_data(self, filename): - cwd = os.path.abspath(os.path.dirname(__file__)) - data_folder = os.path.join(cwd, "data") - - with open(os.path.join(data_folder, filename), 'rb') as xml_file: - xml_data = xml_file.read() - - return xml_data - - def load_model(self, filename): - return modelroot.ModelRoot.from_xml(self.load_data(filename)) - - def generate_scenario_1(self): - """Simulates cluster with 2 nodes and 2 instances using 1:1 mapping""" - return self.load_model('scenario_1_with_metrics.xml') - - def generate_scenario_2(self): - """Simulates a cluster - - With 4 nodes and 6 instances all mapped to a single node - """ - return 
self.load_model('scenario_2_with_metrics.xml') - - def generate_scenario_3(self): - """Simulates a cluster - - With 4 nodes and 6 instances all mapped to one node - """ - return self.load_model('scenario_3_with_metrics.xml') - - def generate_scenario_4(self): - """Simulates a cluster - - With 4 nodes and 6 instances spread on all nodes - """ - return self.load_model('scenario_4_with_metrics.xml') - - -class FakeCeilometerMetrics(object): - def __init__(self, model): - self.model = model - - def mock_get_statistics(self, resource_id, meter_name, period=3600, - aggregate='avg'): - if meter_name == "compute.node.cpu.percent": - return self.get_node_cpu_util(resource_id) - elif meter_name == "cpu_util": - return self.get_instance_cpu_util(resource_id) - elif meter_name == "memory.usage": - return self.get_instance_ram_util(resource_id) - elif meter_name == "disk.root.size": - return self.get_instance_disk_root_size(resource_id) - - def get_node_cpu_util(self, r_id): - """Calculates node utilization dynamicaly. - - node CPU utilization should consider - and corelate with actual instance-node mappings - provided within a cluster model. - Returns relative node CPU utilization <0, 100>. 
- :param r_id: resource id - """ - node_uuid = '%s_%s' % (r_id.split('_')[0], r_id.split('_')[1]) - node = self.model.get_node_by_uuid(node_uuid) - instances = self.model.get_node_instances(node) - util_sum = 0.0 - for instance_uuid in instances: - instance = self.model.get_instance_by_uuid(instance_uuid) - total_cpu_util = instance.vcpus * self.get_instance_cpu_util( - instance.uuid) - util_sum += total_cpu_util / 100.0 - util_sum /= node.vcpus - return util_sum * 100.0 - - @staticmethod - def get_instance_cpu_util(r_id): - instance_cpu_util = dict() - instance_cpu_util['INSTANCE_0'] = 10 - instance_cpu_util['INSTANCE_1'] = 30 - instance_cpu_util['INSTANCE_2'] = 60 - instance_cpu_util['INSTANCE_3'] = 20 - instance_cpu_util['INSTANCE_4'] = 40 - instance_cpu_util['INSTANCE_5'] = 50 - instance_cpu_util['INSTANCE_6'] = 100 - instance_cpu_util['INSTANCE_7'] = 100 - instance_cpu_util['INSTANCE_8'] = 100 - instance_cpu_util['INSTANCE_9'] = 100 - return instance_cpu_util[str(r_id)] - - @staticmethod - def get_instance_ram_util(r_id): - instance_ram_util = dict() - instance_ram_util['INSTANCE_0'] = 1 - instance_ram_util['INSTANCE_1'] = 2 - instance_ram_util['INSTANCE_2'] = 4 - instance_ram_util['INSTANCE_3'] = 8 - instance_ram_util['INSTANCE_4'] = 3 - instance_ram_util['INSTANCE_5'] = 2 - instance_ram_util['INSTANCE_6'] = 1 - instance_ram_util['INSTANCE_7'] = 2 - instance_ram_util['INSTANCE_8'] = 4 - instance_ram_util['INSTANCE_9'] = 8 - return instance_ram_util[str(r_id)] - - @staticmethod - def get_instance_disk_root_size(r_id): - instance_disk_util = dict() - instance_disk_util['INSTANCE_0'] = 10 - instance_disk_util['INSTANCE_1'] = 15 - instance_disk_util['INSTANCE_2'] = 30 - instance_disk_util['INSTANCE_3'] = 35 - instance_disk_util['INSTANCE_4'] = 20 - instance_disk_util['INSTANCE_5'] = 25 - instance_disk_util['INSTANCE_6'] = 25 - instance_disk_util['INSTANCE_7'] = 25 - instance_disk_util['INSTANCE_8'] = 25 - instance_disk_util['INSTANCE_9'] = 25 - return 
instance_disk_util[str(r_id)] - - -class FakeGnocchiMetrics(object): - def __init__(self, model): - self.model = model - - def mock_get_statistics(self, resource_id, metric, granularity, - start_time, stop_time, aggregation='mean'): - if metric == "compute.node.cpu.percent": - return self.get_node_cpu_util(resource_id) - elif metric == "cpu_util": - return self.get_instance_cpu_util(resource_id) - elif metric == "memory.usage": - return self.get_instance_ram_util(resource_id) - elif metric == "disk.root.size": - return self.get_instance_disk_root_size(resource_id) - - def get_node_cpu_util(self, r_id): - """Calculates node utilization dynamicaly. - - node CPU utilization should consider - and corelate with actual instance-node mappings - provided within a cluster model. - Returns relative node CPU utilization <0, 100>. - - :param r_id: resource id - """ - node_uuid = '%s_%s' % (r_id.split('_')[0], r_id.split('_')[1]) - node = self.model.get_node_by_uuid(node_uuid) - instances = self.model.get_node_instances(node) - util_sum = 0.0 - for instance_uuid in instances: - instance = self.model.get_instance_by_uuid(instance_uuid) - total_cpu_util = instance.vcpus * self.get_instance_cpu_util( - instance.uuid) - util_sum += total_cpu_util / 100.0 - util_sum /= node.vcpus - return util_sum * 100.0 - - @staticmethod - def get_instance_cpu_util(r_id): - instance_cpu_util = dict() - instance_cpu_util['INSTANCE_0'] = 10 - instance_cpu_util['INSTANCE_1'] = 30 - instance_cpu_util['INSTANCE_2'] = 60 - instance_cpu_util['INSTANCE_3'] = 20 - instance_cpu_util['INSTANCE_4'] = 40 - instance_cpu_util['INSTANCE_5'] = 50 - instance_cpu_util['INSTANCE_6'] = 100 - instance_cpu_util['INSTANCE_7'] = 100 - instance_cpu_util['INSTANCE_8'] = 100 - instance_cpu_util['INSTANCE_9'] = 100 - return instance_cpu_util[str(r_id)] - - @staticmethod - def get_instance_ram_util(r_id): - instance_ram_util = dict() - instance_ram_util['INSTANCE_0'] = 1 - instance_ram_util['INSTANCE_1'] = 2 - 
instance_ram_util['INSTANCE_2'] = 4 - instance_ram_util['INSTANCE_3'] = 8 - instance_ram_util['INSTANCE_4'] = 3 - instance_ram_util['INSTANCE_5'] = 2 - instance_ram_util['INSTANCE_6'] = 1 - instance_ram_util['INSTANCE_7'] = 2 - instance_ram_util['INSTANCE_8'] = 4 - instance_ram_util['INSTANCE_9'] = 8 - return instance_ram_util[str(r_id)] - - @staticmethod - def get_instance_disk_root_size(r_id): - instance_disk_util = dict() - instance_disk_util['INSTANCE_0'] = 10 - instance_disk_util['INSTANCE_1'] = 15 - instance_disk_util['INSTANCE_2'] = 30 - instance_disk_util['INSTANCE_3'] = 35 - instance_disk_util['INSTANCE_4'] = 20 - instance_disk_util['INSTANCE_5'] = 25 - instance_disk_util['INSTANCE_6'] = 25 - instance_disk_util['INSTANCE_7'] = 25 - instance_disk_util['INSTANCE_8'] = 25 - instance_disk_util['INSTANCE_9'] = 25 - return instance_disk_util[str(r_id)] diff --git a/watcher/tests/decision_engine/model/faker_cluster_state.py b/watcher/tests/decision_engine/model/faker_cluster_state.py deleted file mode 100644 index 1893544..0000000 --- a/watcher/tests/decision_engine/model/faker_cluster_state.py +++ /dev/null @@ -1,257 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Authors: Jean-Emile DARTOIS -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import os - -import mock - -from watcher.decision_engine.model.collector import base -from watcher.decision_engine.model import element -from watcher.decision_engine.model import model_root as modelroot - - -class FakerModelCollector(base.BaseClusterDataModelCollector): - - def __init__(self, config=None, osc=None): - if config is None: - config = mock.Mock(period=777) - super(FakerModelCollector, self).__init__(config) - - @property - def notification_endpoints(self): - return [] - - def load_data(self, filename): - cwd = os.path.abspath(os.path.dirname(__file__)) - data_folder = os.path.join(cwd, "data") - - with open(os.path.join(data_folder, filename), 'rb') as xml_file: - xml_data = xml_file.read() - - return xml_data - - def load_model(self, filename): - return modelroot.ModelRoot.from_xml(self.load_data(filename)) - - def execute(self): - return self._cluster_data_model or self.build_scenario_1() - - def build_scenario_1(self): - instances = [] - - model = modelroot.ModelRoot() - # number of nodes - node_count = 5 - # number max of instance per node - node_instance_count = 7 - # total number of virtual machine - instance_count = (node_count * node_instance_count) - - for id_ in range(0, node_count): - node_uuid = "Node_{0}".format(id_) - hostname = "hostname_{0}".format(id_) - node_attributes = { - "id": id_, - "uuid": node_uuid, - "hostname": hostname, - "memory": 132, - "disk": 250, - "disk_capacity": 250, - "vcpus": 40, - } - node = element.ComputeNode(**node_attributes) - model.add_node(node) - - for i in range(0, instance_count): - instance_uuid = "INSTANCE_{0}".format(i) - instance_attributes = { - "uuid": instance_uuid, - "memory": 2, - "disk": 20, - "disk_capacity": 20, - "vcpus": 10, - "metadata": - '{"optimize": true,"top": "floor","nested": {"x": "y"}}' - } - - instance = element.Instance(**instance_attributes) - instances.append(instance) - model.add_instance(instance) - - mappings = [ - ("INSTANCE_0", "Node_0"), - ("INSTANCE_1", "Node_0"), - 
("INSTANCE_2", "Node_1"), - ("INSTANCE_3", "Node_2"), - ("INSTANCE_4", "Node_2"), - ("INSTANCE_5", "Node_2"), - ("INSTANCE_6", "Node_3"), - ("INSTANCE_7", "Node_4"), - ] - for instance_uuid, node_uuid in mappings: - model.map_instance( - model.get_instance_by_uuid(instance_uuid), - model.get_node_by_uuid(node_uuid), - ) - - return model - - def generate_scenario_1(self): - return self.load_model('scenario_1.xml') - - def generate_scenario_3_with_2_nodes(self): - return self.load_model('scenario_3_with_2_nodes.xml') - - def generate_scenario_4_with_1_node_no_instance(self): - return self.load_model('scenario_4_with_1_node_no_instance.xml') - - def generate_scenario_5_with_instance_disk_0(self): - return self.load_model('scenario_5_with_instance_disk_0.xml') - - def generate_scenario_6_with_2_nodes(self): - return self.load_model('scenario_6_with_2_nodes.xml') - - def generate_scenario_7_with_2_nodes(self): - return self.load_model('scenario_7_with_2_nodes.xml') - - def generate_scenario_8_with_4_nodes(self): - return self.load_model('scenario_8_with_4_nodes.xml') - - def generate_scenario_9_with_3_active_plus_1_disabled_nodes(self): - return self.load_model( - 'scenario_9_with_3_active_plus_1_disabled_nodes.xml') - - -class FakerStorageModelCollector(base.BaseClusterDataModelCollector): - - def __init__(self, config=None, osc=None): - if config is None: - config = mock.Mock(period=777) - super(FakerStorageModelCollector, self).__init__(config) - - @property - def notification_endpoints(self): - return [] - - def load_data(self, filename): - cwd = os.path.abspath(os.path.dirname(__file__)) - data_folder = os.path.join(cwd, "data") - - with open(os.path.join(data_folder, filename), 'rb') as xml_file: - xml_data = xml_file.read() - - return xml_data - - def load_model(self, filename): - return modelroot.StorageModelRoot.from_xml(self.load_data(filename)) - - def execute(self): - return self._cluster_data_model or self.build_scenario_1() - - def build_scenario_1(self): 
- - model = modelroot.StorageModelRoot() - # number of nodes - node_count = 2 - # number of pools per node - pool_count = 2 - # number of volumes - volume_count = 9 - - for i in range(0, node_count): - host = "host_{0}@backend_{0}".format(i) - zone = "zone_{0}".format(i) - volume_type = "type_{0}".format(i) - node_attributes = { - "host": host, - "zone": zone, - "status": 'enabled', - "state": 'up', - "volume_type": volume_type, - } - node = element.StorageNode(**node_attributes) - model.add_node(node) - - for j in range(0, pool_count): - name = "host_{0}@backend_{0}#pool_{1}".format(i, j) - pool_attributes = { - "name": name, - "total_volumes": 2, - "total_capacity_gb": 500, - "free_capacity_gb": 420, - "provisioned_capacity_gb": 80, - "allocated_capacity_gb": 80, - "virtual_free": 420, - } - pool = element.Pool(**pool_attributes) - model.add_pool(pool) - - mappings = [ - ("host_0@backend_0#pool_0", "host_0@backend_0"), - ("host_0@backend_0#pool_1", "host_0@backend_0"), - ("host_1@backend_1#pool_0", "host_1@backend_1"), - ("host_1@backend_1#pool_1", "host_1@backend_1"), - ] - - for pool_name, node_name in mappings: - model.map_pool( - model.get_pool_by_pool_name(pool_name), - model.get_node_by_name(node_name), - ) - - for k in range(volume_count): - uuid = "VOLUME_{0}".format(k) - name = "name_{0}".format(k) - project_id = "project_{0}".format(k) - volume_attributes = { - "size": 40, - "status": "in-use", - "uuid": uuid, - "attachments": - '[{"server_id": "server","attachment_id": "attachment"}]', - "name": name, - "multiattach": 'True', - "snapshot_id": uuid, - "project_id": project_id, - "metadata": '{"readonly": false,"attached_mode": "rw"}', - "bootable": 'False' - } - volume = element.Volume(**volume_attributes) - model.add_volume(volume) - - mappings = [ - ("VOLUME_0", "host_0@backend_0#pool_0"), - ("VOLUME_1", "host_0@backend_0#pool_0"), - ("VOLUME_2", "host_0@backend_0#pool_1"), - ("VOLUME_3", "host_0@backend_0#pool_1"), - ("VOLUME_4", 
"host_1@backend_1#pool_0"), - ("VOLUME_5", "host_1@backend_1#pool_0"), - ("VOLUME_6", "host_1@backend_1#pool_1"), - ("VOLUME_7", "host_1@backend_1#pool_1"), - ] - - for volume_uuid, pool_name in mappings: - model.map_volume( - model.get_volume_by_uuid(volume_uuid), - model.get_pool_by_pool_name(pool_name), - ) - - return model - - def generate_scenario_1(self): - return self.load_model('storage_scenario_1.xml') diff --git a/watcher/tests/decision_engine/model/gnocchi_metrics.py b/watcher/tests/decision_engine/model/gnocchi_metrics.py deleted file mode 100644 index 982bcac..0000000 --- a/watcher/tests/decision_engine/model/gnocchi_metrics.py +++ /dev/null @@ -1,244 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import oslo_utils - - -class FakeGnocchiMetrics(object): - def __init__(self): - self.emptytype = "" - - def empty_one_metric(self, emptytype): - self.emptytype = emptytype - - def mock_get_statistics(self, resource_id, metric, granularity, - start_time, stop_time, aggregation='mean'): - result = 0 - meter_name = metric - if meter_name == "hardware.cpu.util": - result = self.get_usage_node_cpu(resource_id) - elif meter_name == "compute.node.cpu.percent": - result = self.get_usage_node_cpu(resource_id) - elif meter_name == "hardware.memory.used": - result = self.get_usage_node_ram(resource_id) - elif meter_name == "cpu_util": - result = self.get_average_usage_instance_cpu(resource_id) - elif meter_name == "memory.resident": - result = self.get_average_usage_instance_memory(resource_id) - elif meter_name == "hardware.ipmi.node.outlet_temperature": - result = self.get_average_outlet_temperature(resource_id) - elif meter_name == "hardware.ipmi.node.airflow": - result = self.get_average_airflow(resource_id) - elif meter_name == "hardware.ipmi.node.temperature": - result = self.get_average_inlet_t(resource_id) - elif meter_name == "hardware.ipmi.node.power": - result = self.get_average_power(resource_id) - return result - - def mock_get_statistics_wb(self, resource_id, metric, granularity, - start_time, stop_time, aggregation='mean'): - result = 0.0 - if metric == "cpu_util": - result = self.get_average_usage_instance_cpu_wb(resource_id) - return result - - @staticmethod - def get_average_outlet_temperature(uuid): - """The average outlet temperature for host""" - mock = {} - mock['Node_0'] = 30 - # use a big value to make sure it exceeds threshold - mock['Node_1'] = 100 - if uuid not in mock.keys(): - mock[uuid] = 100 - return mock[str(uuid)] - - @staticmethod - def get_usage_node_ram(uuid): - mock = {} - # Gnocchi returns hardware.memory.used samples in KB. 
- mock['Node_0'] = 7 * oslo_utils.units.Ki - mock['Node_1'] = 5 * oslo_utils.units.Ki - mock['Node_2'] = 29 * oslo_utils.units.Ki - mock['Node_3'] = 8 * oslo_utils.units.Ki - mock['Node_4'] = 4 * oslo_utils.units.Ki - - if uuid not in mock.keys(): - mock[uuid] = 8 - - return float(mock[str(uuid)]) - - @staticmethod - def get_average_airflow(uuid): - """The average outlet temperature for host""" - mock = {} - mock['Node_0'] = 400 - # use a big value to make sure it exceeds threshold - mock['Node_1'] = 100 - if uuid not in mock.keys(): - mock[uuid] = 200 - return mock[str(uuid)] - - @staticmethod - def get_average_inlet_t(uuid): - """The average outlet temperature for host""" - mock = {} - mock['Node_0'] = 24 - mock['Node_1'] = 26 - if uuid not in mock.keys(): - mock[uuid] = 28 - return mock[str(uuid)] - - @staticmethod - def get_average_power(uuid): - """The average outlet temperature for host""" - mock = {} - mock['Node_0'] = 260 - mock['Node_1'] = 240 - if uuid not in mock.keys(): - mock[uuid] = 200 - return mock[str(uuid)] - - @staticmethod - def get_usage_node_cpu(uuid): - """The last VM CPU usage values to average - - :param uuid: instance UUID - :return: float value - """ - # Normalize - mock = {} - # node 0 - mock['Node_0_hostname_0'] = 7 - mock['Node_1_hostname_1'] = 7 - # node 1 - mock['Node_2_hostname_2'] = 80 - # node 2 - mock['Node_3_hostname_3'] = 5 - mock['Node_4_hostname_4'] = 5 - mock['Node_5_hostname_5'] = 10 - - # node 3 - mock['Node_6_hostname_6'] = 8 - mock['Node_19_hostname_19'] = 10 - # node 4 - mock['INSTANCE_7_hostname_7'] = 4 - - mock['Node_0'] = 7 - mock['Node_1'] = 5 - mock['Node_2'] = 10 - mock['Node_3'] = 4 - mock['Node_4'] = 2 - - if uuid not in mock.keys(): - mock[uuid] = 8 - - return float(mock[str(uuid)]) - - @staticmethod - def get_average_usage_instance_cpu(uuid): - """The last VM CPU usage values to average - - :param uuid: instance UUID - :return: int value - """ - - # Normalize - mock = {} - # node 0 - mock['INSTANCE_0'] = 7 - 
mock['INSTANCE_1'] = 7 - # node 1 - mock['INSTANCE_2'] = 10 - # node 2 - mock['INSTANCE_3'] = 5 - mock['INSTANCE_4'] = 5 - mock['INSTANCE_5'] = 10 - - # node 3 - mock['INSTANCE_6'] = 8 - - # node 4 - mock['INSTANCE_7'] = 4 - if uuid not in mock.keys(): - mock[uuid] = 8 - - return mock[str(uuid)] - - @staticmethod - def get_average_usage_instance_memory(uuid): - mock = {} - # node 0 - mock['INSTANCE_0'] = 2 - mock['INSTANCE_1'] = 5 - # node 1 - mock['INSTANCE_2'] = 5 - # node 2 - mock['INSTANCE_3'] = 8 - mock['INSTANCE_4'] = 5 - mock['INSTANCE_5'] = 16 - - # node 3 - mock['INSTANCE_6'] = 8 - - # node 4 - mock['INSTANCE_7'] = 4 - if uuid not in mock.keys(): - mock[uuid] = 10 - - return mock[str(uuid)] - - @staticmethod - def get_average_usage_instance_disk(uuid): - mock = {} - # node 0 - mock['INSTANCE_0'] = 2 - mock['INSTANCE_1'] = 2 - # node 1 - mock['INSTANCE_2'] = 2 - # node 2 - mock['INSTANCE_3'] = 10 - mock['INSTANCE_4'] = 15 - mock['INSTANCE_5'] = 20 - - # node 3 - mock['INSTANCE_6'] = 8 - - # node 4 - mock['INSTANCE_7'] = 4 - - if uuid not in mock.keys(): - mock[uuid] = 4 - - return mock[str(uuid)] - - @staticmethod - def get_average_usage_instance_cpu_wb(uuid): - """The last VM CPU usage values to average - - :param uuid: instance UUID - :return: float value - """ - # query influxdb stream - - # compute in stream - - # Normalize - mock = {} - # node 0 - mock['INSTANCE_1'] = 80 - mock['73b09e16-35b7-4922-804e-e8f5d9b740fc'] = 50 - # node 1 - mock['INSTANCE_3'] = 20 - mock['INSTANCE_4'] = 10 - return float(mock[str(uuid)]) diff --git a/watcher/tests/decision_engine/model/monasca_metrics.py b/watcher/tests/decision_engine/model/monasca_metrics.py deleted file mode 100644 index 12ebb27..0000000 --- a/watcher/tests/decision_engine/model/monasca_metrics.py +++ /dev/null @@ -1,266 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Authors: Jean-Emile DARTOIS -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use 
this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import oslo_utils - - -class FakeMonascaMetrics(object): - def __init__(self): - self.emptytype = "" - - def empty_one_metric(self, emptytype): - self.emptytype = emptytype - - def mock_get_statistics(self, meter_name, dimensions, period, - aggregate='avg'): - resource_id = dimensions.get( - "resource_id") or dimensions.get("hostname") - result = 0.0 - if meter_name == "cpu.percent": - result = self.get_usage_node_cpu(resource_id) - elif meter_name == "vm.cpu.utilization_perc": - result = self.get_average_usage_instance_cpu(resource_id) - # elif meter_name == "hardware.memory.used": - # result = self.get_usage_node_ram(resource_id) - # elif meter_name == "memory.resident": - # result = self.get_average_usage_instance_memory(resource_id) - # elif meter_name == "hardware.ipmi.node.outlet_temperature": - # result = self.get_average_outlet_temperature(resource_id) - # elif meter_name == "hardware.ipmi.node.airflow": - # result = self.get_average_airflow(resource_id) - # elif meter_name == "hardware.ipmi.node.temperature": - # result = self.get_average_inlet_t(resource_id) - # elif meter_name == "hardware.ipmi.node.power": - # result = self.get_average_power(resource_id) - return result - - def mock_get_statistics_wb(self, meter_name, dimensions, period, - aggregate='avg'): - resource_id = dimensions.get( - "resource_id") or dimensions.get("hostname") - result = 0.0 - if meter_name == "vm.cpu.utilization_perc": - result = self.get_average_usage_instance_cpu_wb(resource_id) - return result - - @staticmethod - 
def get_average_outlet_temperature(uuid): - """The average outlet temperature for host""" - measurements = {} - measurements['Node_0'] = 30 - # use a big value to make sure it exceeds threshold - measurements['Node_1'] = 100 - if uuid not in measurements.keys(): - measurements[uuid] = 100 - return [{'columns': ['avg'], - 'statistics': [[float(measurements[str(uuid)])]]}] - - @staticmethod - def get_usage_node_ram(uuid): - measurements = {} - # Monasca returns hardware.memory.used samples in KB. - measurements['Node_0'] = 7 * oslo_utils.units.Ki - measurements['Node_1'] = 5 * oslo_utils.units.Ki - measurements['Node_2'] = 29 * oslo_utils.units.Ki - measurements['Node_3'] = 8 * oslo_utils.units.Ki - measurements['Node_4'] = 4 * oslo_utils.units.Ki - - if uuid not in measurements.keys(): - # measurements[uuid] = random.randint(1, 4) - measurements[uuid] = 8 - - return float(measurements[str(uuid)]) - - @staticmethod - def get_average_airflow(uuid): - """The average outlet temperature for host""" - measurements = {} - measurements['Node_0'] = 400 - # use a big value to make sure it exceeds threshold - measurements['Node_1'] = 100 - if uuid not in measurements.keys(): - measurements[uuid] = 200 - return [{'columns': ['avg'], - 'statistics': [[float(measurements[str(uuid)])]]}] - - @staticmethod - def get_average_inlet_t(uuid): - """The average outlet temperature for host""" - measurements = {} - measurements['Node_0'] = 24 - measurements['Node_1'] = 26 - if uuid not in measurements.keys(): - measurements[uuid] = 28 - return [{'columns': ['avg'], - 'statistics': [[float(measurements[str(uuid)])]]}] - - @staticmethod - def get_average_power(uuid): - """The average outlet temperature for host""" - measurements = {} - measurements['Node_0'] = 260 - measurements['Node_1'] = 240 - if uuid not in measurements.keys(): - measurements[uuid] = 200 - return [{'columns': ['avg'], - 'statistics': [[float(measurements[str(uuid)])]]}] - - @staticmethod - def get_usage_node_cpu(uuid): - 
"""The last VM CPU usage values to average - - :param uuid:00 - :return: - """ - # query influxdb stream - - # compute in stream - - # Normalize - measurements = {} - # node 0 - measurements['Node_0'] = 7 - measurements['Node_1'] = 7 - # node 1 - measurements['Node_2'] = 80 - # node 2 - measurements['Node_3'] = 5 - measurements['Node_4'] = 5 - measurements['Node_5'] = 10 - - # node 3 - measurements['Node_6'] = 8 - measurements['Node_19'] = 10 - # node 4 - measurements['INSTANCE_7'] = 4 - - if uuid not in measurements.keys(): - # measurements[uuid] = random.randint(1, 4) - measurements[uuid] = 8 - - return [{'columns': ['avg'], - 'statistics': [[float(measurements[str(uuid)])]]}] - # return float(measurements[str(uuid)]) - - @staticmethod - def get_average_usage_instance_cpu_wb(uuid): - """The last VM CPU usage values to average - - :param uuid:00 - :return: - """ - # query influxdb stream - - # compute in stream - - # Normalize - measurements = {} - # node 0 - measurements['INSTANCE_1'] = 80 - measurements['73b09e16-35b7-4922-804e-e8f5d9b740fc'] = 50 - # node 1 - measurements['INSTANCE_3'] = 20 - measurements['INSTANCE_4'] = 10 - return [{'columns': ['avg'], - 'statistics': [[float(measurements[str(uuid)])]]}] - - @staticmethod - def get_average_usage_instance_cpu(uuid): - """The last VM CPU usage values to average - - :param uuid:00 - :return: - """ - # query influxdb stream - - # compute in stream - - # Normalize - measurements = {} - # node 0 - measurements['INSTANCE_0'] = 7 - measurements['INSTANCE_1'] = 7 - # node 1 - measurements['INSTANCE_2'] = 10 - # node 2 - measurements['INSTANCE_3'] = 5 - measurements['INSTANCE_4'] = 5 - measurements['INSTANCE_5'] = 10 - - # node 3 - measurements['INSTANCE_6'] = 8 - - # node 4 - measurements['INSTANCE_7'] = 4 - if uuid not in measurements.keys(): - # measurements[uuid] = random.randint(1, 4) - measurements[uuid] = 8 - - return [{'columns': ['avg'], - 'statistics': [[float(measurements[str(uuid)])]]}] - - @staticmethod - 
def get_average_usage_instance_memory(uuid): - measurements = {} - # node 0 - measurements['INSTANCE_0'] = 2 - measurements['INSTANCE_1'] = 5 - # node 1 - measurements['INSTANCE_2'] = 5 - # node 2 - measurements['INSTANCE_3'] = 8 - measurements['INSTANCE_4'] = 5 - measurements['INSTANCE_5'] = 16 - - # node 3 - measurements['INSTANCE_6'] = 8 - - # node 4 - measurements['INSTANCE_7'] = 4 - if uuid not in measurements.keys(): - # measurements[uuid] = random.randint(1, 4) - measurements[uuid] = 10 - - return [{'columns': ['avg'], - 'statistics': [[float(measurements[str(uuid)])]]}] - - @staticmethod - def get_average_usage_instance_disk(uuid): - measurements = {} - # node 0 - measurements['INSTANCE_0'] = 2 - measurements['INSTANCE_1'] = 2 - # node 1 - measurements['INSTANCE_2'] = 2 - # node 2 - measurements['INSTANCE_3'] = 10 - measurements['INSTANCE_4'] = 15 - measurements['INSTANCE_5'] = 20 - - # node 3 - measurements['INSTANCE_6'] = 8 - - # node 4 - measurements['INSTANCE_7'] = 4 - - if uuid not in measurements.keys(): - # measurements[uuid] = random.randint(1, 4) - measurements[uuid] = 4 - - return [{'columns': ['avg'], - 'statistics': [[float(measurements[str(uuid)])]]}] diff --git a/watcher/tests/decision_engine/model/notification/__init__.py b/watcher/tests/decision_engine/model/notification/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/tests/decision_engine/model/notification/data/capacity.json b/watcher/tests/decision_engine/model/notification/data/capacity.json deleted file mode 100644 index 28a8f55..0000000 --- a/watcher/tests/decision_engine/model/notification/data/capacity.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "priority": "INFO", - "publisher_id": "capacity.host1@backend1#pool1", - "event_type": "capacity.pool", - "payload": { - "name_to_id": "capacity.host1@backend1#pool1", - "total": 3, - "free": 1, - "allocated": 2, - "provisioned": 2, - "virtual_free": 1, - "reported_at": "2017-05-15T13:42:11Z" - } -} diff --git 
a/watcher/tests/decision_engine/model/notification/data/instance-create.json b/watcher/tests/decision_engine/model/notification/data/instance-create.json deleted file mode 100644 index ddb1aa0..0000000 --- a/watcher/tests/decision_engine/model/notification/data/instance-create.json +++ /dev/null @@ -1,70 +0,0 @@ -{ - "event_type": "instance.update", - "payload": { - "nova_object.data": { - "architecture": "x86_64", - "audit_period": { - "nova_object.data": { - "audit_period_beginning": "2012-10-01T00:00:00Z", - "audit_period_ending": "2012-10-29T13:42:11Z" - }, - "nova_object.name": "AuditPeriodPayload", - "nova_object.namespace": "nova", - "nova_object.version": "1.0" - }, - "availability_zone": null, - "bandwidth": [], - "created_at": "2012-10-29T13:42:11Z", - "deleted_at": null, - "display_name": "some-server", - "host": "compute", - "host_name": "some-server", - "image_uuid": "155d900f-4e14-4e4c-a73d-069cbf4541e6", - "kernel_id": "", - "launched_at": null, - "metadata": {}, - "node": "fake-mini", - "old_display_name": null, - "os_type": null, - "progress": 0, - "ramdisk_id": "", - "reservation_id": "r-sd3ygfjj", - "state": "active", - "task_state": "scheduling", - "power_state": "pending", - "ip_addresses": [], - "state_update": { - "nova_object.version": "1.0", - "nova_object.name": "InstanceStateUpdatePayload", - "nova_object.namespace": "nova", - "nova_object.data": { - "old_state": "building", - "new_task_state": null, - "old_task_state": "spawning", - "state": "active" - } - }, - "tenant_id": "6f70656e737461636b20342065766572", - "terminated_at": null, - "flavor": { - "nova_object.name": "FlavorPayload", - "nova_object.data": { - "flavorid": "a22d5517-147c-4147-a0d1-e698df5cd4e3", - "root_gb": 1, - "vcpus": 1, - "ephemeral_gb": 0, - "memory_mb": 512 - }, - "nova_object.version": "1.0", - "nova_object.namespace": "nova" - }, - "user_id": "fake", - "uuid": "c03c0bf9-f46e-4e4f-93f1-817568567ee2" - }, - "nova_object.name": "InstanceUpdatePayload", - 
"nova_object.namespace": "nova", - "nova_object.version": "1.0" - }, - "priority": "INFO", - "publisher_id": "nova-compute:compute" -} diff --git a/watcher/tests/decision_engine/model/notification/data/instance-delete-end.json b/watcher/tests/decision_engine/model/notification/data/instance-delete-end.json deleted file mode 100644 index 75eaffa..0000000 --- a/watcher/tests/decision_engine/model/notification/data/instance-delete-end.json +++ /dev/null @@ -1,49 +0,0 @@ -{ - "event_type":"instance.delete.end", - "payload":{ - "nova_object.data":{ - "architecture":"x86_64", - "availability_zone":null, - "created_at":"2012-10-29T13:42:11Z", - "deleted_at":"2012-10-29T13:42:11Z", - "display_name":"some-server", - "fault":null, - "host":"compute", - "host_name":"some-server", - "ip_addresses":[], - "kernel_id":"", - "launched_at":"2012-10-29T13:42:11Z", - "image_uuid": "155d900f-4e14-4e4c-a73d-069cbf4541e6", - "metadata":{}, - "node":"fake-mini", - "os_type":null, - "progress":0, - "ramdisk_id":"", - "reservation_id":"r-npxv0e40", - "state":"deleted", - "task_state":null, - "power_state":"pending", - "tenant_id":"6f70656e737461636b20342065766572", - "terminated_at":"2012-10-29T13:42:11Z", - "flavor": { - "nova_object.name": "FlavorPayload", - "nova_object.data": { - "flavorid": "a22d5517-147c-4147-a0d1-e698df5cd4e3", - "root_gb": 1, - "vcpus": 1, - "ephemeral_gb": 0, - "memory_mb": 512 - }, - "nova_object.version": "1.0", - "nova_object.namespace": "nova" - }, - "user_id":"fake", - "uuid":"178b0921-8f85-4257-88b6-2e743b5a975c" - }, - "nova_object.name":"InstanceActionPayload", - "nova_object.namespace":"nova", - "nova_object.version":"1.0" - }, - "priority":"INFO", - "publisher_id":"nova-compute:compute" -} diff --git a/watcher/tests/decision_engine/model/notification/data/instance-update.json b/watcher/tests/decision_engine/model/notification/data/instance-update.json deleted file mode 100644 index f79485a..0000000 --- 
a/watcher/tests/decision_engine/model/notification/data/instance-update.json +++ /dev/null @@ -1,65 +0,0 @@ -{ - "event_type": "instance.update", - "payload": { - "nova_object.data": { - "architecture": "x86_64", - "audit_period": { - "nova_object.data": { - "audit_period_beginning": "2012-10-01T00:00:00Z", - "audit_period_ending": "2012-10-29T13:42:11Z"}, - "nova_object.name": "AuditPeriodPayload", - "nova_object.namespace": "nova", - "nova_object.version": "1.0" - }, - "availability_zone": null, - "bandwidth": [], - "created_at": "2012-10-29T13:42:11Z", - "deleted_at": null, - "display_name": "some-server", - "host": "compute", - "host_name": "some-server", - "image_uuid": "155d900f-4e14-4e4c-a73d-069cbf4541e6", - "kernel_id": "", - "launched_at": null, - "metadata": {}, - "node": "fake-mini", - "old_display_name": null, - "os_type": null, - "progress": 0, - "ramdisk_id": "", - "reservation_id": "r-sd3ygfjj", - "state": "active", - "task_state": "scheduling", - "power_state": "pending", - "ip_addresses": [], - "state_update": { - "nova_object.data": { - "new_task_state": null, - "old_state": null, - "old_task_state": null, - "state": "active"}, - "nova_object.name": "InstanceStateUpdatePayload", - "nova_object.namespace": "nova", - "nova_object.version": "1.0"}, - "tenant_id": "6f70656e737461636b20342065766572", - "terminated_at": null, - "flavor": { - "nova_object.name": "FlavorPayload", - "nova_object.data": { - "flavorid": "a22d5517-147c-4147-a0d1-e698df5cd4e3", - "root_gb": 1, - "vcpus": 1, - "ephemeral_gb": 0, - "memory_mb": 512 - }, - "nova_object.version": "1.0", - "nova_object.namespace": "nova" - }, - "user_id": "fake", - "uuid": "c03c0bf9-f46e-4e4f-93f1-817568567ee2"}, - "nova_object.name": "InstanceUpdatePayload", - "nova_object.namespace": "nova", - "nova_object.version": "1.0"}, - "priority": "INFO", - "publisher_id": "nova-compute:compute" -} diff --git a/watcher/tests/decision_engine/model/notification/data/scenario3_instance-create.json 
b/watcher/tests/decision_engine/model/notification/data/scenario3_instance-create.json deleted file mode 100644 index d180f8d..0000000 --- a/watcher/tests/decision_engine/model/notification/data/scenario3_instance-create.json +++ /dev/null @@ -1,70 +0,0 @@ -{ - "event_type": "instance.update", - "payload": { - "nova_object.data": { - "architecture": "x86_64", - "audit_period": { - "nova_object.data": { - "audit_period_beginning": "2012-10-01T00:00:00Z", - "audit_period_ending": "2012-10-29T13:42:11Z" - }, - "nova_object.name": "AuditPeriodPayload", - "nova_object.namespace": "nova", - "nova_object.version": "1.0" - }, - "availability_zone": null, - "bandwidth": [], - "created_at": "2012-10-29T13:42:11Z", - "deleted_at": null, - "display_name": "some-server", - "host": "Node_0", - "host_name": "some-server", - "image_uuid": "155d900f-4e14-4e4c-a73d-069cbf4541e6", - "kernel_id": "", - "launched_at": null, - "metadata": {}, - "node": "hostname_0", - "old_display_name": null, - "os_type": null, - "progress": 0, - "ramdisk_id": "", - "reservation_id": "r-sd3ygfjj", - "state": "active", - "task_state": "scheduling", - "power_state": "pending", - "ip_addresses": [], - "state_update": { - "nova_object.version": "1.0", - "nova_object.name": "InstanceStateUpdatePayload", - "nova_object.namespace": "nova", - "nova_object.data": { - "old_state": "building", - "new_task_state": null, - "old_task_state": "spawning", - "state": "active" - } - }, - "tenant_id": "6f70656e737461636b20342065766572", - "terminated_at": null, - "flavor": { - "nova_object.name": "FlavorPayload", - "nova_object.data": { - "flavorid": "a22d5517-147c-4147-a0d1-e698df5cd4e3", - "root_gb": 1, - "vcpus": 1, - "ephemeral_gb": 0, - "memory_mb": 512 - }, - "nova_object.version": "1.0", - "nova_object.namespace": "nova" - }, - "user_id": "fake", - "uuid": "c03c0bf9-f46e-4e4f-93f1-817568567ee2" - }, - "nova_object.name": "InstanceUpdatePayload", - "nova_object.namespace": "nova", - "nova_object.version": "1.0" - 
}, - "priority": "INFO", - "publisher_id": "nova-compute:Node_0" -} diff --git a/watcher/tests/decision_engine/model/notification/data/scenario3_instance-delete-end.json b/watcher/tests/decision_engine/model/notification/data/scenario3_instance-delete-end.json deleted file mode 100644 index 90898b8..0000000 --- a/watcher/tests/decision_engine/model/notification/data/scenario3_instance-delete-end.json +++ /dev/null @@ -1,49 +0,0 @@ -{ - "event_type":"instance.delete.end", - "payload":{ - "nova_object.data":{ - "architecture":"x86_64", - "availability_zone":null, - "created_at":"2012-10-29T13:42:11Z", - "deleted_at":"2012-10-29T13:42:11Z", - "display_name":"some-server", - "fault":null, - "host":"Node_0", - "host_name":"some-server", - "ip_addresses":[], - "kernel_id":"", - "launched_at":"2012-10-29T13:42:11Z", - "image_uuid": "155d900f-4e14-4e4c-a73d-069cbf4541e6", - "metadata":{}, - "node":"fake-mini", - "os_type":null, - "progress":0, - "ramdisk_id":"", - "reservation_id":"r-npxv0e40", - "state":"deleted", - "task_state":null, - "power_state":"pending", - "tenant_id":"6f70656e737461636b20342065766572", - "terminated_at":"2012-10-29T13:42:11Z", - "flavor": { - "nova_object.name": "FlavorPayload", - "nova_object.data": { - "flavorid": "a22d5517-147c-4147-a0d1-e698df5cd4e3", - "root_gb": 1, - "vcpus": 1, - "ephemeral_gb": 0, - "memory_mb": 512 - }, - "nova_object.version": "1.0", - "nova_object.namespace": "nova" - }, - "user_id":"fake", - "uuid":"73b09e16-35b7-4922-804e-e8f5d9b740fc" - }, - "nova_object.name":"InstanceActionPayload", - "nova_object.namespace":"nova", - "nova_object.version":"1.0" - }, - "priority":"INFO", - "publisher_id":"nova-compute:Node_0" -} diff --git a/watcher/tests/decision_engine/model/notification/data/scenario3_instance-update.json b/watcher/tests/decision_engine/model/notification/data/scenario3_instance-update.json deleted file mode 100644 index 23d23b9..0000000 --- 
a/watcher/tests/decision_engine/model/notification/data/scenario3_instance-update.json +++ /dev/null @@ -1,65 +0,0 @@ -{ - "event_type": "instance.update", - "payload": { - "nova_object.data": { - "architecture": "x86_64", - "audit_period": { - "nova_object.data": { - "audit_period_beginning": "2012-10-01T00:00:00Z", - "audit_period_ending": "2012-10-29T13:42:11Z"}, - "nova_object.name": "AuditPeriodPayload", - "nova_object.namespace": "nova", - "nova_object.version": "1.0" - }, - "availability_zone": null, - "bandwidth": [], - "created_at": "2012-10-29T13:42:11Z", - "deleted_at": null, - "display_name": "NEW_INSTANCE0", - "host": "Node_0", - "host_name": "NEW_INSTANCE0", - "image_uuid": "155d900f-4e14-4e4c-a73d-069cbf4541e6", - "kernel_id": "", - "launched_at": null, - "metadata": {}, - "node": "hostname_0", - "old_display_name": null, - "os_type": null, - "progress": 0, - "ramdisk_id": "", - "reservation_id": "r-sd3ygfjj", - "state": "paused", - "task_state": "scheduling", - "power_state": "pending", - "ip_addresses": [], - "state_update": { - "nova_object.data": { - "old_task_state": null, - "new_task_state": null, - "old_state": "paused", - "state": "paused"}, - "nova_object.name": "InstanceStateUpdatePayload", - "nova_object.namespace": "nova", - "nova_object.version": "1.0"}, - "tenant_id": "6f70656e737461636b20342065766572", - "terminated_at": null, - "flavor": { - "nova_object.name": "FlavorPayload", - "nova_object.data": { - "flavorid": "a22d5517-147c-4147-a0d1-e698df5cd4e3", - "root_gb": 1, - "vcpus": 1, - "ephemeral_gb": 0, - "memory_mb": 512 - }, - "nova_object.version": "1.0", - "nova_object.namespace": "nova" - }, - "user_id": "fake", - "uuid": "73b09e16-35b7-4922-804e-e8f5d9b740fc"}, - "nova_object.name": "InstanceUpdatePayload", - "nova_object.namespace": "nova", - "nova_object.version": "1.0"}, - "priority": "INFO", - "publisher_id": "nova-compute:Node_0" -} diff --git 
a/watcher/tests/decision_engine/model/notification/data/scenario3_legacy_instance-create-end.json b/watcher/tests/decision_engine/model/notification/data/scenario3_legacy_instance-create-end.json deleted file mode 100644 index 3a0b366..0000000 --- a/watcher/tests/decision_engine/model/notification/data/scenario3_legacy_instance-create-end.json +++ /dev/null @@ -1,62 +0,0 @@ -{ - "event_type": "compute.instance.create.end", - "metadata": { - "message_id": "577bfd11-88e0-4044-b8ae-496e3257efe2", - "timestamp": "2016-08-19 10:20:59.279903" - }, - "payload": { - "access_ip_v4": null, - "access_ip_v6": null, - "architecture": null, - "availability_zone": "nova", - "cell_name": "", - "created_at": "2016-08-19 10:20:49+00:00", - "deleted_at": "", - "disk_gb": 1, - "display_name": "INSTANCE_0", - "ephemeral_gb": 0, - "fixed_ips": [ - { - "address": "192.168.1.197", - "floating_ips": [], - "label": "demo-net", - "meta": {}, - "type": "fixed", - "version": 4, - "vif_mac": "fa:16:3e:a3:c0:0f" - } - ], - "host": "Node_0", - "hostname": "INSTANCE_0", - "image_meta": { - "base_image_ref": "205f96f5-91f9-42eb-9138-03fffcea2b97", - "container_format": "bare", - "disk_format": "qcow2", - "min_disk": "1", - "min_ram": "0" - }, - "image_ref_url": "http://127.0.0.1:9292/images/205f96f5-91f9-42eb-9138-03fffcea2b97", - "instance_flavor_id": "1", - "instance_id": "c03c0bf9-f46e-4e4f-93f1-817568567ee2", - "instance_type": "m1.tiny", - "instance_type_id": 2, - "kernel_id": "", - "launched_at": "2016-08-19T10:20:59.135390", - "memory_mb": 512, - "message": "Success", - "metadata": {}, - "node": "Node_0", - "os_type": null, - "progress": "", - "ramdisk_id": "", - "reservation_id": "r-56edz88e", - "root_gb": 1, - "state": "active", - "state_description": "", - "tenant_id": "57ab04ad6d3b495789a58258bc00842b", - "terminated_at": "", - "user_id": "cd7d93be51e4460ab51514b2a925b23a", - "vcpus": 1 - }, - "publisher_id": "compute.Node_0" -} diff --git 
a/watcher/tests/decision_engine/model/notification/data/scenario3_legacy_instance-delete-end.json b/watcher/tests/decision_engine/model/notification/data/scenario3_legacy_instance-delete-end.json deleted file mode 100644 index 12b0a12..0000000 --- a/watcher/tests/decision_engine/model/notification/data/scenario3_legacy_instance-delete-end.json +++ /dev/null @@ -1,46 +0,0 @@ -{ - "publisher_id": "compute:compute", - "event_type": "compute.instance.delete.end", - "payload": { - "access_ip_v4": null, - "access_ip_v6": null, - "architecture": null, - "availability_zone": "nova", - "cell_name": "", - "created_at": "2016-08-17 15:10:12+00:00", - "deleted_at": "2016-08-17T15:10:33.000000", - "disk_gb": 1, - "display_name": "some-server", - "ephemeral_gb": 0, - "host": "Node_0", - "hostname": "some-server", - "image_meta": { - "base_image_ref": "205f96f5-91f9-42eb-9138-03fffcea2b97", - "container_format": "bare", - "disk_format": "qcow2", - "min_disk": "1", - "min_ram": "0" - }, - "image_ref_url": "http://10.50.254.222:9292/images/205f96f5-91f9-42eb-9138-03fffcea2b97", - "instance_flavor_id": "1", - "instance_id": "73b09e16-35b7-4922-804e-e8f5d9b740fc", - "instance_type": "m1.tiny", - "instance_type_id": 2, - "kernel_id": "", - "launched_at": "2016-08-17T15:10:23.000000", - "memory_mb": 512, - "metadata": {}, - "node": "Node_0", - "os_type": null, - "progress": "", - "ramdisk_id": "", - "reservation_id": "r-z76fnsyy", - "root_gb": 1, - "state": "deleted", - "state_description": "", - "tenant_id": "15995ea2694e4268b3631db32e38678b", - "terminated_at": "2016-08-17T15:10:33.008164", - "user_id": "cd7d93be51e4460ab51514b2a925b23a", - "vcpus": 1 - } -} diff --git a/watcher/tests/decision_engine/model/notification/data/scenario3_legacy_instance-update.json b/watcher/tests/decision_engine/model/notification/data/scenario3_legacy_instance-update.json deleted file mode 100644 index ce2b997..0000000 --- 
a/watcher/tests/decision_engine/model/notification/data/scenario3_legacy_instance-update.json +++ /dev/null @@ -1,52 +0,0 @@ -{ - "publisher_id": "compute:Node_0", - "event_type": "compute.instance.update", - "payload": { - "access_ip_v4": null, - "access_ip_v6": null, - "architecture": null, - "audit_period_beginning": "2016-08-17T13:00:00.000000", - "audit_period_ending": "2016-08-17T13:56:05.262440", - "availability_zone": "nova", - "bandwidth": {}, - "cell_name": "", - "created_at": "2016-08-17 13:53:23+00:00", - "deleted_at": "", - "disk_gb": 1, - "display_name": "NEW_INSTANCE0", - "ephemeral_gb": 0, - "host": "Node_0", - "hostname": "NEW_INSTANCE0", - "image_meta": { - "base_image_ref": "205f96f5-91f9-42eb-9138-03fffcea2b97", - "container_format": "bare", - "disk_format": "qcow2", - "min_disk": "1", - "min_ram": "0" - }, - "image_ref_url": "http://10.50.0.222:9292/images/205f96f5-91f9-42eb-9138-03fffcea2b97", - "instance_flavor_id": "1", - "instance_id": "73b09e16-35b7-4922-804e-e8f5d9b740fc", - "instance_type": "m1.tiny", - "instance_type_id": 2, - "kernel_id": "", - "launched_at": "2016-08-17T13:53:35.000000", - "memory_mb": 512, - "metadata": {}, - "new_task_state": null, - "node": "hostname_0", - "old_state": "paused", - "old_task_state": null, - "os_type": null, - "progress": "", - "ramdisk_id": "", - "reservation_id": "r-0822ymml", - "root_gb": 1, - "state": "paused", - "state_description": "paused", - "tenant_id": "a4b4772d93c74d5e8b7c68cdd2a014e1", - "terminated_at": "", - "user_id": "ce64facc93354bbfa90f4f9f9a3e1e75", - "vcpus": 1 - } -} diff --git a/watcher/tests/decision_engine/model/notification/data/scenario3_legacy_livemigration-post-dest-end.json b/watcher/tests/decision_engine/model/notification/data/scenario3_legacy_livemigration-post-dest-end.json deleted file mode 100644 index 916b91b..0000000 --- a/watcher/tests/decision_engine/model/notification/data/scenario3_legacy_livemigration-post-dest-end.json +++ /dev/null @@ -1,61 +0,0 @@ -{ - 
"event_type": "compute.instance.live_migration.post.dest.end", - "metadata": { - "message_id": "9f58cad4-ff90-40f8-a8e4-633807f4a995", - "timestamp": "2016-08-19 10:13:44.645575" - }, - "payload": { - "access_ip_v4": null, - "access_ip_v6": null, - "architecture": null, - "availability_zone": "nova", - "cell_name": "", - "created_at": "2016-08-18 09:49:23+00:00", - "deleted_at": "", - "disk_gb": 1, - "display_name": "INSTANCE_0", - "ephemeral_gb": 0, - "fixed_ips": [ - { - "address": "192.168.1.196", - "floating_ips": [], - "label": "demo-net", - "meta": {}, - "type": "fixed", - "version": 4, - "vif_mac": "fa:16:3e:cc:ba:81" - } - ], - "host": "Node_1", - "hostname": "INSTANCE_0", - "image_meta": { - "base_image_ref": "205f96f5-91f9-42eb-9138-03fffcea2b97", - "container_format": "bare", - "disk_format": "qcow2", - "min_disk": "1", - "min_ram": "0" - }, - "image_ref_url": "http://10.50.254.222:9292/images/205f96f5-91f9-42eb-9138-03fffcea2b97", - "instance_flavor_id": "1", - "instance_id": "73b09e16-35b7-4922-804e-e8f5d9b740fc", - "instance_type": "m1.tiny", - "instance_type_id": 2, - "kernel_id": "", - "launched_at": "2016-08-18T09:49:33.000000", - "memory_mb": 512, - "metadata": {}, - "node": "Node_1", - "os_type": null, - "progress": "", - "ramdisk_id": "", - "reservation_id": "r-he04tfco", - "root_gb": 1, - "state": "active", - "state_description": "", - "tenant_id": "57ab04ad6d3b495789a58258bc00842b", - "terminated_at": "", - "user_id": "cd7d93be51e4460ab51514b2a925b23a", - "vcpus": 1 - }, - "publisher_id": "compute.Node_1" -} diff --git a/watcher/tests/decision_engine/model/notification/data/scenario3_notfound_instance-update.json b/watcher/tests/decision_engine/model/notification/data/scenario3_notfound_instance-update.json deleted file mode 100644 index 2f27862..0000000 --- a/watcher/tests/decision_engine/model/notification/data/scenario3_notfound_instance-update.json +++ /dev/null @@ -1,65 +0,0 @@ -{ - "event_type": "instance.update", - "payload": { - 
"nova_object.data": { - "architecture": "x86_64", - "audit_period": { - "nova_object.data": { - "audit_period_beginning": "2012-10-01T00:00:00Z", - "audit_period_ending": "2012-10-29T13:42:11Z"}, - "nova_object.name": "AuditPeriodPayload", - "nova_object.namespace": "nova", - "nova_object.version": "1.0" - }, - "availability_zone": null, - "bandwidth": [], - "created_at": "2012-10-29T13:42:11Z", - "deleted_at": null, - "display_name": "NEW INSTANCE 9966d6bd-a45c-4e1c-9d57-3054899a3ec7", - "host": "Node_2", - "host_name": "NEW_INSTANCE_9966d6bd-a45c-4e1c-9d57-3054899a3ec7", - "image_uuid": "155d900f-4e14-4e4c-a73d-069cbf4541e6", - "kernel_id": "", - "launched_at": null, - "metadata": {}, - "node": "hostname_0", - "old_display_name": null, - "os_type": null, - "progress": 0, - "ramdisk_id": "", - "reservation_id": "r-sd3ygfjj", - "state": "paused", - "task_state": "scheduling", - "power_state": "pending", - "ip_addresses": [], - "state_update": { - "nova_object.data": { - "old_task_state": null, - "new_task_state": null, - "old_state": "paused", - "state": "paused"}, - "nova_object.name": "InstanceStateUpdatePayload", - "nova_object.namespace": "nova", - "nova_object.version": "1.0"}, - "tenant_id": "6f70656e737461636b20342065766572", - "terminated_at": null, - "flavor": { - "nova_object.name": "FlavorPayload", - "nova_object.data": { - "flavorid": "a22d5517-147c-4147-a0d1-e698df5cd4e3", - "root_gb": 1, - "vcpus": 1, - "ephemeral_gb": 0, - "memory_mb": 512 - }, - "nova_object.version": "1.0", - "nova_object.namespace": "nova" - }, - "user_id": "fake", - "uuid": "9966d6bd-a45c-4e1c-9d57-3054899a3ec7"}, - "nova_object.name": "InstanceUpdatePayload", - "nova_object.namespace": "nova", - "nova_object.version": "1.0"}, - "priority": "INFO", - "publisher_id": "nova-compute:Node_2" -} diff --git a/watcher/tests/decision_engine/model/notification/data/scenario3_notfound_legacy_instance-update.json 
b/watcher/tests/decision_engine/model/notification/data/scenario3_notfound_legacy_instance-update.json deleted file mode 100644 index caf2863..0000000 --- a/watcher/tests/decision_engine/model/notification/data/scenario3_notfound_legacy_instance-update.json +++ /dev/null @@ -1,52 +0,0 @@ -{ - "publisher_id": "compute:Node_2", - "event_type": "compute.instance.update", - "payload": { - "access_ip_v4": null, - "access_ip_v6": null, - "architecture": null, - "audit_period_beginning": "2016-08-17T13:00:00.000000", - "audit_period_ending": "2016-08-17T13:56:05.262440", - "availability_zone": "nova", - "bandwidth": {}, - "cell_name": "", - "created_at": "2016-08-17 13:53:23+00:00", - "deleted_at": "", - "disk_gb": 1, - "display_name": "NEW INSTANCE 9966d6bd-a45c-4e1c-9d57-3054899a3ec7", - "ephemeral_gb": 0, - "host": "Node_2", - "hostname": "NEW_INSTANCE_9966d6bd-a45c-4e1c-9d57-3054899a3ec7", - "image_meta": { - "base_image_ref": "205f96f5-91f9-42eb-9138-03fffcea2b97", - "container_format": "bare", - "disk_format": "qcow2", - "min_disk": "1", - "min_ram": "0" - }, - "image_ref_url": "http://10.50.0.222:9292/images/205f96f5-91f9-42eb-9138-03fffcea2b97", - "instance_flavor_id": "1", - "instance_id": "9966d6bd-a45c-4e1c-9d57-3054899a3ec7", - "instance_type": "m1.tiny", - "instance_type_id": 2, - "kernel_id": "", - "launched_at": "2016-08-17T13:53:35.000000", - "memory_mb": 512, - "metadata": {}, - "new_task_state": null, - "node": "hostname_0", - "old_state": "paused", - "old_task_state": null, - "os_type": null, - "progress": "", - "ramdisk_id": "", - "reservation_id": "r-0822ymml", - "root_gb": 1, - "state": "paused", - "state_description": "paused", - "tenant_id": "a4b4772d93c74d5e8b7c68cdd2a014e1", - "terminated_at": "", - "user_id": "ce64facc93354bbfa90f4f9f9a3e1e75", - "vcpus": 1 - } -} diff --git a/watcher/tests/decision_engine/model/notification/data/scenario3_service-update-disabled.json 
b/watcher/tests/decision_engine/model/notification/data/scenario3_service-update-disabled.json deleted file mode 100644 index 410f12d..0000000 --- a/watcher/tests/decision_engine/model/notification/data/scenario3_service-update-disabled.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "priority": "INFO", - "payload": { - "nova_object.namespace": "nova", - "nova_object.name": "ServiceStatusPayload", - "nova_object.version": "1.0", - "nova_object.data": { - "host": "Node_0", - "disabled": true, - "last_seen_up": "2012-10-29T13:42:05Z", - "binary": "nova-compute", - "topic": "compute", - "disabled_reason": null, - "report_count": 1, - "forced_down": true, - "version": 15 - } - }, - "event_type": "service.update", - "publisher_id": "nova-compute:Node_0" -} diff --git a/watcher/tests/decision_engine/model/notification/data/scenario3_service-update-enabled.json b/watcher/tests/decision_engine/model/notification/data/scenario3_service-update-enabled.json deleted file mode 100644 index f3e7f23..0000000 --- a/watcher/tests/decision_engine/model/notification/data/scenario3_service-update-enabled.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "priority": "INFO", - "payload": { - "nova_object.namespace": "nova", - "nova_object.name": "ServiceStatusPayload", - "nova_object.version": "1.0", - "nova_object.data": { - "host": "Node_0", - "disabled": false, - "last_seen_up": "2012-10-29T13:42:05Z", - "binary": "nova-compute", - "topic": "compute", - "disabled_reason": null, - "report_count": 1, - "forced_down": false, - "version": 15 - } - }, - "event_type": "service.update", - "publisher_id": "nova-compute:Node_0" -} diff --git a/watcher/tests/decision_engine/model/notification/data/scenario_1_bootable-volume-create.json b/watcher/tests/decision_engine/model/notification/data/scenario_1_bootable-volume-create.json deleted file mode 100644 index 4b0fe28..0000000 --- a/watcher/tests/decision_engine/model/notification/data/scenario_1_bootable-volume-create.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - 
"priority": "INFO", - "publisher_id": "volume.host_0@backend_0#pool_0", - "event_type": "volume.create.end", - "payload": { - "host": "host_0@backend_0#pool_0", - "volume_id": "VOLUME_00", - "display_name": "name_00", - "size": "40", - "status": "available", - "volume_attachment": [{"server_id": "server", "attachment_id": "attachment"}], - "snapshot_id": "", - "tenant_id": "project_00", - "metadata": {"readonly": false, "attached_mode": "rw"}, - "glance_metadata": {} - } -} diff --git a/watcher/tests/decision_engine/model/notification/data/scenario_1_capacity.json b/watcher/tests/decision_engine/model/notification/data/scenario_1_capacity.json deleted file mode 100644 index 7831bdd..0000000 --- a/watcher/tests/decision_engine/model/notification/data/scenario_1_capacity.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "priority": "INFO", - "publisher_id": "capacity.host_0@backend_0#pool_0", - "event_type": "capacity.pool", - "payload": { - "name_to_id": "host_0@backend_0#pool_0", - "total": 500, - "free": 460, - "allocated": 40, - "provisioned": 40, - "virtual_free": 460, - "reported_at": "2017-05-15T13:42:11Z" - } -} diff --git a/watcher/tests/decision_engine/model/notification/data/scenario_1_capacity_node_notfound.json b/watcher/tests/decision_engine/model/notification/data/scenario_1_capacity_node_notfound.json deleted file mode 100644 index 650a575..0000000 --- a/watcher/tests/decision_engine/model/notification/data/scenario_1_capacity_node_notfound.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "priority": "INFO", - "publisher_id": "capacity.host_2@backend_2#pool_0", - "event_type": "capacity.pool", - "payload": { - "name_to_id": "host_2@backend_2#pool_0", - "total": 500, - "free": 460, - "allocated": 40, - "provisioned": 40, - "virtual_free": 460, - "reported_at": "2017-05-15T13:42:11Z" - } -} diff --git a/watcher/tests/decision_engine/model/notification/data/scenario_1_capacity_pool_notfound.json 
b/watcher/tests/decision_engine/model/notification/data/scenario_1_capacity_pool_notfound.json deleted file mode 100644 index 948c230..0000000 --- a/watcher/tests/decision_engine/model/notification/data/scenario_1_capacity_pool_notfound.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "priority": "INFO", - "publisher_id": "capacity.host_0@backend_0#pool_2", - "event_type": "capacity.pool", - "payload": { - "name_to_id": "host_0@backend_0#pool_2", - "total": 500, - "free": 380, - "allocated": 120, - "provisioned": 120, - "virtual_free": 380, - "reported_at": "2017-05-15T13:42:11Z" - } -} diff --git a/watcher/tests/decision_engine/model/notification/data/scenario_1_error-volume-create.json b/watcher/tests/decision_engine/model/notification/data/scenario_1_error-volume-create.json deleted file mode 100644 index ac0739e..0000000 --- a/watcher/tests/decision_engine/model/notification/data/scenario_1_error-volume-create.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "priority": "INFO", - "publisher_id": "volume.host_0@backend_0#pool_0", - "event_type": "volume.create.end", - "payload": { - "host": "", - "volume_id": "VOLUME_00", - "display_name": "name_00", - "size": "40", - "status": "error", - "volume_attachment": [], - "snapshot_id": "", - "tenant_id": "project_00", - "metadata": {} - } -} diff --git a/watcher/tests/decision_engine/model/notification/data/scenario_1_volume-attach.json b/watcher/tests/decision_engine/model/notification/data/scenario_1_volume-attach.json deleted file mode 100644 index 2f2a2a1..0000000 --- a/watcher/tests/decision_engine/model/notification/data/scenario_1_volume-attach.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "priority": "INFO", - "publisher_id": "volume.host_0@backend_0#pool_0", - "event_type": "volume.attach.end", - "payload": { - "host": "host_0@backend_0#pool_0", - "volume_id": "VOLUME_0", - "display_name": "name_0", - "size": "40", - "status": "in-use", - "volume_attachment": [{"server_id": "server", "attachment_id": "attachment"}], - 
"snapshot_id": "", - "tenant_id": "project_0", - "metadata": {"readonly": false, "attached_mode": "rw"} - } -} diff --git a/watcher/tests/decision_engine/model/notification/data/scenario_1_volume-create.json b/watcher/tests/decision_engine/model/notification/data/scenario_1_volume-create.json deleted file mode 100644 index 089da52..0000000 --- a/watcher/tests/decision_engine/model/notification/data/scenario_1_volume-create.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "priority": "INFO", - "publisher_id": "volume.host_0@backend_0#pool_0", - "event_type": "volume.create.end", - "payload": { - "host": "host_0@backend_0#pool_0", - "volume_id": "VOLUME_00", - "display_name": "name_00", - "size": "40", - "status": "available", - "volume_attachment": [{"server_id": "server", "attachment_id": "attachment"}], - "snapshot_id": "", - "tenant_id": "project_00", - "metadata": {"readonly": false, "attached_mode": "rw"} - } -} diff --git a/watcher/tests/decision_engine/model/notification/data/scenario_1_volume-create_pool_notfound.json b/watcher/tests/decision_engine/model/notification/data/scenario_1_volume-create_pool_notfound.json deleted file mode 100644 index a772f35..0000000 --- a/watcher/tests/decision_engine/model/notification/data/scenario_1_volume-create_pool_notfound.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "priority": "INFO", - "publisher_id": "volume.host_2@backend_2#pool_0", - "event_type": "volume.create.end", - "payload": { - "host": "host_2@backend_2#pool_0", - "volume_id": "VOLUME_00", - "display_name": "name_00", - "size": "40", - "status": "available", - "volume_attachment": [{"server_id": "server", "attachment_id": "attachment"}], - "snapshot_id": "", - "tenant_id": "project_00", - "metadata": {"readonly": false, "attached_mode": "rw"} - } -} diff --git a/watcher/tests/decision_engine/model/notification/data/scenario_1_volume-delete.json b/watcher/tests/decision_engine/model/notification/data/scenario_1_volume-delete.json deleted file mode 100644 index 
6d09d7f..0000000 --- a/watcher/tests/decision_engine/model/notification/data/scenario_1_volume-delete.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "priority": "INFO", - "publisher_id": "volume.host_0@backend_0#pool_0", - "event_type": "volume.delete.end", - "payload": { - "host": "host_0@backend_0#pool_0", - "volume_id": "VOLUME_0", - "display_name": "name_0", - "size": "40", - "status": "deleting", - "volume_attachment": [{"server_id": "server", "attachment_id": "attachment"}], - "snapshot_id": "", - "tenant_id": "project_0", - "metadata": {"readonly": false, "attached_mode": "rw"} - } -} diff --git a/watcher/tests/decision_engine/model/notification/data/scenario_1_volume-detach.json b/watcher/tests/decision_engine/model/notification/data/scenario_1_volume-detach.json deleted file mode 100644 index f2d635d..0000000 --- a/watcher/tests/decision_engine/model/notification/data/scenario_1_volume-detach.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "priority": "INFO", - "publisher_id": "volume.host_0@backend_0#pool_0", - "event_type": "volume.detach.end", - "payload": { - "host": "host_0@backend_0#pool_0", - "volume_id": "VOLUME_0", - "display_name": "name_0", - "size": "40", - "status": "available", - "volume_attachment": [], - "snapshot_id": "", - "tenant_id": "project_0", - "metadata": {} - } -} diff --git a/watcher/tests/decision_engine/model/notification/data/scenario_1_volume-resize.json b/watcher/tests/decision_engine/model/notification/data/scenario_1_volume-resize.json deleted file mode 100644 index 6b4597f..0000000 --- a/watcher/tests/decision_engine/model/notification/data/scenario_1_volume-resize.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "priority": "INFO", - "publisher_id": "volume.host_0@backend_0#pool_0", - "event_type": "volume.resize.end", - "payload": { - "host": "host_0@backend_0#pool_0", - "volume_id": "VOLUME_0", - "display_name": "name_0", - "size": "20", - "status": "in-use", - "volume_attachment": [{"server_id": "server", "attachment_id": "attachment"}], 
- "snapshot_id": "", - "tenant_id": "project_0", - "metadata": {"readonly": false, "attached_mode": "rw"} - } -} diff --git a/watcher/tests/decision_engine/model/notification/data/scenario_1_volume-update.json b/watcher/tests/decision_engine/model/notification/data/scenario_1_volume-update.json deleted file mode 100644 index b846ddc..0000000 --- a/watcher/tests/decision_engine/model/notification/data/scenario_1_volume-update.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "priority": "INFO", - "publisher_id": "volume.host_0@backend_0#pool_0", - "event_type": "volume.update.end", - "payload": { - "host": "host_0@backend_0#pool_0", - "volume_id": "VOLUME_0", - "display_name": "name_01", - "size": "40", - "status": "enabled", - "volume_attachment": [{"server_id": "server", "attachment_id": "attachment"}], - "snapshot_id": "", - "tenant_id": "project_0", - "metadata": {"readonly": false, "attached_mode": "rw"} - } -} diff --git a/watcher/tests/decision_engine/model/notification/data/service-update.json b/watcher/tests/decision_engine/model/notification/data/service-update.json deleted file mode 100644 index 1baf63a..0000000 --- a/watcher/tests/decision_engine/model/notification/data/service-update.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "priority": "INFO", - "payload": { - "nova_object.namespace": "nova", - "nova_object.name": "ServiceStatusPayload", - "nova_object.version": "1.0", - "nova_object.data": { - "host": "host1", - "disabled": false, - "last_seen_up": "2012-10-29T13:42:05Z", - "binary": "nova-compute", - "topic": "compute", - "disabled_reason": null, - "report_count": 1, - "forced_down": false, - "version": 15 - } - }, - "event_type": "service.update", - "publisher_id": "nova-compute:host1" -} diff --git a/watcher/tests/decision_engine/model/notification/fake_managers.py b/watcher/tests/decision_engine/model/notification/fake_managers.py deleted file mode 100644 index 0d196f3..0000000 --- a/watcher/tests/decision_engine/model/notification/fake_managers.py +++ 
/dev/null @@ -1,85 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Authors: Vincent FRANCOISE -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from watcher.common import service_manager -from watcher.decision_engine.model.notification import cinder as cnotification -from watcher.decision_engine.model.notification import nova as novanotification -from watcher.tests.decision_engine.model import faker_cluster_state - - -class FakeManager(service_manager.ServiceManager): - - API_VERSION = '1.0' - - fake_cdmc = faker_cluster_state.FakerModelCollector() - - @property - def service_name(self): - return 'watcher-fake' - - @property - def api_version(self): - return self.API_VERSION - - @property - def publisher_id(self): - return 'test_publisher_id' - - @property - def conductor_topic(self): - return 'test_conductor_topic' - - @property - def notification_topics(self): - return ['nova'] - - @property - def conductor_endpoints(self): - return [] # Disable audit endpoint - - @property - def notification_endpoints(self): - return [ - novanotification.ServiceUpdated(self.fake_cdmc), - - novanotification.InstanceCreated(self.fake_cdmc), - novanotification.InstanceUpdated(self.fake_cdmc), - novanotification.InstanceDeletedEnd(self.fake_cdmc), - - novanotification.LegacyInstanceCreatedEnd(self.fake_cdmc), - novanotification.LegacyInstanceUpdated(self.fake_cdmc), - novanotification.LegacyLiveMigratedEnd(self.fake_cdmc), - 
novanotification.LegacyInstanceDeletedEnd(self.fake_cdmc), - ] - - -class FakeStorageManager(FakeManager): - - fake_cdmc = faker_cluster_state.FakerStorageModelCollector() - - @property - def notification_endpoints(self): - return [ - cnotification.CapacityNotificationEndpoint(self.fake_cdmc), - cnotification.VolumeCreateEnd(self.fake_cdmc), - cnotification.VolumeUpdateEnd(self.fake_cdmc), - cnotification.VolumeDeleteEnd(self.fake_cdmc), - cnotification.VolumeAttachEnd(self.fake_cdmc), - cnotification.VolumeDetachEnd(self.fake_cdmc), - cnotification.VolumeResizeEnd(self.fake_cdmc), - ] diff --git a/watcher/tests/decision_engine/model/notification/test_cinder_notifications.py b/watcher/tests/decision_engine/model/notification/test_cinder_notifications.py deleted file mode 100644 index 020ef66..0000000 --- a/watcher/tests/decision_engine/model/notification/test_cinder_notifications.py +++ /dev/null @@ -1,607 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright 2017 NEC Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import datetime -import os - -import mock -from oslo_serialization import jsonutils - -from watcher.common import cinder_helper -from watcher.common import context -from watcher.common import exception -from watcher.common import service as watcher_service -from watcher.db.sqlalchemy import api as db_api -from watcher.decision_engine.model.notification import cinder as cnotification -from watcher.tests import base as base_test -from watcher.tests.db import utils -from watcher.tests.decision_engine.model import faker_cluster_state -from watcher.tests.decision_engine.model.notification import fake_managers - - -class NotificationTestCase(base_test.TestCase): - - @staticmethod - def load_message(filename): - cwd = os.path.abspath(os.path.dirname(__file__)) - data_folder = os.path.join(cwd, "data") - - with open(os.path.join(data_folder, filename), 'rb') as json_file: - json_data = jsonutils.load(json_file) - - return json_data - - -class TestReceiveCinderNotifications(NotificationTestCase): - - FAKE_METADATA = {'message_id': None, 'timestamp': None} - - def setUp(self): - super(TestReceiveCinderNotifications, self).setUp() - - p_from_dict = mock.patch.object(context.RequestContext, 'from_dict') - m_from_dict = p_from_dict.start() - m_from_dict.return_value = self.context - self.addCleanup(p_from_dict.stop) - - p_get_service_list = mock.patch.object( - db_api.Connection, 'get_service_list') - p_update_service = mock.patch.object( - db_api.Connection, 'update_service') - m_get_service_list = p_get_service_list.start() - m_update_service = p_update_service.start() - fake_service = utils.get_test_service( - created_at=datetime.datetime.utcnow()) - - m_get_service_list.return_value = [fake_service] - m_update_service.return_value = fake_service.copy() - - self.addCleanup(p_get_service_list.stop) - self.addCleanup(p_update_service.stop) - - @mock.patch.object(cnotification.CapacityNotificationEndpoint, 'info') - def test_cinder_receive_capacity(self, m_info): - message = 
self.load_message('capacity.json') - expected_message = message['payload'] - - de_service = watcher_service.Service(fake_managers.FakeStorageManager) - incoming = mock.Mock(ctxt=self.context.to_dict(), message=message) - - de_service.notification_handler.dispatcher.dispatch(incoming) - m_info.assert_called_once_with( - self.context, 'capacity.host1@backend1#pool1', 'capacity.pool', - expected_message, self.FAKE_METADATA) - - @mock.patch.object(cnotification.VolumeCreateEnd, 'info') - def test_cinder_receive_volume_create_end(self, m_info): - message = self.load_message('scenario_1_volume-create.json') - expected_message = message['payload'] - - de_service = watcher_service.Service(fake_managers.FakeStorageManager) - incoming = mock.Mock(ctxt=self.context.to_dict(), message=message) - - de_service.notification_handler.dispatcher.dispatch(incoming) - m_info.assert_called_once_with( - self.context, 'volume.host_0@backend_0#pool_0', - 'volume.create.end', expected_message, self.FAKE_METADATA) - - @mock.patch.object(cnotification.VolumeUpdateEnd, 'info') - def test_cinder_receive_volume_update_end(self, m_info): - message = self.load_message('scenario_1_volume-update.json') - expected_message = message['payload'] - - de_service = watcher_service.Service(fake_managers.FakeStorageManager) - incoming = mock.Mock(ctxt=self.context.to_dict(), message=message) - - de_service.notification_handler.dispatcher.dispatch(incoming) - m_info.assert_called_once_with( - self.context, 'volume.host_0@backend_0#pool_0', - 'volume.update.end', expected_message, self.FAKE_METADATA) - - @mock.patch.object(cnotification.VolumeAttachEnd, 'info') - def test_cinder_receive_volume_attach_end(self, m_info): - message = self.load_message('scenario_1_volume-attach.json') - expected_message = message['payload'] - - de_service = watcher_service.Service(fake_managers.FakeStorageManager) - incoming = mock.Mock(ctxt=self.context.to_dict(), message=message) - - 
de_service.notification_handler.dispatcher.dispatch(incoming) - m_info.assert_called_once_with( - self.context, 'volume.host_0@backend_0#pool_0', - 'volume.attach.end', expected_message, self.FAKE_METADATA) - - @mock.patch.object(cnotification.VolumeDetachEnd, 'info') - def test_cinder_receive_volume_detach_end(self, m_info): - message = self.load_message('scenario_1_volume-detach.json') - expected_message = message['payload'] - - de_service = watcher_service.Service(fake_managers.FakeStorageManager) - incoming = mock.Mock(ctxt=self.context.to_dict(), message=message) - - de_service.notification_handler.dispatcher.dispatch(incoming) - m_info.assert_called_once_with( - self.context, 'volume.host_0@backend_0#pool_0', - 'volume.detach.end', expected_message, self.FAKE_METADATA) - - @mock.patch.object(cnotification.VolumeResizeEnd, 'info') - def test_cinder_receive_volume_resize_end(self, m_info): - message = self.load_message('scenario_1_volume-resize.json') - expected_message = message['payload'] - - de_service = watcher_service.Service(fake_managers.FakeStorageManager) - incoming = mock.Mock(ctxt=self.context.to_dict(), message=message) - - de_service.notification_handler.dispatcher.dispatch(incoming) - m_info.assert_called_once_with( - self.context, 'volume.host_0@backend_0#pool_0', - 'volume.resize.end', expected_message, self.FAKE_METADATA) - - @mock.patch.object(cnotification.VolumeDeleteEnd, 'info') - def test_cinder_receive_volume_delete_end(self, m_info): - message = self.load_message('scenario_1_volume-delete.json') - expected_message = message['payload'] - - de_service = watcher_service.Service(fake_managers.FakeStorageManager) - incoming = mock.Mock(ctxt=self.context.to_dict(), message=message) - - de_service.notification_handler.dispatcher.dispatch(incoming) - m_info.assert_called_once_with( - self.context, 'volume.host_0@backend_0#pool_0', - 'volume.delete.end', expected_message, self.FAKE_METADATA) - - -class 
TestCinderNotifications(NotificationTestCase): - - FAKE_METADATA = {'message_id': None, 'timestamp': None} - - def setUp(self): - super(TestCinderNotifications, self).setUp() - # fake cluster - self.fake_cdmc = faker_cluster_state.FakerStorageModelCollector() - - def test_cinder_capacity(self): - """test consuming capacity""" - - storage_model = self.fake_cdmc.generate_scenario_1() - self.fake_cdmc.cluster_data_model = storage_model - handler = cnotification.CapacityNotificationEndpoint(self.fake_cdmc) - - pool_0_name = 'host_0@backend_0#pool_0' - pool_0 = storage_model.get_pool_by_pool_name(pool_0_name) - - # before - self.assertEqual(pool_0_name, pool_0.name) - self.assertEqual(420, pool_0.free_capacity_gb) - self.assertEqual(420, pool_0.virtual_free) - self.assertEqual(80, pool_0.allocated_capacity_gb) - self.assertEqual(80, pool_0.provisioned_capacity_gb) - - message = self.load_message('scenario_1_capacity.json') - handler.info( - ctxt=self.context, - publisher_id=message['publisher_id'], - event_type=message['event_type'], - payload=message['payload'], - metadata=self.FAKE_METADATA, - ) - - # after - self.assertEqual(pool_0_name, pool_0.name) - self.assertEqual(460, pool_0.free_capacity_gb) - self.assertEqual(460, pool_0.virtual_free) - self.assertEqual(40, pool_0.allocated_capacity_gb) - self.assertEqual(40, pool_0.provisioned_capacity_gb) - - @mock.patch.object(cinder_helper, 'CinderHelper') - def test_cinder_capacity_pool_notfound(self, m_cinder_helper): - """test consuming capacity, new pool in existing node""" - - # storage_pool_by_name mock - return_mock = mock.Mock() - return_mock.configure_mock( - name='host_0@backend_0#pool_2', - total_volumes='2', - total_capacity_gb='500', - free_capacity_gb='380', - provisioned_capacity_gb='120', - allocated_capacity_gb='120') - - m_get_storage_pool_by_name = mock.Mock( - side_effect=lambda name: return_mock) - - m_cinder_helper.return_value = mock.Mock( - get_storage_pool_by_name=m_get_storage_pool_by_name) - - 
storage_model = self.fake_cdmc.generate_scenario_1() - self.fake_cdmc.cluster_data_model = storage_model - handler = cnotification.CapacityNotificationEndpoint(self.fake_cdmc) - - message = self.load_message('scenario_1_capacity_pool_notfound.json') - handler.info( - ctxt=self.context, - publisher_id=message['publisher_id'], - event_type=message['event_type'], - payload=message['payload'], - metadata=self.FAKE_METADATA, - ) - - # after consuming message, still pool_0 exists - pool_0_name = 'host_0@backend_0#pool_0' - pool_0 = storage_model.get_pool_by_pool_name(pool_0_name) - self.assertEqual(pool_0_name, pool_0.name) - self.assertEqual(420, pool_0.free_capacity_gb) - self.assertEqual(420, pool_0.virtual_free) - self.assertEqual(80, pool_0.allocated_capacity_gb) - self.assertEqual(80, pool_0.provisioned_capacity_gb) - - # new pool was added - pool_1_name = 'host_0@backend_0#pool_2' - m_get_storage_pool_by_name.assert_called_once_with(pool_1_name) - storage_node = storage_model.get_node_by_pool_name(pool_1_name) - self.assertEqual('host_0@backend_0', storage_node.host) - pool_1 = storage_model.get_pool_by_pool_name(pool_1_name) - self.assertEqual(pool_1_name, pool_1.name) - self.assertEqual(500, pool_1.total_capacity_gb) - self.assertEqual(380, pool_1.free_capacity_gb) - self.assertEqual(120, pool_1.allocated_capacity_gb) - - @mock.patch.object(cinder_helper, 'CinderHelper') - def test_cinder_capacity_node_notfound(self, m_cinder_helper): - """test consuming capacity, new pool in new node""" - - return_pool_mock = mock.Mock() - return_pool_mock.configure_mock( - name='host_2@backend_2#pool_0', - total_volumes='2', - total_capacity_gb='500', - free_capacity_gb='460', - provisioned_capacity_gb='40', - allocated_capacity_gb='40') - - m_get_storage_pool_by_name = mock.Mock( - side_effect=lambda name: return_pool_mock) - - # storage_node_by_name mock - return_node_mock = mock.Mock() - return_node_mock.configure_mock( - host='host_2@backend_2', - zone='nova', - 
state='up', - status='enabled') - - m_get_storage_node_by_name = mock.Mock( - side_effect=lambda name: return_node_mock) - - m_get_volume_type_by_backendname = mock.Mock( - side_effect=lambda name: mock.Mock('backend_2')) - - m_cinder_helper.return_value = mock.Mock( - get_storage_pool_by_name=m_get_storage_pool_by_name, - get_storage_node_by_name=m_get_storage_node_by_name, - get_volume_type_by_backendname=m_get_volume_type_by_backendname) - - storage_model = self.fake_cdmc.generate_scenario_1() - self.fake_cdmc.cluster_data_model = storage_model - handler = cnotification.CapacityNotificationEndpoint(self.fake_cdmc) - - message = self.load_message('scenario_1_capacity_node_notfound.json') - handler.info( - ctxt=self.context, - publisher_id=message['publisher_id'], - event_type=message['event_type'], - payload=message['payload'], - metadata=self.FAKE_METADATA, - ) - - # new pool and new node was added - node_1_name = 'host_2@backend_2' - pool_1_name = node_1_name + '#pool_0' - volume_type = 'backend_2' - m_get_storage_pool_by_name.assert_called_once_with(pool_1_name) - m_get_storage_node_by_name.assert_called_once_with(node_1_name) - m_get_volume_type_by_backendname.assert_called_once_with(volume_type) - # new node was added - storage_node = storage_model.get_node_by_pool_name(pool_1_name) - self.assertEqual('host_2@backend_2', storage_node.host) - # new pool was added - pool_1 = storage_model.get_pool_by_pool_name(pool_1_name) - self.assertEqual(pool_1_name, pool_1.name) - self.assertEqual(500, pool_1.total_capacity_gb) - self.assertEqual(460, pool_1.free_capacity_gb) - self.assertEqual(40, pool_1.allocated_capacity_gb) - self.assertEqual(40, pool_1.provisioned_capacity_gb) - - @mock.patch.object(cinder_helper, 'CinderHelper') - def test_cinder_volume_create(self, m_cinder_helper): - """test creating volume in existing pool and node""" - - # create storage_pool_by_name mock - return_pool_mock = mock.Mock() - return_pool_mock.configure_mock( - 
name='host_0@backend_0#pool_0', - total_volumes='3', - total_capacity_gb='500', - free_capacity_gb='380', - provisioned_capacity_gb='120', - allocated_capacity_gb='120') - - m_get_storage_pool_by_name = mock.Mock( - side_effect=lambda name: return_pool_mock) - - m_cinder_helper.return_value = mock.Mock( - get_storage_pool_by_name=m_get_storage_pool_by_name) - - storage_model = self.fake_cdmc.generate_scenario_1() - self.fake_cdmc.cluster_data_model = storage_model - handler = cnotification.VolumeCreateEnd(self.fake_cdmc) - - message = self.load_message('scenario_1_volume-create.json') - handler.info( - ctxt=self.context, - publisher_id=message['publisher_id'], - event_type=message['event_type'], - payload=message['payload'], - metadata=self.FAKE_METADATA, - ) - # check that volume00 was added to the model - volume_00_name = 'VOLUME_00' - volume_00 = storage_model.get_volume_by_uuid(volume_00_name) - self.assertEqual(volume_00_name, volume_00.uuid) - self.assertFalse(volume_00.bootable) - # check that capacity was updated - pool_0_name = 'host_0@backend_0#pool_0' - m_get_storage_pool_by_name.assert_called_once_with(pool_0_name) - pool_0 = storage_model.get_pool_by_pool_name(pool_0_name) - self.assertEqual(pool_0.name, pool_0_name) - self.assertEqual(3, pool_0.total_volumes) - self.assertEqual(380, pool_0.free_capacity_gb) - self.assertEqual(120, pool_0.allocated_capacity_gb) - self.assertEqual(120, pool_0.provisioned_capacity_gb) - - @mock.patch.object(cinder_helper, 'CinderHelper') - def test_cinder_bootable_volume_create(self, m_cinder_helper): - """test creating bootable volume in existing pool and node""" - - # create storage_pool_by_name mock - return_pool_mock = mock.Mock() - return_pool_mock.configure_mock( - name='host_0@backend_0#pool_0', - total_volumes='3', - total_capacity_gb='500', - free_capacity_gb='380', - provisioned_capacity_gb='120', - allocated_capacity_gb='120') - - m_get_storage_pool_by_name = mock.Mock( - side_effect=lambda name: 
return_pool_mock) - - m_cinder_helper.return_value = mock.Mock( - get_storage_pool_by_name=m_get_storage_pool_by_name) - - storage_model = self.fake_cdmc.generate_scenario_1() - self.fake_cdmc.cluster_data_model = storage_model - handler = cnotification.VolumeCreateEnd(self.fake_cdmc) - - message = self.load_message('scenario_1_bootable-volume-create.json') - handler.info( - ctxt=self.context, - publisher_id=message['publisher_id'], - event_type=message['event_type'], - payload=message['payload'], - metadata=self.FAKE_METADATA, - ) - # check that volume00 was added to the model - volume_00_name = 'VOLUME_00' - volume_00 = storage_model.get_volume_by_uuid(volume_00_name) - self.assertEqual(volume_00_name, volume_00.uuid) - self.assertTrue(volume_00.bootable) - # check that capacity was updated - pool_0_name = 'host_0@backend_0#pool_0' - m_get_storage_pool_by_name.assert_called_once_with(pool_0_name) - pool_0 = storage_model.get_pool_by_pool_name(pool_0_name) - self.assertEqual(pool_0.name, pool_0_name) - self.assertEqual(3, pool_0.total_volumes) - self.assertEqual(380, pool_0.free_capacity_gb) - self.assertEqual(120, pool_0.allocated_capacity_gb) - self.assertEqual(120, pool_0.provisioned_capacity_gb) - - @mock.patch.object(cinder_helper, 'CinderHelper') - def test_cinder_volume_create_pool_notfound(self, m_cinder_helper): - """check creating volume in not existing pool and node""" - - # get_storage_pool_by_name mock - return_pool_mock = mock.Mock() - return_pool_mock.configure_mock( - name='host_2@backend_2#pool_0', - total_volumes='1', - total_capacity_gb='500', - free_capacity_gb='460', - provisioned_capacity_gb='40', - allocated_capacity_gb='40') - - m_get_storage_pool_by_name = mock.Mock( - side_effect=lambda name: return_pool_mock) - - # create storage_node_by_name mock - return_node_mock = mock.Mock() - return_node_mock.configure_mock( - host='host_2@backend_2', - zone='nova', - state='up', - status='enabled') - - m_get_storage_node_by_name = mock.Mock( - 
side_effect=lambda name: return_node_mock) - - m_get_volume_type_by_backendname = mock.Mock( - side_effect=lambda name: mock.Mock('backend_2')) - - m_cinder_helper.return_value = mock.Mock( - get_storage_pool_by_name=m_get_storage_pool_by_name, - get_storage_node_by_name=m_get_storage_node_by_name, - get_volume_type_by_backendname=m_get_volume_type_by_backendname) - - storage_model = self.fake_cdmc.generate_scenario_1() - self.fake_cdmc.cluster_data_model = storage_model - handler = cnotification.VolumeCreateEnd(self.fake_cdmc) - - message = self.load_message( - 'scenario_1_volume-create_pool_notfound.json') - handler.info( - ctxt=self.context, - publisher_id=message['publisher_id'], - event_type=message['event_type'], - payload=message['payload'], - metadata=self.FAKE_METADATA, - ) - # check that volume00 was added to the model - volume_00_name = 'VOLUME_00' - volume_00 = storage_model.get_volume_by_uuid(volume_00_name) - self.assertEqual(volume_00_name, volume_00.uuid) - # check that capacity was updated - node_2_name = 'host_2@backend_2' - pool_0_name = node_2_name + '#pool_0' - pool_0 = storage_model.get_pool_by_pool_name(pool_0_name) - self.assertEqual(pool_0.name, pool_0_name) - self.assertEqual(1, pool_0.total_volumes) - self.assertEqual(460, pool_0.free_capacity_gb) - self.assertEqual(40, pool_0.allocated_capacity_gb) - self.assertEqual(40, pool_0.provisioned_capacity_gb) - # check that node was added - m_get_storage_node_by_name.assert_called_once_with(node_2_name) - - @mock.patch.object(cinder_helper, 'CinderHelper') - def test_cinder_error_volume_unmapped(self, m_cinder_helper): - """test creating error volume unmapped""" - - m_get_storage_pool_by_name = mock.Mock( - side_effect=exception.PoolNotFound(name="TEST")) - m_cinder_helper.return_value = mock.Mock( - get_storage_pool_by_name=m_get_storage_pool_by_name) - - storage_model = self.fake_cdmc.generate_scenario_1() - self.fake_cdmc.cluster_data_model = storage_model - handler = 
cnotification.VolumeCreateEnd(self.fake_cdmc) - - message = self.load_message('scenario_1_error-volume-create.json') - handler.info( - ctxt=self.context, - publisher_id=message['publisher_id'], - event_type=message['event_type'], - payload=message['payload'], - metadata=self.FAKE_METADATA, - ) - - # we do not call get_storage_pool_by_name - m_get_storage_pool_by_name.assert_not_called() - # check that volume00 was added to the model - volume_00_name = 'VOLUME_00' - volume_00 = storage_model.get_volume_by_uuid(volume_00_name) - self.assertEqual(volume_00_name, volume_00.uuid) - - @mock.patch.object(cinder_helper, 'CinderHelper') - def test_cinder_volume_update(self, m_cinder_helper): - """test updating volume in existing pool and node""" - - storage_model = self.fake_cdmc.generate_scenario_1() - self.fake_cdmc.cluster_data_model = storage_model - handler = cnotification.VolumeUpdateEnd(self.fake_cdmc) - - volume_0_name = 'VOLUME_0' - volume_0 = storage_model.get_volume_by_uuid(volume_0_name) - self.assertEqual('name_0', volume_0.name) - - # create storage_pool_by name mock - return_pool_mock = mock.Mock() - return_pool_mock.configure_mock( - name='host_0@backend_0#pool_0', - total_volumes='2', - total_capacity_gb='500', - free_capacity_gb='420', - provisioned_capacity_gb='80', - allocated_capacity_gb='80') - - m_get_storage_pool_by_name = mock.Mock( - side_effect=lambda name: return_pool_mock) - - m_cinder_helper.return_value = mock.Mock( - get_storage_pool_by_name=m_get_storage_pool_by_name) - - message = self.load_message('scenario_1_volume-update.json') - handler.info( - ctxt=self.context, - publisher_id=message['publisher_id'], - event_type=message['event_type'], - payload=message['payload'], - metadata=self.FAKE_METADATA, - ) - # check that name of volume_0 was updated in the model - volume_0 = storage_model.get_volume_by_uuid(volume_0_name) - self.assertEqual('name_01', volume_0.name) - - @mock.patch.object(cinder_helper, 'CinderHelper') - def 
test_cinder_volume_delete(self, m_cinder_helper): - """test deleting volume""" - - # create storage_pool_by name mock - return_pool_mock = mock.Mock() - return_pool_mock.configure_mock( - name='host_0@backend_0#pool_0', - total_volumes='1', - total_capacity_gb='500', - free_capacity_gb='460', - provisioned_capacity_gb='40', - allocated_capacity_gb='40') - - m_get_storage_pool_by_name = mock.Mock( - side_effect=lambda name: return_pool_mock) - - m_cinder_helper.return_value = mock.Mock( - get_storage_pool_by_name=m_get_storage_pool_by_name) - - storage_model = self.fake_cdmc.generate_scenario_1() - self.fake_cdmc.cluster_data_model = storage_model - handler = cnotification.VolumeDeleteEnd(self.fake_cdmc) - - # volume exists before consuming - volume_0_uuid = 'VOLUME_0' - volume_0 = storage_model.get_volume_by_uuid(volume_0_uuid) - self.assertEqual(volume_0_uuid, volume_0.uuid) - - message = self.load_message('scenario_1_volume-delete.json') - handler.info( - ctxt=self.context, - publisher_id=message['publisher_id'], - event_type=message['event_type'], - payload=message['payload'], - metadata=self.FAKE_METADATA, - ) - - # volume does not exists after consuming - self.assertRaises( - exception.VolumeNotFound, - storage_model.get_volume_by_uuid, volume_0_uuid) - - # check that capacity was updated - pool_0_name = 'host_0@backend_0#pool_0' - m_get_storage_pool_by_name.assert_called_once_with(pool_0_name) - pool_0 = storage_model.get_pool_by_pool_name(pool_0_name) - self.assertEqual(pool_0.name, pool_0_name) - self.assertEqual(1, pool_0.total_volumes) - self.assertEqual(460, pool_0.free_capacity_gb) - self.assertEqual(40, pool_0.allocated_capacity_gb) - self.assertEqual(40, pool_0.provisioned_capacity_gb) diff --git a/watcher/tests/decision_engine/model/notification/test_notifications.py b/watcher/tests/decision_engine/model/notification/test_notifications.py deleted file mode 100644 index 9e2e6fa..0000000 --- 
a/watcher/tests/decision_engine/model/notification/test_notifications.py +++ /dev/null @@ -1,108 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Authors: Vincent FRANCOISE -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os - -import mock -from oslo_serialization import jsonutils - -from watcher.common import context -from watcher.common import service as watcher_service -from watcher.decision_engine.model.notification import base -from watcher.decision_engine.model.notification import filtering -from watcher.tests import base as base_test -from watcher.tests.decision_engine.model.notification import fake_managers - - -class DummyManager(fake_managers.FakeManager): - - @property - def notification_endpoints(self): - return [DummyNotification(self.fake_cdmc)] - - -class DummyNotification(base.NotificationEndpoint): - - @property - def filter_rule(self): - return filtering.NotificationFilter( - publisher_id=r'.*', - event_type=r'compute.dummy', - payload={'data': {'nested': r'^T.*'}}, - ) - - def info(self, ctxt, publisher_id, event_type, payload, metadata): - pass - - -class NotificationTestCase(base_test.TestCase): - - def load_message(self, filename): - cwd = os.path.abspath(os.path.dirname(__file__)) - data_folder = os.path.join(cwd, "data") - - with open(os.path.join(data_folder, filename), 'rb') as json_file: - json_data = jsonutils.load(json_file) - - return json_data - - -class TestReceiveNotifications(NotificationTestCase): - - 
def setUp(self): - super(TestReceiveNotifications, self).setUp() - - p_from_dict = mock.patch.object(context.RequestContext, 'from_dict') - m_from_dict = p_from_dict.start() - m_from_dict.return_value = self.context - self.addCleanup(p_from_dict.stop) - - @mock.patch.object(watcher_service.ServiceHeartbeat, 'send_beat') - @mock.patch.object(DummyNotification, 'info') - def test_receive_dummy_notification(self, m_info, m_heartbeat): - message = { - 'publisher_id': 'nova-compute', - 'event_type': 'compute.dummy', - 'payload': {'data': {'nested': 'TEST'}}, - 'priority': 'INFO', - } - de_service = watcher_service.Service(DummyManager) - incoming = mock.Mock(ctxt=self.context.to_dict(), message=message) - - de_service.notification_handler.dispatcher.dispatch(incoming) - - m_info.assert_called_once_with( - self.context, 'nova-compute', 'compute.dummy', - {'data': {'nested': 'TEST'}}, - {'message_id': None, 'timestamp': None}) - - @mock.patch.object(watcher_service.ServiceHeartbeat, 'send_beat') - @mock.patch.object(DummyNotification, 'info') - def test_skip_unwanted_notification(self, m_info, m_heartbeat): - message = { - 'publisher_id': 'nova-compute', - 'event_type': 'compute.dummy', - 'payload': {'data': {'nested': 'unwanted'}}, - 'priority': 'INFO', - } - de_service = watcher_service.Service(DummyManager) - incoming = mock.Mock(ctxt=self.context.to_dict(), message=message) - - de_service.notification_handler.dispatcher.dispatch(incoming) - - self.assertEqual(0, m_info.call_count) diff --git a/watcher/tests/decision_engine/model/notification/test_nova_notifications.py b/watcher/tests/decision_engine/model/notification/test_nova_notifications.py deleted file mode 100644 index f257dc7..0000000 --- a/watcher/tests/decision_engine/model/notification/test_nova_notifications.py +++ /dev/null @@ -1,523 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Authors: Vincent FRANCOISE -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you 
may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os - -import mock -from oslo_serialization import jsonutils - -from watcher.common import context -from watcher.common import exception -from watcher.common import nova_helper -from watcher.common import service as watcher_service -from watcher.decision_engine.model import element -from watcher.decision_engine.model.notification import nova as novanotification -from watcher.tests import base as base_test -from watcher.tests.decision_engine.model import faker_cluster_state -from watcher.tests.decision_engine.model.notification import fake_managers - - -class NotificationTestCase(base_test.TestCase): - - @staticmethod - def load_message(filename): - cwd = os.path.abspath(os.path.dirname(__file__)) - data_folder = os.path.join(cwd, "data") - - with open(os.path.join(data_folder, filename), 'rb') as json_file: - json_data = jsonutils.load(json_file) - - return json_data - - -class TestReceiveNovaNotifications(NotificationTestCase): - - FAKE_METADATA = {'message_id': None, 'timestamp': None} - - def setUp(self): - super(TestReceiveNovaNotifications, self).setUp() - - p_from_dict = mock.patch.object(context.RequestContext, 'from_dict') - m_from_dict = p_from_dict.start() - m_from_dict.return_value = self.context - self.addCleanup(p_from_dict.stop) - p_heartbeat = mock.patch.object( - watcher_service.ServiceHeartbeat, "send_beat") - self.m_heartbeat = p_heartbeat.start() - self.addCleanup(p_heartbeat.stop) - - @mock.patch.object(novanotification.ServiceUpdated, 'info') - def 
test_nova_receive_service_update(self, m_info): - message = self.load_message('service-update.json') - expected_message = message['payload'] - - de_service = watcher_service.Service(fake_managers.FakeManager) - incoming = mock.Mock(ctxt=self.context.to_dict(), message=message) - - de_service.notification_handler.dispatcher.dispatch(incoming) - m_info.assert_called_once_with( - self.context, 'nova-compute:host1', 'service.update', - expected_message, self.FAKE_METADATA) - - @mock.patch.object(novanotification.InstanceCreated, 'info') - def test_nova_receive_instance_create(self, m_info): - message = self.load_message('instance-create.json') - expected_message = message['payload'] - - de_service = watcher_service.Service(fake_managers.FakeManager) - incoming = mock.Mock(ctxt=self.context.to_dict(), message=message) - - de_service.notification_handler.dispatcher.dispatch(incoming) - m_info.assert_called_once_with( - self.context, 'nova-compute:compute', 'instance.update', - expected_message, self.FAKE_METADATA) - - @mock.patch.object(novanotification.InstanceUpdated, 'info') - def test_nova_receive_instance_update(self, m_info): - message = self.load_message('instance-update.json') - expected_message = message['payload'] - - de_service = watcher_service.Service(fake_managers.FakeManager) - incoming = mock.Mock(ctxt=self.context.to_dict(), message=message) - - de_service.notification_handler.dispatcher.dispatch(incoming) - m_info.assert_called_once_with( - self.context, 'nova-compute:compute', 'instance.update', - expected_message, self.FAKE_METADATA) - - @mock.patch.object(novanotification.InstanceDeletedEnd, 'info') - def test_nova_receive_instance_delete_end(self, m_info): - message = self.load_message('instance-delete-end.json') - expected_message = message['payload'] - - de_service = watcher_service.Service(fake_managers.FakeManager) - incoming = mock.Mock(ctxt=self.context.to_dict(), message=message) - - 
de_service.notification_handler.dispatcher.dispatch(incoming) - m_info.assert_called_once_with( - self.context, 'nova-compute:compute', 'instance.delete.end', - expected_message, self.FAKE_METADATA) - - -class TestNovaNotifications(NotificationTestCase): - - FAKE_METADATA = {'message_id': None, 'timestamp': None} - - def setUp(self): - super(TestNovaNotifications, self).setUp() - # fake cluster - self.fake_cdmc = faker_cluster_state.FakerModelCollector() - - def test_nova_service_update(self): - compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes() - self.fake_cdmc.cluster_data_model = compute_model - handler = novanotification.ServiceUpdated(self.fake_cdmc) - - node0_uuid = 'Node_0' - node0 = compute_model.get_node_by_uuid(node0_uuid) - - message = self.load_message('scenario3_service-update-disabled.json') - - self.assertEqual('hostname_0', node0.hostname) - self.assertEqual(element.ServiceState.ONLINE.value, node0.state) - self.assertEqual(element.ServiceState.ENABLED.value, node0.status) - - handler.info( - ctxt=self.context, - publisher_id=message['publisher_id'], - event_type=message['event_type'], - payload=message['payload'], - metadata=self.FAKE_METADATA, - ) - - self.assertEqual('Node_0', node0.hostname) - self.assertEqual(element.ServiceState.OFFLINE.value, node0.state) - self.assertEqual(element.ServiceState.DISABLED.value, node0.status) - - message = self.load_message('scenario3_service-update-enabled.json') - - handler.info( - ctxt=self.context, - publisher_id=message['publisher_id'], - event_type=message['event_type'], - payload=message['payload'], - metadata=self.FAKE_METADATA, - ) - - self.assertEqual('Node_0', node0.hostname) - self.assertEqual(element.ServiceState.ONLINE.value, node0.state) - self.assertEqual(element.ServiceState.ENABLED.value, node0.status) - - def test_nova_instance_update(self): - compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes() - self.fake_cdmc.cluster_data_model = compute_model - handler = 
novanotification.InstanceUpdated(self.fake_cdmc) - - instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc' - instance0 = compute_model.get_instance_by_uuid(instance0_uuid) - - message = self.load_message('scenario3_instance-update.json') - - self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state) - - handler.info( - ctxt=self.context, - publisher_id=message['publisher_id'], - event_type=message['event_type'], - payload=message['payload'], - metadata=self.FAKE_METADATA, - ) - - self.assertEqual(element.InstanceState.PAUSED.value, instance0.state) - - @mock.patch.object(nova_helper, "NovaHelper") - def test_nova_instance_update_notfound_still_creates( - self, m_nova_helper_cls): - m_get_compute_node_by_hostname = mock.Mock( - side_effect=lambda uuid: mock.Mock( - name='m_get_compute_node_by_hostname', - id=3, - hypervisor_hostname="Node_2", - state='up', - status='enabled', - uuid=uuid, - memory_mb=7777, - vcpus=42, - free_disk_gb=974, - local_gb=1337)) - m_nova_helper_cls.return_value = mock.Mock( - get_compute_node_by_hostname=m_get_compute_node_by_hostname, - name='m_nova_helper') - compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes() - self.fake_cdmc.cluster_data_model = compute_model - handler = novanotification.InstanceUpdated(self.fake_cdmc) - - instance0_uuid = '9966d6bd-a45c-4e1c-9d57-3054899a3ec7' - - message = self.load_message('scenario3_notfound_instance-update.json') - - handler.info( - ctxt=self.context, - publisher_id=message['publisher_id'], - event_type=message['event_type'], - payload=message['payload'], - metadata=self.FAKE_METADATA, - ) - - instance0 = compute_model.get_instance_by_uuid(instance0_uuid) - - self.assertEqual(element.InstanceState.PAUSED.value, instance0.state) - self.assertEqual(1, instance0.vcpus) - self.assertEqual(1, instance0.disk_capacity) - self.assertEqual(512, instance0.memory) - - m_get_compute_node_by_hostname.assert_called_once_with('Node_2') - node_2 = 
compute_model.get_node_by_uuid('Node_2') - self.assertEqual(7777, node_2.memory) - self.assertEqual(42, node_2.vcpus) - self.assertEqual(974, node_2.disk) - self.assertEqual(1337, node_2.disk_capacity) - - @mock.patch.object(nova_helper, "NovaHelper") - def test_instance_update_node_notfound_set_unmapped( - self, m_nova_helper_cls): - m_get_compute_node_by_hostname = mock.Mock( - side_effect=exception.ComputeNodeNotFound(name="TEST")) - m_nova_helper_cls.return_value = mock.Mock( - get_compute_node_by_hostname=m_get_compute_node_by_hostname, - name='m_nova_helper') - - compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes() - self.fake_cdmc.cluster_data_model = compute_model - handler = novanotification.InstanceUpdated(self.fake_cdmc) - - instance0_uuid = '9966d6bd-a45c-4e1c-9d57-3054899a3ec7' - - message = self.load_message( - 'scenario3_notfound_instance-update.json') - - handler.info( - ctxt=self.context, - publisher_id=message['publisher_id'], - event_type=message['event_type'], - payload=message['payload'], - metadata=self.FAKE_METADATA, - ) - - instance0 = compute_model.get_instance_by_uuid(instance0_uuid) - - self.assertEqual(element.InstanceState.PAUSED.value, instance0.state) - self.assertEqual(1, instance0.vcpus) - self.assertEqual(1, instance0.disk) - self.assertEqual(1, instance0.disk_capacity) - self.assertEqual(512, instance0.memory) - - m_get_compute_node_by_hostname.assert_any_call('Node_2') - self.assertRaises( - exception.ComputeNodeNotFound, - compute_model.get_node_by_uuid, 'Node_2') - - def test_nova_instance_create(self): - compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes() - self.fake_cdmc.cluster_data_model = compute_model - handler = novanotification.InstanceCreated(self.fake_cdmc) - - instance0_uuid = 'c03c0bf9-f46e-4e4f-93f1-817568567ee2' - - self.assertRaises( - exception.InstanceNotFound, - compute_model.get_instance_by_uuid, instance0_uuid) - - message = self.load_message('scenario3_instance-create.json') - 
handler.info( - ctxt=self.context, - publisher_id=message['publisher_id'], - event_type=message['event_type'], - payload=message['payload'], - metadata=self.FAKE_METADATA, - ) - - instance0 = compute_model.get_instance_by_uuid(instance0_uuid) - - self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state) - self.assertEqual(1, instance0.vcpus) - self.assertEqual(1, instance0.disk_capacity) - self.assertEqual(512, instance0.memory) - - def test_nova_instance_delete_end(self): - compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes() - self.fake_cdmc.cluster_data_model = compute_model - handler = novanotification.InstanceDeletedEnd(self.fake_cdmc) - - instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc' - - # Before - self.assertTrue(compute_model.get_instance_by_uuid(instance0_uuid)) - - message = self.load_message('scenario3_instance-delete-end.json') - handler.info( - ctxt=self.context, - publisher_id=message['publisher_id'], - event_type=message['event_type'], - payload=message['payload'], - metadata=self.FAKE_METADATA, - ) - - # After - self.assertRaises( - exception.InstanceNotFound, - compute_model.get_instance_by_uuid, instance0_uuid) - - -class TestLegacyNovaNotifications(NotificationTestCase): - - FAKE_METADATA = {'message_id': None, 'timestamp': None} - - def setUp(self): - super(TestLegacyNovaNotifications, self).setUp() - # fake cluster - self.fake_cdmc = faker_cluster_state.FakerModelCollector() - - def test_legacy_instance_created_end(self): - compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes() - self.fake_cdmc.cluster_data_model = compute_model - handler = novanotification.LegacyInstanceCreatedEnd(self.fake_cdmc) - - instance0_uuid = 'c03c0bf9-f46e-4e4f-93f1-817568567ee2' - self.assertRaises( - exception.InstanceNotFound, - compute_model.get_instance_by_uuid, instance0_uuid) - - message = self.load_message( - 'scenario3_legacy_instance-create-end.json') - - handler.info( - ctxt=self.context, - 
publisher_id=message['publisher_id'], - event_type=message['event_type'], - payload=message['payload'], - metadata=self.FAKE_METADATA, - ) - - instance0 = compute_model.get_instance_by_uuid(instance0_uuid) - - self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state) - self.assertEqual(1, instance0.vcpus) - self.assertEqual(1, instance0.disk_capacity) - self.assertEqual(512, instance0.memory) - - def test_legacy_instance_updated(self): - compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes() - self.fake_cdmc.cluster_data_model = compute_model - handler = novanotification.LegacyInstanceUpdated(self.fake_cdmc) - - instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc' - instance0 = compute_model.get_instance_by_uuid(instance0_uuid) - - message = self.load_message('scenario3_legacy_instance-update.json') - - self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state) - - handler.info( - ctxt=self.context, - publisher_id=message['publisher_id'], - event_type=message['event_type'], - payload=message['payload'], - metadata=self.FAKE_METADATA, - ) - - self.assertEqual(element.InstanceState.PAUSED.value, instance0.state) - - @mock.patch.object(nova_helper, "NovaHelper") - def test_legacy_instance_update_node_notfound_still_creates( - self, m_nova_helper_cls): - m_get_compute_node_by_hostname = mock.Mock( - side_effect=lambda uuid: mock.Mock( - name='m_get_compute_node_by_hostname', - id=3, - uuid=uuid, - hypervisor_hostname="Node_2", - state='up', - status='enabled', - memory_mb=7777, - vcpus=42, - free_disk_gb=974, - local_gb=1337)) - m_nova_helper_cls.return_value = mock.Mock( - get_compute_node_by_hostname=m_get_compute_node_by_hostname, - name='m_nova_helper') - - compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes() - self.fake_cdmc.cluster_data_model = compute_model - handler = novanotification.LegacyInstanceUpdated(self.fake_cdmc) - - instance0_uuid = '9966d6bd-a45c-4e1c-9d57-3054899a3ec7' - - message = 
self.load_message( - 'scenario3_notfound_legacy_instance-update.json') - - handler.info( - ctxt=self.context, - publisher_id=message['publisher_id'], - event_type=message['event_type'], - payload=message['payload'], - metadata=self.FAKE_METADATA, - ) - - instance0 = compute_model.get_instance_by_uuid(instance0_uuid) - - self.assertEqual(element.InstanceState.PAUSED.value, instance0.state) - self.assertEqual(1, instance0.vcpus) - self.assertEqual(1, instance0.disk) - self.assertEqual(1, instance0.disk_capacity) - self.assertEqual(512, instance0.memory) - - m_get_compute_node_by_hostname.assert_any_call('Node_2') - node_2 = compute_model.get_node_by_uuid('Node_2') - self.assertEqual(7777, node_2.memory) - self.assertEqual(42, node_2.vcpus) - self.assertEqual(974, node_2.disk) - self.assertEqual(1337, node_2.disk_capacity) - - @mock.patch.object(nova_helper, "NovaHelper") - def test_legacy_instance_update_node_notfound_set_unmapped( - self, m_nova_helper_cls): - m_get_compute_node_by_hostname = mock.Mock( - side_effect=exception.ComputeNodeNotFound) - m_nova_helper_cls.return_value = mock.Mock( - get_compute_node_by_hostname=m_get_compute_node_by_hostname, - name='m_nova_helper') - - compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes() - self.fake_cdmc.cluster_data_model = compute_model - handler = novanotification.LegacyInstanceUpdated(self.fake_cdmc) - - instance0_uuid = '9966d6bd-a45c-4e1c-9d57-3054899a3ec7' - - message = self.load_message( - 'scenario3_notfound_legacy_instance-update.json') - - handler.info( - ctxt=self.context, - publisher_id=message['publisher_id'], - event_type=message['event_type'], - payload=message['payload'], - metadata=self.FAKE_METADATA, - ) - - instance0 = compute_model.get_instance_by_uuid(instance0_uuid) - - self.assertEqual(element.InstanceState.PAUSED.value, instance0.state) - self.assertEqual(1, instance0.vcpus) - self.assertEqual(1, instance0.disk) - self.assertEqual(1, instance0.disk_capacity) - self.assertEqual(512, 
instance0.memory) - - m_get_compute_node_by_hostname.assert_any_call('Node_2') - self.assertRaises( - exception.ComputeNodeNotFound, - compute_model.get_node_by_uuid, 'Node_2') - - def test_legacy_live_migrated_end(self): - compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes() - self.fake_cdmc.cluster_data_model = compute_model - handler = novanotification.LegacyLiveMigratedEnd(self.fake_cdmc) - - instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc' - instance0 = compute_model.get_instance_by_uuid(instance0_uuid) - - node = compute_model.get_node_by_instance_uuid(instance0_uuid) - self.assertEqual('Node_0', node.uuid) - - message = self.load_message( - 'scenario3_legacy_livemigration-post-dest-end.json') - handler.info( - ctxt=self.context, - publisher_id=message['publisher_id'], - event_type=message['event_type'], - payload=message['payload'], - metadata=self.FAKE_METADATA, - ) - node = compute_model.get_node_by_instance_uuid(instance0_uuid) - self.assertEqual('Node_1', node.uuid) - self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state) - - def test_legacy_instance_deleted_end(self): - compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes() - self.fake_cdmc.cluster_data_model = compute_model - handler = novanotification.LegacyInstanceDeletedEnd(self.fake_cdmc) - - instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc' - - # Before - self.assertTrue(compute_model.get_instance_by_uuid(instance0_uuid)) - - message = self.load_message( - 'scenario3_legacy_instance-delete-end.json') - handler.info( - ctxt=self.context, - publisher_id=message['publisher_id'], - event_type=message['event_type'], - payload=message['payload'], - metadata=self.FAKE_METADATA, - ) - - # After - self.assertRaises( - exception.InstanceNotFound, - compute_model.get_instance_by_uuid, instance0_uuid) diff --git a/watcher/tests/decision_engine/model/test_element.py b/watcher/tests/decision_engine/model/test_element.py deleted file mode 100644 index 
1df7324..0000000 --- a/watcher/tests/decision_engine/model/test_element.py +++ /dev/null @@ -1,154 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2017 b<>com -# -# Authors: Vincent FRANCOISE -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from watcher.decision_engine.model import element -from watcher.tests import base - - -class TestElement(base.TestCase): - - scenarios = [ - ("ComputeNode_with_all_fields", dict( - cls=element.Instance, - data={ - 'uuid': 'FAKE_UUID', - 'state': 'state', - 'hostname': 'hostname', - 'human_id': 'human_id', - 'memory': 111, - 'vcpus': 222, - 'disk': 333, - 'disk_capacity': 444, - })), - ("ComputeNode_with_some_fields", dict( - cls=element.Instance, - data={ - 'uuid': 'FAKE_UUID', - 'state': 'state', - 'vcpus': 222, - 'disk': 333, - 'disk_capacity': 444, - })), - ("Instance_with_all_fields", dict( - cls=element.Instance, - data={ - 'uuid': 'FAKE_UUID', - 'state': 'state', - 'hostname': 'hostname', - 'human_id': 'human_id', - 'memory': 111, - 'vcpus': 222, - 'disk': 333, - 'disk_capacity': 444, - })), - ("Instance_with_some_fields", dict( - cls=element.Instance, - data={ - 'uuid': 'FAKE_UUID', - 'state': 'state', - 'vcpus': 222, - 'disk': 333, - 'disk_capacity': 444, - })), - ] - - def test_as_xml_element(self): - el = self.cls(**self.data) - el.as_xml_element() - - -class TestStorageElement(base.TestCase): - - scenarios = [ - ("StorageNode_with_all_fields", dict( - cls=element.StorageNode, - data={ - 'host': 'host@backend', - 
'zone': 'zone', - 'status': 'enabled', - 'state': 'up', - 'volume_type': 'volume_type', - })), - ("Pool_with_all_fields", dict( - cls=element.Pool, - data={ - 'name': 'host@backend#pool', - 'total_volumes': 1, - 'total_capacity_gb': 500, - 'free_capacity_gb': 420, - 'provisioned_capacity_gb': 80, - 'allocated_capacity_gb': 80, - 'virtual_free': 420, - })), - ("Pool_without_virtual_free_fields", dict( - cls=element.Pool, - data={ - 'name': 'host@backend#pool', - 'total_volumes': 1, - 'total_capacity_gb': 500, - 'free_capacity_gb': 420, - 'provisioned_capacity_gb': 80, - 'allocated_capacity_gb': 80, - })), - ("Volume_with_all_fields", dict( - cls=element.Volume, - data={ - 'uuid': 'FAKE_UUID', - 'size': 1, - 'status': 'in-use', - 'attachments': '[{"key": "value"}]', - 'name': 'name', - 'multiattach': 'false', - 'snapshot_id': '', - 'project_id': 'project_id', - 'metadata': '{"key": "value"}', - 'bootable': 'false', - 'human_id': 'human_id', - })), - ("Volume_without_bootable_fields", dict( - cls=element.Volume, - data={ - 'uuid': 'FAKE_UUID', - 'size': 1, - 'status': 'in-use', - 'attachments': '[]', - 'name': 'name', - 'multiattach': 'false', - 'snapshot_id': '', - 'project_id': 'project_id', - 'metadata': '{"key": "value"}', - 'human_id': 'human_id', - })), - ("Volume_without_human_id_fields", dict( - cls=element.Volume, - data={ - 'uuid': 'FAKE_UUID', - 'size': 1, - 'status': 'in-use', - 'attachments': '[]', - 'name': 'name', - 'multiattach': 'false', - 'snapshot_id': '', - 'project_id': 'project_id', - 'metadata': '{"key": "value"}', - })), - ] - - def test_as_xml_element(self): - el = self.cls(**self.data) - el.as_xml_element() diff --git a/watcher/tests/decision_engine/model/test_model.py b/watcher/tests/decision_engine/model/test_model.py deleted file mode 100644 index c4cacb9..0000000 --- a/watcher/tests/decision_engine/model/test_model.py +++ /dev/null @@ -1,369 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Authors: Jean-Emile DARTOIS 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os - -from oslo_utils import uuidutils - -from watcher.common import exception -from watcher.decision_engine.model import element -from watcher.decision_engine.model import model_root -from watcher.tests import base -from watcher.tests.decision_engine.model import faker_cluster_state - - -class TestModel(base.TestCase): - - def load_data(self, filename): - cwd = os.path.abspath(os.path.dirname(__file__)) - data_folder = os.path.join(cwd, "data") - - with open(os.path.join(data_folder, filename), 'rb') as xml_file: - xml_data = xml_file.read() - - return xml_data - - def load_model(self, filename): - return model_root.ModelRoot.from_xml(self.load_data(filename)) - - def test_model_structure(self): - fake_cluster = faker_cluster_state.FakerModelCollector() - model1 = fake_cluster.build_scenario_1() - - self.assertEqual(5, len(model1.get_all_compute_nodes())) - self.assertEqual(35, len(model1.get_all_instances())) - self.assertEqual(8, len(model1.edges())) - - expected_struct_str = self.load_data('scenario_1.xml') - model2 = model_root.ModelRoot.from_xml(expected_struct_str) - - self.assertTrue(model_root.ModelRoot.is_isomorphic(model2, model1)) - - def test_build_model_from_xml(self): - fake_cluster = faker_cluster_state.FakerModelCollector() - - expected_model = fake_cluster.generate_scenario_1() - struct_str = self.load_data('scenario_1.xml') - - model = model_root.ModelRoot.from_xml(struct_str) - 
self.assertEqual(expected_model.to_string(), model.to_string()) - - def test_get_node_by_instance_uuid(self): - model = model_root.ModelRoot() - uuid_ = "{0}".format(uuidutils.generate_uuid()) - node = element.ComputeNode(id=1) - node.uuid = uuid_ - model.add_node(node) - self.assertEqual(node, model.get_node_by_uuid(uuid_)) - uuid_ = "{0}".format(uuidutils.generate_uuid()) - instance = element.Instance(id=1) - instance.uuid = uuid_ - model.add_instance(instance) - self.assertEqual(instance, model.get_instance_by_uuid(uuid_)) - model.map_instance(instance, node) - self.assertEqual(node, model.get_node_by_instance_uuid(instance.uuid)) - - def test_add_node(self): - model = model_root.ModelRoot() - uuid_ = "{0}".format(uuidutils.generate_uuid()) - node = element.ComputeNode(id=1) - node.uuid = uuid_ - model.add_node(node) - self.assertEqual(node, model.get_node_by_uuid(uuid_)) - - def test_delete_node(self): - model = model_root.ModelRoot() - uuid_ = "{0}".format(uuidutils.generate_uuid()) - node = element.ComputeNode(id=1) - node.uuid = uuid_ - model.add_node(node) - self.assertEqual(node, model.get_node_by_uuid(uuid_)) - model.remove_node(node) - self.assertRaises(exception.ComputeNodeNotFound, - model.get_node_by_uuid, uuid_) - - def test_get_all_compute_nodes(self): - model = model_root.ModelRoot() - for id_ in range(10): - uuid_ = "{0}".format(uuidutils.generate_uuid()) - node = element.ComputeNode(id_) - node.uuid = uuid_ - model.add_node(node) - all_nodes = model.get_all_compute_nodes() - for uuid_ in all_nodes: - node = model.get_node_by_uuid(uuid_) - model.assert_node(node) - - def test_set_get_state_nodes(self): - model = model_root.ModelRoot() - uuid_ = "{0}".format(uuidutils.generate_uuid()) - node = element.ComputeNode(id=1) - node.uuid = uuid_ - model.add_node(node) - - self.assertIn(node.state, [el.value for el in element.ServiceState]) - - node = model.get_node_by_uuid(uuid_) - node.state = element.ServiceState.OFFLINE.value - 
self.assertIn(node.state, [el.value for el in element.ServiceState]) - - def test_node_from_uuid_raise(self): - model = model_root.ModelRoot() - uuid_ = "{0}".format(uuidutils.generate_uuid()) - node = element.ComputeNode(id=1) - node.uuid = uuid_ - model.add_node(node) - - uuid2 = "{0}".format(uuidutils.generate_uuid()) - self.assertRaises(exception.ComputeNodeNotFound, - model.get_node_by_uuid, uuid2) - - def test_remove_node_raise(self): - model = model_root.ModelRoot() - uuid_ = "{0}".format(uuidutils.generate_uuid()) - node = element.ComputeNode(id=1) - node.uuid = uuid_ - model.add_node(node) - - uuid2 = "{0}".format(uuidutils.generate_uuid()) - node2 = element.ComputeNode(id=2) - node2.uuid = uuid2 - - self.assertRaises(exception.ComputeNodeNotFound, - model.remove_node, node2) - - def test_assert_node_raise(self): - model = model_root.ModelRoot() - uuid_ = "{0}".format(uuidutils.generate_uuid()) - node = element.ComputeNode(id=1) - node.uuid = uuid_ - model.add_node(node) - self.assertRaises(exception.IllegalArgumentException, - model.assert_node, "objet_qcq") - - def test_instance_from_uuid_raise(self): - fake_cluster = faker_cluster_state.FakerModelCollector() - model = fake_cluster.generate_scenario_1() - self.assertRaises(exception.InstanceNotFound, - model.get_instance_by_uuid, "valeur_qcq") - - def test_assert_instance_raise(self): - model = model_root.ModelRoot() - self.assertRaises(exception.IllegalArgumentException, - model.assert_instance, "valeur_qcq") - - -class TestStorageModel(base.TestCase): - - def load_data(self, filename): - cwd = os.path.abspath(os.path.dirname(__file__)) - data_folder = os.path.join(cwd, "data") - - with open(os.path.join(data_folder, filename), 'rb') as xml_file: - xml_data = xml_file.read() - - return xml_data - - def load_model(self, filename): - return model_root.StorageModelRoot.from_xml(self.load_data(filename)) - - def test_model_structure(self): - fake_cluster = faker_cluster_state.FakerStorageModelCollector() - 
model1 = fake_cluster.build_scenario_1() - - self.assertEqual(2, len(model1.get_all_storage_nodes())) - self.assertEqual(9, len(model1.get_all_volumes())) - self.assertEqual(12, len(model1.edges())) - - expected_struct_str = self.load_data('storage_scenario_1.xml') - model2 = model_root.StorageModelRoot.from_xml(expected_struct_str) - self.assertTrue( - model_root.StorageModelRoot.is_isomorphic(model2, model1)) - - def test_build_model_from_xml(self): - fake_cluster = faker_cluster_state.FakerStorageModelCollector() - - expected_model = fake_cluster.generate_scenario_1() - struct_str = self.load_data('storage_scenario_1.xml') - - model = model_root.StorageModelRoot.from_xml(struct_str) - self.assertEqual(expected_model.to_string(), model.to_string()) - - def test_assert_node_raise(self): - model = model_root.StorageModelRoot() - node = element.StorageNode(host="host@backend") - model.add_node(node) - self.assertRaises(exception.IllegalArgumentException, - model.assert_node, "obj") - - def test_assert_pool_raise(self): - model = model_root.StorageModelRoot() - pool = element.Pool(name="host@backend#pool") - model.add_pool(pool) - self.assertRaises(exception.IllegalArgumentException, - model.assert_pool, "obj") - - def test_assert_volume_raise(self): - model = model_root.StorageModelRoot() - uuid_ = "{0}".format(uuidutils.generate_uuid()) - volume = element.Volume(uuid=uuid_) - model.add_volume(volume) - self.assertRaises(exception.IllegalArgumentException, - model.assert_volume, "obj") - - def test_add_node(self): - model = model_root.StorageModelRoot() - hostname = "host@backend" - node = element.StorageNode(host=hostname) - model.add_node(node) - self.assertEqual(node, model.get_node_by_name(hostname)) - - def test_add_pool(self): - model = model_root.StorageModelRoot() - pool_name = "host@backend#pool" - pool = element.Pool(name=pool_name) - model.add_pool(pool) - self.assertEqual(pool, model.get_pool_by_pool_name(pool_name)) - - def test_remove_node(self): - 
model = model_root.StorageModelRoot() - hostname = "host@backend" - node = element.StorageNode(host=hostname) - model.add_node(node) - self.assertEqual(node, model.get_node_by_name(hostname)) - model.remove_node(node) - self.assertRaises(exception.StorageNodeNotFound, - model.get_node_by_name, hostname) - - def test_remove_pool(self): - model = model_root.StorageModelRoot() - pool_name = "host@backend#pool" - pool = element.Pool(name=pool_name) - model.add_pool(pool) - self.assertEqual(pool, model.get_pool_by_pool_name(pool_name)) - model.remove_pool(pool) - self.assertRaises(exception.PoolNotFound, - model.get_pool_by_pool_name, pool_name) - - def test_map_unmap_pool(self): - model = model_root.StorageModelRoot() - hostname = "host@backend" - node = element.StorageNode(host=hostname) - model.add_node(node) - self.assertEqual(node, model.get_node_by_name(hostname)) - pool_name = "host@backend#pool" - pool = element.Pool(name=pool_name) - model.add_pool(pool) - self.assertEqual(pool, model.get_pool_by_pool_name(pool_name)) - model.map_pool(pool, node) - self.assertTrue(pool.name in model.predecessors(node.host)) - model.unmap_pool(pool, node) - self.assertFalse(pool.name in model.predecessors(node.host)) - - def test_add_volume(self): - model = model_root.StorageModelRoot() - uuid_ = "{0}".format(uuidutils.generate_uuid()) - volume = element.Volume(uuid=uuid_) - model.add_volume(volume) - self.assertEqual(volume, model.get_volume_by_uuid(uuid_)) - - def test_remove_volume(self): - model = model_root.StorageModelRoot() - uuid_ = "{0}".format(uuidutils.generate_uuid()) - volume = element.Volume(uuid=uuid_) - model.add_volume(volume) - self.assertEqual(volume, model.get_volume_by_uuid(uuid_)) - model.remove_volume(volume) - self.assertRaises(exception.VolumeNotFound, - model.get_volume_by_uuid, uuid_) - - def test_map_unmap_volume(self): - model = model_root.StorageModelRoot() - pool_name = "host@backend#pool" - pool = element.Pool(name=pool_name) - 
model.add_pool(pool) - self.assertEqual(pool, model.get_pool_by_pool_name(pool_name)) - uuid_ = "{0}".format(uuidutils.generate_uuid()) - volume = element.Volume(uuid=uuid_) - model.add_volume(volume) - self.assertEqual(volume, model.get_volume_by_uuid(uuid_)) - model.map_volume(volume, pool) - self.assertTrue(volume.uuid in model.predecessors(pool.name)) - model.unmap_volume(volume, pool) - self.assertFalse(volume.uuid in model.predecessors(pool.name)) - - def test_get_all_storage_nodes(self): - model = model_root.StorageModelRoot() - for i in range(10): - hostname = "host_{0}".format(i) - node = element.StorageNode(host=hostname) - model.add_node(node) - all_nodes = model.get_all_storage_nodes() - for hostname in all_nodes: - node = model.get_node_by_name(hostname) - model.assert_node(node) - - def test_get_all_volumes(self): - model = model_root.StorageModelRoot() - for id_ in range(10): - uuid_ = "{0}".format(uuidutils.generate_uuid()) - volume = element.Volume(uuid=uuid_) - model.add_volume(volume) - all_volumes = model.get_all_volumes() - for vol in all_volumes: - volume = model.get_volume_by_uuid(vol) - model.assert_volume(volume) - - def test_get_node_pools(self): - model = model_root.StorageModelRoot() - hostname = "host@backend" - node = element.StorageNode(host=hostname) - model.add_node(node) - self.assertEqual(node, model.get_node_by_name(hostname)) - pool_name = "host@backend#pool" - pool = element.Pool(name=pool_name) - model.add_pool(pool) - self.assertEqual(pool, model.get_pool_by_pool_name(pool_name)) - model.map_pool(pool, node) - self.assertEqual([pool], model.get_node_pools(node)) - - def test_get_pool_by_volume(self): - model = model_root.StorageModelRoot() - pool_name = "host@backend#pool" - pool = element.Pool(name=pool_name) - model.add_pool(pool) - self.assertEqual(pool, model.get_pool_by_pool_name(pool_name)) - uuid_ = "{0}".format(uuidutils.generate_uuid()) - volume = element.Volume(uuid=uuid_) - model.add_volume(volume) - 
self.assertEqual(volume, model.get_volume_by_uuid(uuid_)) - model.map_volume(volume, pool) - self.assertEqual(pool, model.get_pool_by_volume(volume)) - - def test_get_pool_volumes(self): - model = model_root.StorageModelRoot() - pool_name = "host@backend#pool" - pool = element.Pool(name=pool_name) - model.add_pool(pool) - self.assertEqual(pool, model.get_pool_by_pool_name(pool_name)) - uuid_ = "{0}".format(uuidutils.generate_uuid()) - volume = element.Volume(uuid=uuid_) - model.add_volume(volume) - self.assertEqual(volume, model.get_volume_by_uuid(uuid_)) - model.map_volume(volume, pool) - self.assertEqual([volume], model.get_pool_volumes(pool)) diff --git a/watcher/tests/decision_engine/planner/__init__.py b/watcher/tests/decision_engine/planner/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/tests/decision_engine/planner/test_planner_manager.py b/watcher/tests/decision_engine/planner/test_planner_manager.py deleted file mode 100644 index 7d030f1..0000000 --- a/watcher/tests/decision_engine/planner/test_planner_manager.py +++ /dev/null @@ -1,28 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from oslo_config import cfg - -from watcher.decision_engine.planner import manager as planner -from watcher.decision_engine.planner import weight -from watcher.tests import base - - -class TestPlannerManager(base.TestCase): - def test_load(self): - cfg.CONF.set_override('planner', "weight", group='watcher_planner') - manager = planner.PlannerManager() - self.assertIsInstance(manager.load(), weight.WeightPlanner) diff --git a/watcher/tests/decision_engine/planner/test_weight_planner.py b/watcher/tests/decision_engine/planner/test_weight_planner.py deleted file mode 100644 index 3c9cc48..0000000 --- a/watcher/tests/decision_engine/planner/test_weight_planner.py +++ /dev/null @@ -1,944 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import mock - -from watcher.common import nova_helper -from watcher.common import utils -from watcher.db import api as db_api -from watcher.decision_engine.planner import weight as pbase -from watcher.decision_engine.solution import default as dsol -from watcher.decision_engine.strategy import strategies -from watcher import objects -from watcher.tests.db import base -from watcher.tests.db import utils as db_utils -from watcher.tests.decision_engine.model import ceilometer_metrics as fake -from watcher.tests.decision_engine.model import faker_cluster_state -from watcher.tests.objects import utils as obj_utils - - -class SolutionFaker(object): - @staticmethod - def build(): - metrics = fake.FakerMetricsCollector() - current_state_cluster = faker_cluster_state.FakerModelCollector() - sercon = strategies.BasicConsolidation(config=mock.Mock()) - sercon.compute_model = current_state_cluster.generate_scenario_1() - sercon.ceilometer = mock.MagicMock( - get_statistics=metrics.mock_get_statistics) - return sercon.execute() - - -class SolutionFakerSingleHyp(object): - @staticmethod - def build(): - metrics = fake.FakerMetricsCollector() - current_state_cluster = faker_cluster_state.FakerModelCollector() - sercon = strategies.BasicConsolidation(config=mock.Mock()) - sercon.compute_model = ( - current_state_cluster.generate_scenario_3_with_2_nodes()) - sercon.ceilometer = mock.MagicMock( - get_statistics=metrics.mock_get_statistics) - - return sercon.execute() - - -class TestActionScheduling(base.DbTestCase): - - def setUp(self): - super(TestActionScheduling, self).setUp() - self.goal = db_utils.create_test_goal(name="dummy") - self.strategy = db_utils.create_test_strategy(name="dummy") - self.audit = db_utils.create_test_audit( - uuid=utils.generate_uuid(), strategy_id=self.strategy.id) - self.planner = pbase.WeightPlanner( - mock.Mock( - weights={ - 'turn_host_to_acpi_s3_state': 10, - 'resize': 20, - 'migrate': 30, - 'sleep': 40, - 'change_nova_service_state': 50, - 
'nop': 60, - 'new_action_type': 70, - }, - parallelization={ - 'turn_host_to_acpi_s3_state': 2, - 'resize': 2, - 'migrate': 2, - 'sleep': 1, - 'change_nova_service_state': 1, - 'nop': 1, - 'new_action_type': 70, - })) - - @mock.patch.object(utils, "generate_uuid") - def test_schedule_actions(self, m_generate_uuid): - m_generate_uuid.side_effect = [ - "00000000-0000-0000-0000-000000000000", # Action plan - "11111111-1111-1111-1111-111111111111", # Migrate 1 - "22222222-2222-2222-2222-222222222222", - "33333333-3333-3333-3333-333333333333", - # "44444444-4444-4444-4444-444444444444", - # "55555555-5555-5555-5555-555555555555", - # "66666666-6666-6666-6666-666666666666", - # "77777777-7777-7777-7777-777777777777", - # "88888888-8888-8888-8888-888888888888", - # "99999999-9999-9999-9999-999999999999", - ] - solution = dsol.DefaultSolution( - goal=mock.Mock(), strategy=self.strategy) - - solution.add_action(action_type="migrate", - resource_id="DOESNOTMATTER", - input_parameters={"source_node": "server1", - "destination_node": "server2"}) - - self.planner.config.weights = {'migrate': 3} - action_plan = self.planner.schedule( - self.context, self.audit.id, solution) - - self.assertIsNotNone(action_plan.uuid) - with mock.patch.object( - pbase.WeightPlanner, "create_scheduled_actions", - wraps=self.planner.create_scheduled_actions - ) as m_create_scheduled_actions: - action_plan = self.planner.schedule( - self.context, self.audit.id, solution) - self.assertIsNotNone(action_plan.uuid) - self.assertEqual(1, m_create_scheduled_actions.call_count) - action_graph = m_create_scheduled_actions.call_args[0][0] - - expected_edges = [] - - edges = sorted([(src.as_dict(), dst.as_dict()) - for src, dst in action_graph.edges()], - key=lambda pair: pair[0]['uuid']) - for src, dst in edges: - for key in ('id', 'action_plan', 'action_plan_id', 'created_at', - 'input_parameters', 'deleted_at', 'updated_at', - 'state'): - del src[key] - del dst[key] - - self.assertEqual(len(expected_edges), 
len(edges)) - for pair in expected_edges: - self.assertIn(pair, edges) - - @mock.patch.object(utils, "generate_uuid") - def test_schedule_two_actions(self, m_generate_uuid): - m_generate_uuid.side_effect = [ - "00000000-0000-0000-0000-000000000000", # Action plan - "11111111-1111-1111-1111-111111111111", - "22222222-2222-2222-2222-222222222222", - "33333333-3333-3333-3333-333333333333", - "44444444-4444-4444-4444-444444444444", # Migrate 1 - "55555555-5555-5555-5555-555555555555", # Nop 1 - ] - solution = dsol.DefaultSolution( - goal=mock.Mock(), strategy=self.strategy) - - # We create the migrate action before but we then schedule - # after the nop action - solution.add_action(action_type="migrate", - resource_id="DOESNOTMATTER", - input_parameters={"source_node": "server1", - "destination_node": "server2"}) - - solution.add_action(action_type="nop", - input_parameters={"message": "Hello world"}) - - self.planner.config.weights = {'migrate': 3, 'nop': 5} - - action_plan = self.planner.schedule( - self.context, self.audit.id, solution) - - self.assertIsNotNone(action_plan.uuid) - with mock.patch.object( - pbase.WeightPlanner, "create_scheduled_actions", - wraps=self.planner.create_scheduled_actions - ) as m_create_scheduled_actions: - action_plan = self.planner.schedule( - self.context, self.audit.id, solution) - self.assertIsNotNone(action_plan.uuid) - self.assertEqual(1, m_create_scheduled_actions.call_count) - action_graph = m_create_scheduled_actions.call_args[0][0] - - expected_edges = \ - [({'action_type': 'nop', - 'parents': [], - 'uuid': '55555555-5555-5555-5555-555555555555'}, - {'action_type': 'migrate', - 'parents': ['55555555-5555-5555-5555-555555555555'], - 'uuid': '44444444-4444-4444-4444-444444444444'})] - - edges = sorted([(src.as_dict(), dst.as_dict()) - for src, dst in action_graph.edges()], - key=lambda pair: pair[0]['uuid']) - for src, dst in edges: - for key in ('id', 'action_plan', 'action_plan_id', 'created_at', - 'input_parameters', 
'deleted_at', 'updated_at', - 'state'): - del src[key] - del dst[key] - - self.assertEqual(len(expected_edges), len(edges)) - for pair in expected_edges: - self.assertIn(pair, edges) - - @mock.patch.object(utils, "generate_uuid") - def test_schedule_actions_with_unknown_action(self, m_generate_uuid): - m_generate_uuid.side_effect = [ - "00000000-0000-0000-0000-000000000000", # Action plan - "11111111-1111-1111-1111-111111111111", # Migrate 1 - "22222222-2222-2222-2222-222222222222", # new_action_type - "33333333-3333-3333-3333-333333333333", - - ] - solution = dsol.DefaultSolution( - goal=mock.Mock(), strategy=self.strategy) - - parameters = { - "src_uuid_node": "server1", - "dst_uuid_node": "server2", - } - solution.add_action(action_type="migrate", - resource_id="DOESNOTMATTER", - input_parameters=parameters) - - solution.add_action(action_type="new_action_type", - resource_id="", - input_parameters={}) - - with mock.patch.object( - pbase.WeightPlanner, "create_scheduled_actions", - wraps=self.planner.create_scheduled_actions - ) as m_create_scheduled_actions: - action_plan = self.planner.schedule( - self.context, self.audit.id, solution) - self.assertIsNotNone(action_plan.uuid) - self.assertEqual(1, m_create_scheduled_actions.call_count) - action_graph = m_create_scheduled_actions.call_args[0][0] - - expected_edges = \ - [({'action_type': 'new_action_type', - 'parents': [], - 'uuid': '22222222-2222-2222-2222-222222222222'}, - {'action_type': 'migrate', - 'parents': ['22222222-2222-2222-2222-222222222222'], - 'uuid': '11111111-1111-1111-1111-111111111111'})] - - edges = sorted([(src.as_dict(), dst.as_dict()) - for src, dst in action_graph.edges()], - key=lambda pair: pair[0]['uuid']) - for src, dst in edges: - for key in ('id', 'action_plan', 'action_plan_id', 'created_at', - 'input_parameters', 'deleted_at', 'updated_at', - 'state'): - del src[key] - del dst[key] - - self.assertEqual(len(expected_edges), len(edges)) - for pair in expected_edges: - 
self.assertIn(pair, edges) - - @mock.patch.object(utils, "generate_uuid") - @mock.patch.object(nova_helper.NovaHelper, 'get_instance_by_uuid') - def test_schedule_migrate_resize_actions(self, m_nova, m_generate_uuid): - m_generate_uuid.side_effect = [ - "00000000-0000-0000-0000-000000000000", # Action plan - "11111111-1111-1111-1111-111111111111", # Migrate 1 - "22222222-2222-2222-2222-222222222222", # Migrate 2 - "33333333-3333-3333-3333-333333333333", # Migrate 3 - "44444444-4444-4444-4444-444444444444", # Migrate 4 - "55555555-5555-5555-5555-555555555555", # Migrate 5 - "66666666-6666-6666-6666-666666666666", # Resize 1 - "77777777-7777-7777-7777-777777777777", # Resize 2 - "88888888-8888-8888-8888-888888888888", # Nop - "99999999-9999-9999-9999-999999999999", - ] - m_nova.return_value = 'server1' - solution = dsol.DefaultSolution( - goal=mock.Mock(), strategy=self.strategy) - - parameters = { - "source_node": "server1", - "destination_node": "server2", - } - solution.add_action(action_type="migrate", - resource_id="DOESNOTMATTER", - input_parameters=parameters) - - solution.add_action(action_type="resize", - resource_id="DOESNOTMATTER", - input_parameters={"flavor": "x1"}) - - with mock.patch.object( - pbase.WeightPlanner, "create_scheduled_actions", - wraps=self.planner.create_scheduled_actions - ) as m_create_scheduled_actions: - action_plan = self.planner.schedule( - self.context, self.audit.id, solution) - self.assertIsNotNone(action_plan.uuid) - self.assertEqual(1, m_create_scheduled_actions.call_count) - action_graph = m_create_scheduled_actions.call_args[0][0] - - expected_edges = \ - [({'action_type': 'migrate', - 'parents': [], - 'uuid': '11111111-1111-1111-1111-111111111111'}, - {'action_type': 'resize', - 'parents': ['11111111-1111-1111-1111-111111111111'], - 'uuid': '22222222-2222-2222-2222-222222222222'})] - - edges = sorted([(src.as_dict(), dst.as_dict()) - for src, dst in action_graph.edges()], - key=lambda pair: pair[0]['uuid']) - for src, dst 
in edges: - for key in ('id', 'action_plan', 'action_plan_id', 'created_at', - 'input_parameters', 'deleted_at', 'updated_at', - 'state'): - del src[key] - del dst[key] - - self.assertEqual(len(expected_edges), len(edges)) - for pair in expected_edges: - self.assertIn(pair, edges) - - @mock.patch.object(utils, "generate_uuid") - def test_schedule_3_migrate_1_resize_1_acpi_actions_1_swimlane( - self, m_generate_uuid): - self.planner.config.parallelization["migrate"] = 1 - m_generate_uuid.side_effect = [ - "00000000-0000-0000-0000-000000000000", # Action plan - "11111111-1111-1111-1111-111111111111", # Migrate 1 - "22222222-2222-2222-2222-222222222222", # Migrate 2 - "33333333-3333-3333-3333-333333333333", # Migrate 3 - "44444444-4444-4444-4444-444444444444", # Resize - "55555555-5555-5555-5555-555555555555", # ACPI - "66666666-6666-6666-6666-666666666666", - "77777777-7777-7777-7777-777777777777", - "88888888-8888-8888-8888-888888888888", - "99999999-9999-9999-9999-999999999999", - ] - - solution = dsol.DefaultSolution( - goal=mock.Mock(), strategy=self.strategy) - - parameters = { - "source_node": "server0", - "destination_node": "server1", - } - solution.add_action(action_type="migrate", - resource_id="DOESNOTMATTER", - input_parameters=parameters) - - solution.add_action(action_type="migrate", - resource_id="DOESNOTMATTER", - input_parameters={"source_node": "server1", - "destination_node": "server2"}) - - solution.add_action(action_type="migrate", - resource_id="DOESNOTMATTER", - input_parameters={"source_node": "server2", - "destination_node": "server3"}) - - solution.add_action(action_type="resize", - resource_id="DOESNOTMATTER", - input_parameters={'flavor': 'x1'}) - - solution.add_action(action_type="turn_host_to_acpi_s3_state", - resource_id="server1", - input_parameters={}) - - with mock.patch.object( - pbase.WeightPlanner, "create_scheduled_actions", - wraps=self.planner.create_scheduled_actions - ) as m_create_scheduled_actions: - action_plan = 
self.planner.schedule( - self.context, self.audit.id, solution) - self.assertIsNotNone(action_plan.uuid) - self.assertEqual(1, m_create_scheduled_actions.call_count) - action_graph = m_create_scheduled_actions.call_args[0][0] - - expected_edges = \ - [({'action_type': 'migrate', - 'parents': ['11111111-1111-1111-1111-111111111111'], - 'uuid': '22222222-2222-2222-2222-222222222222'}, - {'action_type': 'migrate', - 'parents': ['22222222-2222-2222-2222-222222222222'], - 'uuid': '33333333-3333-3333-3333-333333333333'}), - ({'action_type': 'migrate', - 'parents': [], - 'uuid': '11111111-1111-1111-1111-111111111111'}, - {'action_type': 'migrate', - 'parents': ['11111111-1111-1111-1111-111111111111'], - 'uuid': '22222222-2222-2222-2222-222222222222'}), - ({'action_type': 'resize', - 'parents': ['33333333-3333-3333-3333-333333333333'], - 'uuid': '44444444-4444-4444-4444-444444444444'}, - {'action_type': 'turn_host_to_acpi_s3_state', - 'parents': ['44444444-4444-4444-4444-444444444444'], - 'uuid': '55555555-5555-5555-5555-555555555555'}), - ({'action_type': 'migrate', - 'parents': ['22222222-2222-2222-2222-222222222222'], - 'uuid': '33333333-3333-3333-3333-333333333333'}, - {'action_type': 'resize', - 'parents': ['33333333-3333-3333-3333-333333333333'], - 'uuid': '44444444-4444-4444-4444-444444444444'})] - - edges = sorted([(src.as_dict(), dst.as_dict()) - for src, dst in action_graph.edges()], - key=lambda pair: pair[0]['uuid']) - for src, dst in edges: - for key in ('id', 'action_plan', 'action_plan_id', 'created_at', - 'input_parameters', 'deleted_at', 'updated_at', - 'state'): - del src[key] - del dst[key] - - self.assertEqual(len(expected_edges), len(edges)) - for pair in expected_edges: - self.assertIn(pair, edges) - - @mock.patch.object(utils, "generate_uuid") - def test_schedule_migrate_resize_acpi_actions_2_swimlanes( - self, m_generate_uuid): - self.planner.config.parallelization["migrate"] = 2 - m_generate_uuid.side_effect = [ - 
"00000000-0000-0000-0000-000000000000", # Action plan - "11111111-1111-1111-1111-111111111111", # Migrate 1 - "22222222-2222-2222-2222-222222222222", # Migrate 2 - "33333333-3333-3333-3333-333333333333", # Migrate 3 - "44444444-4444-4444-4444-444444444444", # Resize - "55555555-5555-5555-5555-555555555555", # ACPI - "66666666-6666-6666-6666-666666666666", - "77777777-7777-7777-7777-777777777777", - "88888888-8888-8888-8888-888888888888", - "99999999-9999-9999-9999-999999999999", - ] - - solution = dsol.DefaultSolution( - goal=mock.Mock(), strategy=self.strategy) - - parameters = { - "source_node": "server0", - "destination_node": "server1", - } - solution.add_action(action_type="migrate", - resource_id="DOESNOTMATTER", - input_parameters=parameters) - - solution.add_action(action_type="migrate", - resource_id="DOESNOTMATTER", - input_parameters={"source_node": "server1", - "destination_node": "server2"}) - - solution.add_action(action_type="migrate", - resource_id="DOESNOTMATTER", - input_parameters={"source_node": "server2", - "destination_node": "server3"}) - - solution.add_action(action_type="resize", - resource_id="DOESNOTMATTER", - input_parameters={'flavor': 'x1'}) - - solution.add_action(action_type="turn_host_to_acpi_s3_state", - resource_id="server1", - input_parameters={}) - - with mock.patch.object( - pbase.WeightPlanner, "create_scheduled_actions", - wraps=self.planner.create_scheduled_actions - ) as m_create_scheduled_actions: - action_plan = self.planner.schedule( - self.context, self.audit.id, solution) - self.assertIsNotNone(action_plan.uuid) - self.assertEqual(1, m_create_scheduled_actions.call_count) - action_graph = m_create_scheduled_actions.call_args[0][0] - - expected_edges = \ - [({'action_type': 'migrate', - 'parents': [], - 'uuid': '11111111-1111-1111-1111-111111111111'}, - {'action_type': 'migrate', - 'parents': ['11111111-1111-1111-1111-111111111111', - '22222222-2222-2222-2222-222222222222'], - 'uuid': 
'33333333-3333-3333-3333-333333333333'}), - ({'action_type': 'resize', - 'parents': ['33333333-3333-3333-3333-333333333333'], - 'uuid': '44444444-4444-4444-4444-444444444444'}, - {'action_type': 'turn_host_to_acpi_s3_state', - 'parents': ['44444444-4444-4444-4444-444444444444'], - 'uuid': '55555555-5555-5555-5555-555555555555'}), - ({'action_type': 'migrate', - 'parents': [], - 'uuid': '22222222-2222-2222-2222-222222222222'}, - {'action_type': 'migrate', - 'parents': ['11111111-1111-1111-1111-111111111111', - '22222222-2222-2222-2222-222222222222'], - 'uuid': '33333333-3333-3333-3333-333333333333'}), - ({'action_type': 'migrate', - 'parents': ['11111111-1111-1111-1111-111111111111', - '22222222-2222-2222-2222-222222222222'], - 'uuid': '33333333-3333-3333-3333-333333333333'}, - {'action_type': 'resize', - 'parents': ['33333333-3333-3333-3333-333333333333'], - 'uuid': '44444444-4444-4444-4444-444444444444'})] - - edges = sorted([(src.as_dict(), dst.as_dict()) - for src, dst in action_graph.edges()], - key=lambda pair: pair[0]['uuid']) - for src, dst in edges: - for key in ('id', 'action_plan', 'action_plan_id', 'created_at', - 'input_parameters', 'deleted_at', 'updated_at', - 'state'): - del src[key] - del dst[key] - - self.assertEqual(len(expected_edges), len(edges)) - for pair in expected_edges: - self.assertIn(pair, edges) - - @mock.patch.object(utils, "generate_uuid") - def test_schedule_migrate_resize_acpi_actions_3_swimlanes( - self, m_generate_uuid): - self.planner.config.parallelization["migrate"] = 3 - m_generate_uuid.side_effect = [ - "00000000-0000-0000-0000-000000000000", # Action plan - "11111111-1111-1111-1111-111111111111", # Migrate 1 - "22222222-2222-2222-2222-222222222222", # Migrate 2 - "33333333-3333-3333-3333-333333333333", # Migrate 3 - "44444444-4444-4444-4444-444444444444", # Resize - "55555555-5555-5555-5555-555555555555", # ACPI - "66666666-6666-6666-6666-666666666666", - "77777777-7777-7777-7777-777777777777", - 
"88888888-8888-8888-8888-888888888888", - "99999999-9999-9999-9999-999999999999", - ] - - solution = dsol.DefaultSolution( - goal=mock.Mock(), strategy=self.strategy) - - parameters = { - "source_node": "server0", - "destination_node": "server1", - } - solution.add_action(action_type="migrate", - resource_id="DOESNOTMATTER", - input_parameters=parameters) - - solution.add_action(action_type="migrate", - resource_id="DOESNOTMATTER", - input_parameters={"source_node": "server1", - "destination_node": "server2"}) - - solution.add_action(action_type="migrate", - resource_id="DOESNOTMATTER", - input_parameters={"source_node": "server2", - "destination_node": "server3"}) - - solution.add_action(action_type="resize", - resource_id="DOESNOTMATTER", - input_parameters={'flavor': 'x1'}) - - solution.add_action(action_type="turn_host_to_acpi_s3_state", - resource_id="server1", - input_parameters={}) - - with mock.patch.object( - pbase.WeightPlanner, "create_scheduled_actions", - wraps=self.planner.create_scheduled_actions - ) as m_create_scheduled_actions: - action_plan = self.planner.schedule( - self.context, self.audit.id, solution) - self.assertIsNotNone(action_plan.uuid) - self.assertEqual(1, m_create_scheduled_actions.call_count) - action_graph = m_create_scheduled_actions.call_args[0][0] - - expected_edges = \ - [({'action_type': 'resize', - 'parents': ['11111111-1111-1111-1111-111111111111', - '22222222-2222-2222-2222-222222222222', - '33333333-3333-3333-3333-333333333333'], - 'uuid': '44444444-4444-4444-4444-444444444444'}, - {'action_type': 'turn_host_to_acpi_s3_state', - 'parents': ['44444444-4444-4444-4444-444444444444'], - 'uuid': '55555555-5555-5555-5555-555555555555'}), - ({'action_type': 'migrate', - 'parents': [], - 'uuid': '11111111-1111-1111-1111-111111111111'}, - {'action_type': 'resize', - 'parents': ['11111111-1111-1111-1111-111111111111', - '22222222-2222-2222-2222-222222222222', - '33333333-3333-3333-3333-333333333333'], - 'uuid': 
'44444444-4444-4444-4444-444444444444'}), - ({'action_type': 'migrate', - 'parents': [], - 'uuid': '22222222-2222-2222-2222-222222222222'}, - {'action_type': 'resize', - 'parents': ['11111111-1111-1111-1111-111111111111', - '22222222-2222-2222-2222-222222222222', - '33333333-3333-3333-3333-333333333333'], - 'uuid': '44444444-4444-4444-4444-444444444444'}), - ({'action_type': 'migrate', - 'parents': [], - 'uuid': '33333333-3333-3333-3333-333333333333'}, - {'action_type': 'resize', - 'parents': ['11111111-1111-1111-1111-111111111111', - '22222222-2222-2222-2222-222222222222', - '33333333-3333-3333-3333-333333333333'], - 'uuid': '44444444-4444-4444-4444-444444444444'})] - - edges = sorted([(src.as_dict(), dst.as_dict()) - for src, dst in action_graph.edges()], - key=lambda pair: pair[0]['uuid']) - for src, dst in edges: - for key in ('id', 'action_plan', 'action_plan_id', 'created_at', - 'input_parameters', 'deleted_at', 'updated_at', - 'state'): - del src[key] - del dst[key] - - self.assertEqual(len(expected_edges), len(edges)) - for pair in expected_edges: - self.assertIn(pair, edges) - - @mock.patch.object(utils, "generate_uuid") - def test_schedule_three_migrate_two_resize_actions( - self, m_generate_uuid): - self.planner.config.parallelization["migrate"] = 3 - self.planner.config.parallelization["resize"] = 2 - m_generate_uuid.side_effect = [ - "00000000-0000-0000-0000-000000000000", # Action plan - "11111111-1111-1111-1111-111111111111", # Migrate 1 - "22222222-2222-2222-2222-222222222222", # Migrate 2 - "33333333-3333-3333-3333-333333333333", # Migrate 3 - "44444444-4444-4444-4444-444444444444", # Resize - "55555555-5555-5555-5555-555555555555", # ACPI - "66666666-6666-6666-6666-666666666666", - "77777777-7777-7777-7777-777777777777", - "88888888-8888-8888-8888-888888888888", - "99999999-9999-9999-9999-999999999999", - ] - - solution = dsol.DefaultSolution( - goal=mock.Mock(), strategy=self.strategy) - - parameters = { - "source_node": "server0", - 
"destination_node": "server1", - } - solution.add_action(action_type="migrate", - resource_id="DOESNOTMATTER", - input_parameters=parameters) - - solution.add_action(action_type="migrate", - resource_id="DOESNOTMATTER", - input_parameters={"source_node": "server1", - "destination_node": "server2"}) - - solution.add_action(action_type="migrate", - resource_id="DOESNOTMATTER", - input_parameters={"source_node": "server2", - "destination_node": "server3"}) - - solution.add_action(action_type="resize", - resource_id="DOESNOTMATTER", - input_parameters={'flavor': 'x1'}) - - solution.add_action(action_type="resize", - resource_id="b189db0c-1408-4d52-b5a5-5ca14de0ff36", - input_parameters={'flavor': 'x1'}) - - with mock.patch.object( - pbase.WeightPlanner, "create_scheduled_actions", - wraps=self.planner.create_scheduled_actions - ) as m_create_scheduled_actions: - action_plan = self.planner.schedule( - self.context, self.audit.id, solution) - self.assertIsNotNone(action_plan.uuid) - self.assertEqual(1, m_create_scheduled_actions.call_count) - action_graph = m_create_scheduled_actions.call_args[0][0] - - expected_edges = \ - [({'action_type': 'migrate', - 'parents': [], - 'uuid': '11111111-1111-1111-1111-111111111111'}, - {'action_type': 'resize', - 'parents': ['11111111-1111-1111-1111-111111111111', - '22222222-2222-2222-2222-222222222222', - '33333333-3333-3333-3333-333333333333'], - 'uuid': '55555555-5555-5555-5555-555555555555'}), - ({'action_type': 'migrate', - 'parents': [], - 'uuid': '11111111-1111-1111-1111-111111111111'}, - {'action_type': 'resize', - 'parents': ['11111111-1111-1111-1111-111111111111', - '22222222-2222-2222-2222-222222222222', - '33333333-3333-3333-3333-333333333333'], - 'uuid': '55555555-5555-5555-5555-555555555555'}), - ({'action_type': 'migrate', - 'parents': [], - 'uuid': '22222222-2222-2222-2222-222222222222'}, - {'action_type': 'resize', - 'parents': ['11111111-1111-1111-1111-111111111111', - '22222222-2222-2222-2222-222222222222', - 
'33333333-3333-3333-3333-333333333333'], - 'uuid': '55555555-5555-5555-5555-555555555555'}), - ({'action_type': 'migrate', - 'parents': [], - 'uuid': '22222222-2222-2222-2222-222222222222'}, - {'action_type': 'resize', - 'parents': ['11111111-1111-1111-1111-111111111111', - '22222222-2222-2222-2222-222222222222', - '33333333-3333-3333-3333-333333333333'], - 'uuid': '55555555-5555-5555-5555-555555555555'}), - ({'action_type': 'migrate', - 'parents': [], - 'uuid': '33333333-3333-3333-3333-333333333333'}, - {'action_type': 'resize', - 'parents': ['11111111-1111-1111-1111-111111111111', - '22222222-2222-2222-2222-222222222222', - '33333333-3333-3333-3333-333333333333'], - 'uuid': '55555555-5555-5555-5555-555555555555'}), - ({'action_type': 'migrate', - 'parents': [], - 'uuid': '33333333-3333-3333-3333-333333333333'}, - {'action_type': 'resize', - 'parents': ['11111111-1111-1111-1111-111111111111', - '22222222-2222-2222-2222-222222222222', - '33333333-3333-3333-3333-333333333333'], - 'uuid': '55555555-5555-5555-5555-555555555555'})] - - edges = sorted([(src.as_dict(), dst.as_dict()) - for src, dst in action_graph.edges()], - key=lambda pair: pair[0]['uuid']) - for src, dst in edges: - for key in ('id', 'action_plan', 'action_plan_id', 'created_at', - 'input_parameters', 'deleted_at', 'updated_at', - 'state'): - del src[key] - del dst[key] - - self.assertEqual(len(expected_edges), len(edges)) - for pair in expected_edges: - self.assertIn(pair, edges) - - @mock.patch.object(utils, "generate_uuid") - def test_schedule_5_migrate_2_resize_actions_for_2_swimlanes( - self, m_generate_uuid): - self.planner.config.parallelization["migrate"] = 2 - self.planner.config.parallelization["resize"] = 2 - m_generate_uuid.side_effect = [ - "00000000-0000-0000-0000-000000000000", # Action plan - "11111111-1111-1111-1111-111111111111", # Migrate 1 - "22222222-2222-2222-2222-222222222222", # Migrate 2 - "33333333-3333-3333-3333-333333333333", # Migrate 3 - 
"44444444-4444-4444-4444-444444444444", # Migrate 4 - "55555555-5555-5555-5555-555555555555", # Migrate 5 - "66666666-6666-6666-6666-666666666666", # Resize 1 - "77777777-7777-7777-7777-777777777777", # Resize 2 - "88888888-8888-8888-8888-888888888888", # Nop - "99999999-9999-9999-9999-999999999999", - ] - - solution = dsol.DefaultSolution( - goal=mock.Mock(), strategy=self.strategy) - - solution.add_action(action_type="migrate", - resource_id="DOESNOTMATTER", - input_parameters={"source_node": "server1", - "destination_node": "server6"}) - - solution.add_action(action_type="migrate", - resource_id="DOESNOTMATTER", - input_parameters={"source_node": "server2", - "destination_node": "server6"}) - - solution.add_action(action_type="migrate", - resource_id="DOESNOTMATTER", - input_parameters={"source_node": "server3", - "destination_node": "server6"}) - - solution.add_action(action_type="migrate", - resource_id="DOESNOTMATTER", - input_parameters={"source_node": "server4", - "destination_node": "server6"}) - - solution.add_action(action_type="migrate", - resource_id="DOESNOTMATTER", - input_parameters={"source_node": "server5", - "destination_node": "server6"}) - - solution.add_action(action_type="resize", - resource_id="DOESNOTMATTER", - input_parameters={'flavor': 'x1'}) - - solution.add_action(action_type="resize", - resource_id="DOESNOTMATTER", - input_parameters={'flavor': 'x2'}) - - solution.add_action(action_type="turn_host_to_acpi_s3_state", - resource_id="DOESNOTMATTER") - - with mock.patch.object( - pbase.WeightPlanner, "create_scheduled_actions", - wraps=self.planner.create_scheduled_actions - ) as m_create_scheduled_actions: - action_plan = self.planner.schedule( - self.context, self.audit.id, solution) - self.assertIsNotNone(action_plan.uuid) - self.assertEqual(1, m_create_scheduled_actions.call_count) - action_graph = m_create_scheduled_actions.call_args[0][0] - - expected_edges = \ - [({'action_type': 'migrate', - 'parents': [], - 'uuid': 
'11111111-1111-1111-1111-111111111111'}, - {'action_type': 'migrate', - 'parents': ['11111111-1111-1111-1111-111111111111', - '22222222-2222-2222-2222-222222222222'], - 'uuid': '33333333-3333-3333-3333-333333333333'}), - ({'action_type': 'migrate', - 'parents': [], - 'uuid': '11111111-1111-1111-1111-111111111111'}, - {'action_type': 'migrate', - 'parents': ['11111111-1111-1111-1111-111111111111', - '22222222-2222-2222-2222-222222222222'], - 'uuid': '44444444-4444-4444-4444-444444444444'}), - ({'action_type': 'migrate', - 'parents': [], - 'uuid': '22222222-2222-2222-2222-222222222222'}, - {'action_type': 'migrate', - 'parents': ['11111111-1111-1111-1111-111111111111', - '22222222-2222-2222-2222-222222222222'], - 'uuid': '33333333-3333-3333-3333-333333333333'}), - ({'action_type': 'migrate', - 'parents': [], - 'uuid': '22222222-2222-2222-2222-222222222222'}, - {'action_type': 'migrate', - 'parents': ['11111111-1111-1111-1111-111111111111', - '22222222-2222-2222-2222-222222222222'], - 'uuid': '44444444-4444-4444-4444-444444444444'}), - ({'action_type': 'migrate', - 'parents': ['11111111-1111-1111-1111-111111111111', - '22222222-2222-2222-2222-222222222222'], - 'uuid': '33333333-3333-3333-3333-333333333333'}, - {'action_type': 'migrate', - 'parents': ['33333333-3333-3333-3333-333333333333', - '44444444-4444-4444-4444-444444444444'], - 'uuid': '55555555-5555-5555-5555-555555555555'}), - ({'action_type': 'migrate', - 'parents': ['11111111-1111-1111-1111-111111111111', - '22222222-2222-2222-2222-222222222222'], - 'uuid': '44444444-4444-4444-4444-444444444444'}, - {'action_type': 'migrate', - 'parents': ['33333333-3333-3333-3333-333333333333', - '44444444-4444-4444-4444-444444444444'], - 'uuid': '55555555-5555-5555-5555-555555555555'}), - ({'action_type': 'migrate', - 'parents': ['33333333-3333-3333-3333-333333333333', - '44444444-4444-4444-4444-444444444444'], - 'uuid': '55555555-5555-5555-5555-555555555555'}, - {'action_type': 'resize', - 'parents': 
['55555555-5555-5555-5555-555555555555'], - 'uuid': '66666666-6666-6666-6666-666666666666'}), - ({'action_type': 'migrate', - 'parents': ['33333333-3333-3333-3333-333333333333', - '44444444-4444-4444-4444-444444444444'], - 'uuid': '55555555-5555-5555-5555-555555555555'}, - {'action_type': 'resize', - 'parents': ['55555555-5555-5555-5555-555555555555'], - 'uuid': '77777777-7777-7777-7777-777777777777'}), - ({'action_type': 'resize', - 'parents': ['55555555-5555-5555-5555-555555555555'], - 'uuid': '66666666-6666-6666-6666-666666666666'}, - {'action_type': 'turn_host_to_acpi_s3_state', - 'parents': ['66666666-6666-6666-6666-666666666666', - '77777777-7777-7777-7777-777777777777'], - 'uuid': '88888888-8888-8888-8888-888888888888'}), - ({'action_type': 'resize', - 'parents': ['55555555-5555-5555-5555-555555555555'], - 'uuid': '77777777-7777-7777-7777-777777777777'}, - {'action_type': 'turn_host_to_acpi_s3_state', - 'parents': ['66666666-6666-6666-6666-666666666666', - '77777777-7777-7777-7777-777777777777'], - 'uuid': '88888888-8888-8888-8888-888888888888'})] - - edges = sorted([(src.as_dict(), dst.as_dict()) - for src, dst in action_graph.edges()], - key=lambda pair: pair[0]['uuid']) - for src, dst in edges: - for key in ('id', 'action_plan', 'action_plan_id', 'created_at', - 'input_parameters', 'deleted_at', 'updated_at', - 'state'): - del src[key] - del dst[key] - - self.assertEqual(len(expected_edges), len(edges)) - for pair in expected_edges: - self.assertIn(pair, edges) - - -class TestWeightPlanner(base.DbTestCase): - - def setUp(self): - super(TestWeightPlanner, self).setUp() - self.planner = pbase.WeightPlanner(mock.Mock()) - self.planner.config.weights = { - 'nop': 0, - 'sleep': 1, - 'change_nova_service_state': 2, - 'migrate': 3 - } - - self.goal = obj_utils.create_test_goal(self.context) - self.strategy = obj_utils.create_test_strategy( - self.context, goal_id=self.goal.id) - obj_utils.create_test_audit_template( - self.context, goal_id=self.goal.id, 
strategy_id=self.strategy.id) - - p = mock.patch.object(db_api.BaseConnection, 'create_action_plan') - self.mock_create_action_plan = p.start() - self.mock_create_action_plan.side_effect = ( - self._simulate_action_plan_create) - self.addCleanup(p.stop) - - q = mock.patch.object(db_api.BaseConnection, 'create_action') - self.mock_create_action = q.start() - self.mock_create_action.side_effect = ( - self._simulate_action_create) - self.addCleanup(q.stop) - - def _simulate_action_plan_create(self, action_plan): - action_plan.create() - return action_plan - - def _simulate_action_create(self, action): - action.create() - return action - - @mock.patch.object(objects.Strategy, 'get_by_name') - def test_scheduler_warning_empty_action_plan(self, m_get_by_name): - m_get_by_name.return_value = self.strategy - audit = db_utils.create_test_audit( - goal_id=self.goal.id, strategy_id=self.strategy.id) - fake_solution = mock.MagicMock(efficacy_indicators=[], - actions=[]) - action_plan = self.planner.schedule( - self.context, audit.id, fake_solution) - self.assertIsNotNone(action_plan.uuid) diff --git a/watcher/tests/decision_engine/planner/test_workload_stabilization_planner.py b/watcher/tests/decision_engine/planner/test_workload_stabilization_planner.py deleted file mode 100644 index 4956fef..0000000 --- a/watcher/tests/decision_engine/planner/test_workload_stabilization_planner.py +++ /dev/null @@ -1,379 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -import mock - -from watcher.common import exception -from watcher.common import nova_helper -from watcher.common import utils -from watcher.db import api as db_api -from watcher.decision_engine.planner import workload_stabilization as pbase -from watcher.decision_engine.solution import default as dsol -from watcher.decision_engine.strategy import strategies -from watcher import objects -from watcher.tests.db import base -from watcher.tests.db import utils as db_utils -from watcher.tests.decision_engine.model import ceilometer_metrics as fake -from watcher.tests.decision_engine.model import faker_cluster_state -from watcher.tests.objects import utils as obj_utils - - -class SolutionFaker(object): - @staticmethod - def build(): - metrics = fake.FakerMetricsCollector() - current_state_cluster = faker_cluster_state.FakerModelCollector() - sercon = strategies.BasicConsolidation(config=mock.Mock()) - sercon._compute_model = current_state_cluster.generate_scenario_1() - sercon.ceilometer = mock.MagicMock( - get_statistics=metrics.mock_get_statistics) - return sercon.execute() - - -class SolutionFakerSingleHyp(object): - @staticmethod - def build(): - metrics = fake.FakerMetricsCollector() - current_state_cluster = faker_cluster_state.FakerModelCollector() - sercon = strategies.BasicConsolidation(config=mock.Mock()) - sercon._compute_model = ( - current_state_cluster.generate_scenario_3_with_2_nodes()) - sercon.ceilometer = mock.MagicMock( - get_statistics=metrics.mock_get_statistics) - - return sercon.execute() - - -class TestActionScheduling(base.DbTestCase): - - def setUp(self): - super(TestActionScheduling, self).setUp() - self.goal = db_utils.create_test_goal(name="dummy") - self.strategy = db_utils.create_test_strategy(name="dummy") - self.audit = db_utils.create_test_audit( - uuid=utils.generate_uuid(), strategy_id=self.strategy.id) - self.planner = 
pbase.WorkloadStabilizationPlanner(mock.Mock()) - self.nova_helper = nova_helper.NovaHelper(mock.Mock()) - - def test_schedule_actions(self): - solution = dsol.DefaultSolution( - goal=mock.Mock(), strategy=self.strategy) - - parameters = { - "source_node": "server1", - "destination_node": "server2", - } - solution.add_action(action_type="migrate", - resource_id="b199db0c-1408-4d52-b5a5-5ca14de0ff36", - input_parameters=parameters) - - with mock.patch.object( - pbase.WorkloadStabilizationPlanner, "create_action", - wraps=self.planner.create_action - ) as m_create_action: - self.planner.config.weights = {'migrate': 3} - action_plan = self.planner.schedule( - self.context, self.audit.id, solution) - - self.assertIsNotNone(action_plan.uuid) - self.assertEqual(1, m_create_action.call_count) - filters = {'action_plan_id': action_plan.id} - actions = objects.Action.dbapi.get_action_list(self.context, filters) - self.assertEqual("migrate", actions[0].action_type) - - def test_schedule_two_actions(self): - solution = dsol.DefaultSolution( - goal=mock.Mock(), strategy=self.strategy) - - parameters = { - "source_node": "server1", - "destination_node": "server2", - } - solution.add_action(action_type="migrate", - resource_id="b199db0c-1408-4d52-b5a5-5ca14de0ff36", - input_parameters=parameters) - - solution.add_action(action_type="nop", - input_parameters={"message": "Hello world"}) - - with mock.patch.object( - pbase.WorkloadStabilizationPlanner, "create_action", - wraps=self.planner.create_action - ) as m_create_action: - self.planner.config.weights = {'migrate': 3, 'nop': 5} - action_plan = self.planner.schedule( - self.context, self.audit.id, solution) - self.assertIsNotNone(action_plan.uuid) - self.assertEqual(2, m_create_action.call_count) - # check order - filters = {'action_plan_id': action_plan.id} - actions = objects.Action.dbapi.get_action_list(self.context, filters) - self.assertEqual("nop", actions[0].action_type) - self.assertEqual("migrate", 
actions[1].action_type) - - def test_schedule_actions_with_unknown_action(self): - solution = dsol.DefaultSolution( - goal=mock.Mock(), strategy=self.strategy) - - parameters = { - "src_uuid_node": "server1", - "dst_uuid_node": "server2", - } - solution.add_action(action_type="migrate", - resource_id="b199db0c-1408-4d52-b5a5-5ca14de0ff36", - input_parameters=parameters) - - solution.add_action(action_type="new_action_type", - resource_id="", - input_parameters={}) - - with mock.patch.object( - pbase.WorkloadStabilizationPlanner, "create_action", - wraps=self.planner.create_action - ) as m_create_action: - with mock.patch.object(nova_helper, 'NovaHelper') as m_nova: - self.planner.config.weights = {'migrate': 0} - self.assertRaises(KeyError, self.planner.schedule, - self.context, self.audit.id, solution) - assert not m_nova.called - self.assertEqual(2, m_create_action.call_count) - - def test_schedule_actions_with_unsupported_action(self): - solution = dsol.DefaultSolution( - goal=mock.Mock(), strategy=self.strategy) - - parameters = { - "src_uuid_node": "server1", - "dst_uuid_node": "server2", - } - solution.add_action(action_type="migrate", - resource_id="b199db0c-1408-4d52-b5a5-5ca14de0ff36", - input_parameters=parameters) - - solution.add_action(action_type="new_action_type", - resource_id="", - input_parameters={}) - with mock.patch.object( - pbase.WorkloadStabilizationPlanner, "create_action", - wraps=self.planner.create_action - ) as m_create_action: - with mock.patch.object(nova_helper, 'NovaHelper') as m_nova: - self.planner.config.weights = { - 'turn_host_to_acpi_s3_state': 0, - 'resize': 1, - 'migrate': 2, - 'sleep': 3, - 'change_nova_service_state': 4, - 'nop': 5, - 'new_action_type': 6} - self.assertRaises(exception.UnsupportedActionType, - self.planner.schedule, - self.context, self.audit.id, solution) - assert not m_nova.called - self.assertEqual(2, m_create_action.call_count) - - @mock.patch.object(nova_helper.NovaHelper, 'get_instance_by_uuid') - 
def test_schedule_migrate_resize_actions(self, mock_nova): - mock_nova.return_value = 'server1' - solution = dsol.DefaultSolution( - goal=mock.Mock(), strategy=self.strategy) - - parameters = { - "source_node": "server1", - "destination_node": "server2", - } - solution.add_action(action_type="migrate", - resource_id="b199db0c-1408-4d52-b5a5-5ca14de0ff36", - input_parameters=parameters) - - solution.add_action(action_type="resize", - resource_id="b199db0c-1408-4d52-b5a5-5ca14de0ff36", - input_parameters={"flavor": "x1"}) - - with mock.patch.object( - pbase.WorkloadStabilizationPlanner, "create_action", - wraps=self.planner.create_action - ) as m_create_action: - with mock.patch.object(nova_helper, 'NovaHelper') as m_nova: - self.planner.config.weights = {'migrate': 3, 'resize': 2} - action_plan = self.planner.schedule( - self.context, self.audit.id, solution) - self.assertEqual(1, m_nova.call_count) - self.assertIsNotNone(action_plan.uuid) - self.assertEqual(2, m_create_action.call_count) - # check order - filters = {'action_plan_id': action_plan.id} - actions = objects.Action.dbapi.get_action_list(self.context, filters) - self.assertEqual("migrate", actions[0].action_type) - self.assertEqual("resize", actions[1].action_type) - self.assertEqual(actions[0].uuid, actions[1].parents[0]) - - def test_schedule_migrate_resize_acpi_s3_actions(self): - solution = dsol.DefaultSolution( - goal=mock.Mock(), strategy=self.strategy) - - parameters = { - "source_node": "server1", - "destination_node": "server2", - } - parent_migration = "b199db0c-1408-4d52-b5a5-5ca14de0ff36" - solution.add_action(action_type="migrate", - resource_id="b199db0c-1408-4d52-b5a5-5ca14de0ff36", - input_parameters=parameters) - - solution.add_action(action_type="resize", - resource_id="b199db0c-1408-4d52-b5a5-5ca14de0ff36", - input_parameters={'flavor': 'x1'}) - - solution.add_action(action_type="migrate", - resource_id="f6416850-da28-4047-a547-8c49f53e95fe", - input_parameters={"source_node": 
"server1", - "destination_node": "server2"}) - - solution.add_action(action_type="migrate", - resource_id="bb404e74-2caf-447b-bd1e-9234db386ca5", - input_parameters={"source_node": "server2", - "destination_node": "server3"}) - - solution.add_action(action_type="turn_host_to_acpi_s3_state", - resource_id="server1", - input_parameters={}) - - with mock.patch.object( - pbase.WorkloadStabilizationPlanner, "create_action", - wraps=self.planner.create_action - ) as m_create_action: - with mock.patch.object( - nova_helper, 'NovaHelper') as m_nova: - m_nova().get_hostname.return_value = 'server1' - m_nova().get_instance_by_uuid.return_value = ['uuid1'] - self.planner.config.weights = { - 'turn_host_to_acpi_s3_state': 0, - 'resize': 1, - 'migrate': 2, - 'sleep': 3, - 'change_nova_service_state': 4, - 'nop': 5} - action_plan = self.planner.schedule( - self.context, self.audit.id, solution) - self.assertEqual(3, m_nova.call_count) - self.assertIsNotNone(action_plan.uuid) - self.assertEqual(5, m_create_action.call_count) - # check order - filters = {'action_plan_id': action_plan.id} - actions = objects.Action.dbapi.get_action_list(self.context, filters) - self.assertEqual("migrate", actions[0].action_type) - self.assertEqual("migrate", actions[1].action_type) - self.assertEqual("migrate", actions[2].action_type) - self.assertEqual("resize", actions[3].action_type) - self.assertEqual("turn_host_to_acpi_s3_state", actions[4].action_type) - for action in actions: - if action.input_parameters['resource_id'] == parent_migration: - parent_migration = action - break - self.assertEqual(parent_migration.uuid, actions[3].parents[0]) - - -class TestDefaultPlanner(base.DbTestCase): - - def setUp(self): - super(TestDefaultPlanner, self).setUp() - self.planner = pbase.WorkloadStabilizationPlanner(mock.Mock()) - self.planner.config.weights = { - 'nop': 0, - 'sleep': 1, - 'change_nova_service_state': 2, - 'migrate': 3 - } - - self.goal = obj_utils.create_test_goal(self.context) - 
self.strategy = obj_utils.create_test_strategy( - self.context, goal_id=self.goal.id) - obj_utils.create_test_audit_template( - self.context, goal_id=self.goal.id, strategy_id=self.strategy.id) - - p = mock.patch.object(db_api.BaseConnection, 'create_action_plan') - self.mock_create_action_plan = p.start() - self.mock_create_action_plan.side_effect = ( - self._simulate_action_plan_create) - self.addCleanup(p.stop) - - q = mock.patch.object(db_api.BaseConnection, 'create_action') - self.mock_create_action = q.start() - self.mock_create_action.side_effect = ( - self._simulate_action_create) - self.addCleanup(q.stop) - - def _simulate_action_plan_create(self, action_plan): - action_plan.create() - return action_plan - - def _simulate_action_create(self, action): - action.create() - return action - - @mock.patch.object(objects.Strategy, 'get_by_name') - def test_scheduler_warning_empty_action_plan(self, m_get_by_name): - m_get_by_name.return_value = self.strategy - audit = db_utils.create_test_audit( - goal_id=self.goal.id, strategy_id=self.strategy.id) - fake_solution = mock.MagicMock(efficacy_indicators=[], - actions=[]) - action_plan = self.planner.schedule( - self.context, audit.id, fake_solution) - self.assertIsNotNone(action_plan.uuid) - - -class TestActionValidator(base.DbTestCase): - INSTANCE_UUID = "94ae2f92-b7fd-4da7-9e97-f13504ae98c4" - - def setUp(self): - super(TestActionValidator, self).setUp() - self.r_osc_cls = mock.Mock() - self.r_helper_cls = mock.Mock() - self.r_helper = mock.Mock(spec=nova_helper.NovaHelper) - self.r_helper_cls.return_value = self.r_helper - r_nova_helper = mock.patch.object( - nova_helper, "NovaHelper", self.r_helper_cls) - - r_nova_helper.start() - - self.addCleanup(r_nova_helper.stop) - - def test_resize_validate_parents(self): - resize_object = pbase.ResizeActionValidator() - action = {'uuid': 'fcec56cd-74c1-406b-a7c1-81ef9f0c1393', - 'input_parameters': {'resource_id': self.INSTANCE_UUID}} - resource_action_map = 
{self.INSTANCE_UUID: [ - ('action_uuid', 'migrate')]} - self.r_helper.get_hostname.return_value = 'server1' - self.r_helper.get_instance_by_uuid.return_value = ['instance'] - result = resize_object.validate_parents(resource_action_map, action) - self.assertEqual('action_uuid', result[0]) - - def test_migrate_validate_parents(self): - migrate_object = pbase.MigrationActionValidator() - action = {'uuid': '712f1701-4c1b-4076-bfcf-3f23cfec6c3b', - 'input_parameters': {'source_node': 'server1', - 'resource_id': self.INSTANCE_UUID}} - resource_action_map = {} - expected_map = { - '94ae2f92-b7fd-4da7-9e97-f13504ae98c4': [ - ('712f1701-4c1b-4076-bfcf-3f23cfec6c3b', 'migrate')], - 'server1': [ - ('712f1701-4c1b-4076-bfcf-3f23cfec6c3b', 'migrate')]} - migrate_object.validate_parents(resource_action_map, action) - self.assertEqual(resource_action_map, expected_map) diff --git a/watcher/tests/decision_engine/scope/__init__.py b/watcher/tests/decision_engine/scope/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/tests/decision_engine/scope/fake_scopes.py b/watcher/tests/decision_engine/scope/fake_scopes.py deleted file mode 100644 index 9e638fe..0000000 --- a/watcher/tests/decision_engine/scope/fake_scopes.py +++ /dev/null @@ -1,35 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 Servionica -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -fake_scope_1 = [{'availability_zones': [{'name': 'AZ1'}]}, - {'exclude': - [{'instances': - [{'uuid': 'INSTANCE_6'}]}] - } - ] - -default_scope = [{'host_aggregates': [{'id': '*'}]}, - {'availability_zones': [{'name': 'AZ1'}, - {'name': 'AZ2'}]}, - {'exclude': [ - {'instances': [ - {'uuid': 'INSTANCE_1'}, - {'uuid': 'INSTANCE_2'}]}, - {'compute_nodes': [ - {'name': 'Node_1'}, - {'name': 'Node_2'}]} - ]} - ] diff --git a/watcher/tests/decision_engine/scope/test_default.py b/watcher/tests/decision_engine/scope/test_default.py deleted file mode 100644 index 06e7572..0000000 --- a/watcher/tests/decision_engine/scope/test_default.py +++ /dev/null @@ -1,255 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 Servionica -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - -from jsonschema import validators -import mock - -from watcher.common import exception -from watcher.common import nova_helper -from watcher.decision_engine.scope import default -from watcher.tests import base -from watcher.tests.decision_engine.model import faker_cluster_state -from watcher.tests.decision_engine.scope import fake_scopes - - -class TestDefaultScope(base.TestCase): - - def setUp(self): - super(TestDefaultScope, self).setUp() - self.fake_cluster = faker_cluster_state.FakerModelCollector() - - @mock.patch.object(nova_helper.NovaHelper, 'get_availability_zone_list') - def test_get_scoped_model_with_zones_and_instances(self, mock_zone_list): - cluster = self.fake_cluster.generate_scenario_1() - audit_scope = fake_scopes.fake_scope_1 - mock_zone_list.return_value = [ - mock.Mock(zoneName='AZ{0}'.format(i), - hosts={'Node_{0}'.format(i): {}}) - for i in range(2)] - model = default.DefaultScope(audit_scope, mock.Mock(), - osc=mock.Mock()).get_scoped_model(cluster) - expected_edges = [('INSTANCE_2', 'Node_1')] - self.assertEqual(sorted(expected_edges), sorted(model.edges())) - - @mock.patch.object(nova_helper.NovaHelper, 'get_availability_zone_list') - def test_get_scoped_model_without_scope(self, mock_zone_list): - model = self.fake_cluster.generate_scenario_1() - default.DefaultScope([], mock.Mock(), - osc=mock.Mock()).get_scoped_model(model) - assert not mock_zone_list.called - - def test_remove_instance(self): - model = self.fake_cluster.generate_scenario_1() - default.DefaultScope([], mock.Mock(), osc=mock.Mock()).remove_instance( - model, model.get_instance_by_uuid('INSTANCE_2'), 'Node_1') - expected_edges = [ - ('INSTANCE_0', 'Node_0'), - ('INSTANCE_1', 'Node_0'), - ('INSTANCE_3', 'Node_2'), - ('INSTANCE_4', 'Node_2'), - ('INSTANCE_5', 'Node_2'), - ('INSTANCE_6', 'Node_3'), - ('INSTANCE_7', 'Node_4'), - ] - self.assertEqual(sorted(expected_edges), sorted(model.edges())) - - @mock.patch.object(nova_helper.NovaHelper, 'get_aggregate_detail') - 
@mock.patch.object(nova_helper.NovaHelper, 'get_aggregate_list') - def test_collect_aggregates(self, mock_aggregate, mock_detailed_aggregate): - allowed_nodes = [] - mock_aggregate.return_value = [mock.Mock(id=i) for i in range(2)] - mock_detailed_aggregate.side_effect = [ - mock.Mock(id=i, hosts=['Node_{0}'.format(i)]) for i in range(2)] - default.DefaultScope([{'host_aggregates': [{'id': 1}, {'id': 2}]}], - mock.Mock(), osc=mock.Mock())._collect_aggregates( - [{'id': 1}, {'id': 2}], allowed_nodes) - self.assertEqual(['Node_1'], allowed_nodes) - - @mock.patch.object(nova_helper.NovaHelper, 'get_aggregate_detail') - @mock.patch.object(nova_helper.NovaHelper, 'get_aggregate_list') - def test_aggregates_wildcard_is_used(self, mock_aggregate, - mock_detailed_aggregate): - allowed_nodes = [] - mock_aggregate.return_value = [mock.Mock(id=i) for i in range(2)] - mock_detailed_aggregate.side_effect = [ - mock.Mock(id=i, hosts=['Node_{0}'.format(i)]) for i in range(2)] - default.DefaultScope([{'host_aggregates': [{'id': '*'}]}], - mock.Mock(), osc=mock.Mock())._collect_aggregates( - [{'id': '*'}], allowed_nodes) - self.assertEqual(['Node_0', 'Node_1'], allowed_nodes) - - @mock.patch.object(nova_helper.NovaHelper, 'get_aggregate_list') - def test_aggregates_wildcard_with_other_ids(self, mock_aggregate): - allowed_nodes = [] - mock_aggregate.return_value = [mock.Mock(id=i) for i in range(2)] - scope_handler = default.DefaultScope( - [{'host_aggregates': [{'id': '*'}, {'id': 1}]}], - mock.Mock(), osc=mock.Mock()) - self.assertRaises(exception.WildcardCharacterIsUsed, - scope_handler._collect_aggregates, - [{'id': '*'}, {'id': 1}], - allowed_nodes) - - @mock.patch.object(nova_helper.NovaHelper, 'get_aggregate_detail') - @mock.patch.object(nova_helper.NovaHelper, 'get_aggregate_list') - def test_aggregates_with_names_and_ids(self, mock_aggregate, - mock_detailed_aggregate): - allowed_nodes = [] - mock_aggregate.return_value = [mock.Mock(id=i, - name="HA_{0}".format(i)) - for i 
in range(2)] - mock_collection = [mock.Mock(id=i, hosts=['Node_{0}'.format(i)]) - for i in range(2)] - mock_collection[0].name = 'HA_0' - mock_collection[1].name = 'HA_1' - - mock_detailed_aggregate.side_effect = mock_collection - - default.DefaultScope([{'host_aggregates': [{'name': 'HA_1'}, - {'id': 0}]}], - mock.Mock(), osc=mock.Mock())._collect_aggregates( - [{'name': 'HA_1'}, {'id': 0}], allowed_nodes) - self.assertEqual(['Node_0', 'Node_1'], allowed_nodes) - - @mock.patch.object(nova_helper.NovaHelper, 'get_availability_zone_list') - def test_collect_zones(self, mock_zone_list): - allowed_nodes = [] - mock_zone_list.return_value = [ - mock.Mock(zoneName="AZ{0}".format(i + 1), - hosts={'Node_{0}'.format(2 * i): 1, - 'Node_{0}'.format(2 * i + 1): 2}) - for i in range(2)] - default.DefaultScope([{'availability_zones': [{'name': "AZ1"}]}], - mock.Mock(), osc=mock.Mock())._collect_zones( - [{'name': "AZ1"}], allowed_nodes) - self.assertEqual(['Node_0', 'Node_1'], sorted(allowed_nodes)) - - @mock.patch.object(nova_helper.NovaHelper, 'get_availability_zone_list') - def test_zones_wildcard_is_used(self, mock_zone_list): - allowed_nodes = [] - mock_zone_list.return_value = [ - mock.Mock(zoneName="AZ{0}".format(i + 1), - hosts={'Node_{0}'.format(2 * i): 1, - 'Node_{0}'.format(2 * i + 1): 2}) - for i in range(2)] - default.DefaultScope([{'availability_zones': [{'name': "*"}]}], - mock.Mock(), osc=mock.Mock())._collect_zones( - [{'name': "*"}], allowed_nodes) - self.assertEqual(['Node_0', 'Node_1', 'Node_2', 'Node_3'], - sorted(allowed_nodes)) - - @mock.patch.object(nova_helper.NovaHelper, 'get_availability_zone_list') - def test_zones_wildcard_with_other_ids(self, mock_zone_list): - allowed_nodes = [] - mock_zone_list.return_value = [ - mock.Mock(zoneName="AZ{0}".format(i + 1), - hosts={'Node_{0}'.format(2 * i): 1, - 'Node_{0}'.format(2 * i + 1): 2}) - for i in range(2)] - scope_handler = default.DefaultScope( - [{'availability_zones': [{'name': "*"}, {'name': 
'AZ1'}]}], - mock.Mock(), osc=mock.Mock()) - self.assertRaises(exception.WildcardCharacterIsUsed, - scope_handler._collect_zones, - [{'name': "*"}, {'name': 'AZ1'}], - allowed_nodes) - - def test_default_schema(self): - test_scope = fake_scopes.default_scope - validators.Draft4Validator( - default.DefaultScope.DEFAULT_SCHEMA).validate(test_scope) - - @mock.patch.object(nova_helper.NovaHelper, 'get_aggregate_detail') - @mock.patch.object(nova_helper.NovaHelper, 'get_aggregate_list') - def test_exclude_resource( - self, mock_aggregate, mock_detailed_aggregate): - mock_aggregate.return_value = [mock.Mock(id=i, - name="HA_{0}".format(i)) - for i in range(2)] - mock_collection = [mock.Mock(id=i, hosts=['Node_{0}'.format(i)]) - for i in range(2)] - mock_collection[0].name = 'HA_0' - mock_collection[1].name = 'HA_1' - mock_detailed_aggregate.side_effect = mock_collection - - resources_to_exclude = [{'host_aggregates': [{'name': 'HA_1'}, - {'id': 0}]}, - {'instances': [{'uuid': 'INSTANCE_1'}, - {'uuid': 'INSTANCE_2'}]}, - {'compute_nodes': [{'name': 'Node_2'}, - {'name': 'Node_3'}]}, - {'instance_metadata': [{'optimize': True}, - {'optimize1': False}]}] - instances_to_exclude = [] - nodes_to_exclude = [] - instance_metadata = [] - default.DefaultScope([], mock.Mock(), - osc=mock.Mock()).exclude_resources( - resources_to_exclude, instances=instances_to_exclude, - nodes=nodes_to_exclude, instance_metadata=instance_metadata) - - self.assertEqual(['Node_0', 'Node_1', 'Node_2', 'Node_3'], - sorted(nodes_to_exclude)) - self.assertEqual(['INSTANCE_1', 'INSTANCE_2'], - sorted(instances_to_exclude)) - self.assertEqual([{'optimize': True}, {'optimize1': False}], - instance_metadata) - - def test_exclude_instances_with_given_metadata(self): - cluster = self.fake_cluster.generate_scenario_1() - instance_metadata = [{'optimize': True}] - instances_to_remove = set() - default.DefaultScope( - [], mock.Mock(), - osc=mock.Mock()).exclude_instances_with_given_metadata( - instance_metadata, 
cluster, instances_to_remove) - self.assertEqual(sorted(['INSTANCE_' + str(i) for i in range(35)]), - sorted(instances_to_remove)) - - instance_metadata = [{'optimize': False}] - instances_to_remove = set() - default.DefaultScope( - [], mock.Mock(), - osc=mock.Mock()).exclude_instances_with_given_metadata( - instance_metadata, cluster, instances_to_remove) - self.assertEqual(set(), instances_to_remove) - - def test_remove_nodes_from_model(self): - model = self.fake_cluster.generate_scenario_1() - default.DefaultScope([], mock.Mock(), - osc=mock.Mock()).remove_nodes_from_model( - ['Node_1', 'Node_2'], model) - expected_edges = [ - ('INSTANCE_0', 'Node_0'), - ('INSTANCE_1', 'Node_0'), - ('INSTANCE_6', 'Node_3'), - ('INSTANCE_7', 'Node_4')] - self.assertEqual(sorted(expected_edges), sorted(model.edges())) - - def test_remove_instances_from_model(self): - model = self.fake_cluster.generate_scenario_1() - default.DefaultScope([], mock.Mock(), - osc=mock.Mock()).remove_instances_from_model( - ['INSTANCE_1', 'INSTANCE_2'], model) - expected_edges = [ - ('INSTANCE_0', 'Node_0'), - ('INSTANCE_3', 'Node_2'), - ('INSTANCE_4', 'Node_2'), - ('INSTANCE_5', 'Node_2'), - ('INSTANCE_6', 'Node_3'), - ('INSTANCE_7', 'Node_4')] - self.assertEqual(sorted(expected_edges), sorted(model.edges())) diff --git a/watcher/tests/decision_engine/scoring/__init__.py b/watcher/tests/decision_engine/scoring/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/tests/decision_engine/scoring/test_dummy_scorer.py b/watcher/tests/decision_engine/scoring/test_dummy_scorer.py deleted file mode 100644 index 43a91c7..0000000 --- a/watcher/tests/decision_engine/scoring/test_dummy_scorer.py +++ /dev/null @@ -1,54 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 Intel -# -# Authors: Tomasz Kaczynski -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from oslo_serialization import jsonutils - -from watcher.decision_engine.scoring import dummy_scorer -from watcher.tests import base - - -class TestDummyScorer(base.TestCase): - - def setUp(self): - super(TestDummyScorer, self).setUp() - - def test_metadata(self): - scorer = dummy_scorer.DummyScorer(config=None) - self.assertEqual('dummy_scorer', scorer.get_name()) - self.assertIn('Dummy', scorer.get_description()) - - metainfo = scorer.get_metainfo() - self.assertIn('feature_columns', metainfo) - self.assertIn('result_columns', metainfo) - self.assertIn('workloads', metainfo) - - def test_calculate_score(self): - scorer = dummy_scorer.DummyScorer(config=None) - - self._assert_result(scorer, 0, '[0, 0, 0, 0, 0, 0, 0, 0, 0]') - self._assert_result(scorer, 0, '[50, 0, 0, 600, 0, 0, 0, 0, 0]') - self._assert_result(scorer, 0, '[0, 0, 0, 0, 600, 0, 0, 0, 0]') - self._assert_result(scorer, 1, '[85, 0, 0, 0, 0, 0, 0, 0, 0]') - self._assert_result(scorer, 2, '[0, 0, 0, 1100, 1100, 0, 0, 0, 0]') - self._assert_result(scorer, 3, - '[0, 0, 0, 0, 0, 70000000, 70000000, 0, 0]') - - def _assert_result(self, scorer, expected, features): - result_str = scorer.calculate_score(features) - actual_result = jsonutils.loads(result_str)[0] - self.assertEqual(expected, actual_result) diff --git a/watcher/tests/decision_engine/scoring/test_dummy_scoring_container.py b/watcher/tests/decision_engine/scoring/test_dummy_scoring_container.py deleted file mode 100644 index 25786b3..0000000 --- a/watcher/tests/decision_engine/scoring/test_dummy_scoring_container.py +++ /dev/null @@ 
-1,51 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 Intel -# -# Authors: Tomasz Kaczynski -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from oslo_serialization import jsonutils - -from watcher.decision_engine.scoring import dummy_scoring_container -from watcher.tests import base - - -class TestDummyScoringContainer(base.TestCase): - - def setUp(self): - super(TestDummyScoringContainer, self).setUp() - - def test_get_scoring_engine_list(self): - scorers = (dummy_scoring_container.DummyScoringContainer - .get_scoring_engine_list()) - - self.assertEqual(3, len(scorers)) - self.assertEqual('dummy_min_scorer', scorers[0].get_name()) - self.assertEqual('dummy_max_scorer', scorers[1].get_name()) - self.assertEqual('dummy_avg_scorer', scorers[2].get_name()) - - def test_scorers(self): - scorers = (dummy_scoring_container.DummyScoringContainer - .get_scoring_engine_list()) - - self._assert_result(scorers[0], 1.1, '[1.1, 2.2, 4, 8]') - self._assert_result(scorers[1], 8, '[1.1, 2.2, 4, 8]') - # float(1 + 2 + 4 + 8) / 4 = 15.0 / 4 = 3.75 - self._assert_result(scorers[2], 3.75, '[1, 2, 4, 8]') - - def _assert_result(self, scorer, expected, features): - result_str = scorer.calculate_score(features) - actual_result = jsonutils.loads(result_str)[0] - self.assertEqual(expected, actual_result) diff --git a/watcher/tests/decision_engine/scoring/test_scoring_factory.py b/watcher/tests/decision_engine/scoring/test_scoring_factory.py deleted file mode 100644 index 
dfb79c6..0000000 --- a/watcher/tests/decision_engine/scoring/test_scoring_factory.py +++ /dev/null @@ -1,53 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 Intel -# -# Authors: Tomasz Kaczynski -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from watcher.decision_engine.scoring import scoring_factory -from watcher.tests import base - - -class TestScoringFactory(base.TestCase): - - def setUp(self): - super(TestScoringFactory, self).setUp() - - def test_get_scoring_engine(self): - scorer = scoring_factory.get_scoring_engine('dummy_scorer') - self.assertEqual('dummy_scorer', scorer.get_name()) - - scorer = scoring_factory.get_scoring_engine('dummy_min_scorer') - self.assertEqual('dummy_min_scorer', scorer.get_name()) - - scorer = scoring_factory.get_scoring_engine('dummy_max_scorer') - self.assertEqual('dummy_max_scorer', scorer.get_name()) - - scorer = scoring_factory.get_scoring_engine('dummy_avg_scorer') - self.assertEqual('dummy_avg_scorer', scorer.get_name()) - - self.assertRaises( - KeyError, - scoring_factory.get_scoring_engine, - 'non_existing_scorer') - - def test_get_scoring_engine_list(self): - scoring_engines = scoring_factory.get_scoring_engine_list() - - engine_names = {'dummy_scorer', 'dummy_min_scorer', - 'dummy_max_scorer', 'dummy_avg_scorer'} - - for scorer in scoring_engines: - self.assertIn(scorer.get_name(), engine_names) diff --git a/watcher/tests/decision_engine/solution/__init__.py b/watcher/tests/decision_engine/solution/__init__.py 
deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/tests/decision_engine/solution/test_default_solution.py b/watcher/tests/decision_engine/solution/test_default_solution.py deleted file mode 100644 index c0fc839..0000000 --- a/watcher/tests/decision_engine/solution/test_default_solution.py +++ /dev/null @@ -1,60 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import mock - -from watcher.decision_engine.solution import default -from watcher.tests import base - - -class TestDefaultSolution(base.TestCase): - - def test_default_solution(self): - solution = default.DefaultSolution( - goal=mock.Mock(), strategy=mock.Mock()) - parameters = { - "source_node": "server1", - "destination_node": "server2", - } - solution.add_action(action_type="nop", - resource_id="b199db0c-1408-4d52-b5a5-5ca14de0ff36", - input_parameters=parameters) - self.assertEqual(1, len(solution.actions)) - expected_action_type = "nop" - expected_parameters = { - "source_node": "server1", - "destination_node": "server2", - "resource_id": "b199db0c-1408-4d52-b5a5-5ca14de0ff36" - } - self.assertEqual(expected_action_type, - solution.actions[0].get('action_type')) - self.assertEqual(expected_parameters, - solution.actions[0].get('input_parameters')) - - def test_default_solution_with_no_input_parameters(self): - solution = default.DefaultSolution( - goal=mock.Mock(), strategy=mock.Mock()) - 
solution.add_action(action_type="nop", - resource_id="b199db0c-1408-4d52-b5a5-5ca14de0ff36") - self.assertEqual(1, len(solution.actions)) - expected_action_type = "nop" - expected_parameters = { - "resource_id": "b199db0c-1408-4d52-b5a5-5ca14de0ff36" - } - self.assertEqual(expected_action_type, - solution.actions[0].get('action_type')) - self.assertEqual(expected_parameters, - solution.actions[0].get('input_parameters')) diff --git a/watcher/tests/decision_engine/strategy/__init__.py b/watcher/tests/decision_engine/strategy/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/tests/decision_engine/strategy/context/__init__.py b/watcher/tests/decision_engine/strategy/context/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/tests/decision_engine/strategy/context/test_strategy_context.py b/watcher/tests/decision_engine/strategy/context/test_strategy_context.py deleted file mode 100644 index b6b5c80..0000000 --- a/watcher/tests/decision_engine/strategy/context/test_strategy_context.py +++ /dev/null @@ -1,99 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import mock - -from watcher.common import utils -from watcher.decision_engine.model.collector import manager -from watcher.decision_engine.solution import default -from watcher.decision_engine.strategy.context import default as d_strategy_ctx -from watcher.decision_engine.strategy.selection import default as d_selector -from watcher.decision_engine.strategy import strategies -from watcher.tests.db import base -from watcher.tests.objects import utils as obj_utils - - -class TestStrategyContext(base.DbTestCase): - - def setUp(self): - super(TestStrategyContext, self).setUp() - obj_utils.create_test_goal(self.context, id=1, name="DUMMY") - audit_template = obj_utils.create_test_audit_template( - self.context, uuid=utils.generate_uuid()) - self.audit = obj_utils.create_test_audit( - self.context, audit_template_id=audit_template.id) - - strategy_context = d_strategy_ctx.DefaultStrategyContext() - - @mock.patch.object(strategies.DummyStrategy, 'compute_model', - new_callable=mock.PropertyMock) - @mock.patch.object(d_selector.DefaultStrategySelector, 'select') - def test_execute_strategy(self, mock_call, m_model): - m_model.return_value = mock.Mock() - mock_call.return_value = strategies.DummyStrategy( - config=mock.Mock()) - solution = self.strategy_context.execute_strategy( - self.audit, self.context) - self.assertIsInstance(solution, default.DefaultSolution) - - @mock.patch.object(manager.CollectorManager, "get_cluster_model_collector", - mock.Mock()) - def test_execute_force_dummy(self): - goal = obj_utils.create_test_goal( - self.context, id=50, uuid=utils.generate_uuid(), name="my_goal") - - strategy = obj_utils.create_test_strategy( - self.context, id=42, uuid=utils.generate_uuid(), name="dummy", - goal_id=goal.id) - - audit = obj_utils.create_test_audit( - self.context, - id=2, - goal_id=goal.id, - strategy_id=strategy.id, - uuid=utils.generate_uuid(), - ) - - solution = self.strategy_context.execute_strategy(audit, self.context) - - 
self.assertEqual(len(solution.actions), 3) - - @mock.patch.object(strategies.BasicConsolidation, "execute") - @mock.patch.object(manager.CollectorManager, "get_cluster_model_collector", - mock.Mock()) - def test_execute_force_basic(self, mock_call): - expected_strategy = "basic" - mock_call.return_value = expected_strategy - - obj_utils.create_test_goal(self.context, id=50, - uuid=utils.generate_uuid(), - name="my_goal") - - strategy = obj_utils.create_test_strategy(self.context, - id=42, - uuid=utils.generate_uuid(), - name=expected_strategy) - - audit = obj_utils.create_test_audit( - self.context, - id=2, - strategy_id=strategy.id, - uuid=utils.generate_uuid(), - ) - - solution = self.strategy_context.execute_strategy(audit, self.context) - - self.assertEqual(solution, expected_strategy) diff --git a/watcher/tests/decision_engine/strategy/selector/__init__.py b/watcher/tests/decision_engine/strategy/selector/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/tests/decision_engine/strategy/selector/test_strategy_selector.py b/watcher/tests/decision_engine/strategy/selector/test_strategy_selector.py deleted file mode 100644 index d793cc6..0000000 --- a/watcher/tests/decision_engine/strategy/selector/test_strategy_selector.py +++ /dev/null @@ -1,62 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import mock - -from watcher.common import exception -from watcher.decision_engine.loading import default as default_loader -from watcher.decision_engine.strategy.selection import ( - default as default_selector) -from watcher.decision_engine.strategy import strategies -from watcher.tests import base - - -class TestStrategySelector(base.TestCase): - - def setUp(self): - super(TestStrategySelector, self).setUp() - - @mock.patch.object(default_loader.DefaultStrategyLoader, 'load') - def test_select_with_strategy_name(self, m_load): - expected_goal = 'dummy' - expected_strategy = "dummy" - strategy_selector = default_selector.DefaultStrategySelector( - expected_goal, expected_strategy, osc=None) - strategy_selector.select() - m_load.assert_called_once_with(expected_strategy, osc=None) - - @mock.patch.object(default_loader.DefaultStrategyLoader, 'load') - @mock.patch.object(default_loader.DefaultStrategyLoader, 'list_available') - def test_select_with_goal_name_only(self, m_list_available, m_load): - m_list_available.return_value = {"dummy": strategies.DummyStrategy} - expected_goal = 'dummy' - expected_strategy = "dummy" - strategy_selector = default_selector.DefaultStrategySelector( - expected_goal, osc=None) - strategy_selector.select() - m_load.assert_called_once_with(expected_strategy, osc=None) - - def test_select_non_existing_strategy(self): - strategy_selector = default_selector.DefaultStrategySelector( - "dummy", "NOT_FOUND") - self.assertRaises(exception.LoadingError, strategy_selector.select) - - @mock.patch.object(default_loader.DefaultStrategyLoader, 'list_available') - def test_select_no_available_strategy_for_goal(self, m_list_available): - m_list_available.return_value = {} - strategy_selector = default_selector.DefaultStrategySelector("dummy") - self.assertRaises(exception.NoAvailableStrategyForGoal, - strategy_selector.select) diff --git a/watcher/tests/decision_engine/strategy/strategies/__init__.py 
b/watcher/tests/decision_engine/strategy/strategies/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/tests/decision_engine/strategy/strategies/test_basic_consolidation.py b/watcher/tests/decision_engine/strategy/strategies/test_basic_consolidation.py deleted file mode 100644 index 9f3cf42..0000000 --- a/watcher/tests/decision_engine/strategy/strategies/test_basic_consolidation.py +++ /dev/null @@ -1,337 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Authors: Jean-Emile DARTOIS -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import collections -import copy -import datetime -import mock - -from watcher.applier.loading import default -from watcher.common import clients -from watcher.common import exception -from watcher.decision_engine.model import model_root -from watcher.decision_engine.strategy import strategies -from watcher.tests import base -from watcher.tests.decision_engine.model import ceilometer_metrics -from watcher.tests.decision_engine.model import faker_cluster_state -from watcher.tests.decision_engine.model import gnocchi_metrics -from watcher.tests.decision_engine.model import monasca_metrics - - -class TestBasicConsolidation(base.TestCase): - - scenarios = [ - ("Ceilometer", - {"datasource": "ceilometer", - "fake_datasource_cls": ceilometer_metrics.FakeCeilometerMetrics}), - ("Monasca", - {"datasource": "monasca", - "fake_datasource_cls": monasca_metrics.FakeMonascaMetrics}), - ("Gnocchi", - {"datasource": "gnocchi", - "fake_datasource_cls": gnocchi_metrics.FakeGnocchiMetrics}), - ] - - def setUp(self): - super(TestBasicConsolidation, self).setUp() - # fake metrics - self.fake_metrics = self.fake_datasource_cls() - # fake cluster - self.fake_cluster = faker_cluster_state.FakerModelCollector() - - p_osc = mock.patch.object( - clients, "OpenStackClients") - self.m_osc = p_osc.start() - self.addCleanup(p_osc.stop) - - p_model = mock.patch.object( - strategies.BasicConsolidation, "compute_model", - new_callable=mock.PropertyMock) - self.m_model = p_model.start() - self.addCleanup(p_model.stop) - - p_datasource = mock.patch.object( - strategies.BasicConsolidation, self.datasource, - new_callable=mock.PropertyMock) - self.m_datasource = p_datasource.start() - self.addCleanup(p_datasource.stop) - - p_audit_scope = mock.patch.object( - strategies.BasicConsolidation, "audit_scope", - new_callable=mock.PropertyMock - ) - self.m_audit_scope = p_audit_scope.start() - self.addCleanup(p_audit_scope.stop) - - self.m_audit_scope.return_value = mock.Mock() - - 
self.m_model.return_value = model_root.ModelRoot() - self.m_datasource.return_value = mock.Mock( - statistic_aggregation=self.fake_metrics.mock_get_statistics) - self.strategy = strategies.BasicConsolidation( - config=mock.Mock(datasource=self.datasource)) - - def test_cluster_size(self): - size_cluster = len( - self.fake_cluster.generate_scenario_1().get_all_compute_nodes()) - size_cluster_assert = 5 - self.assertEqual(size_cluster_assert, size_cluster) - - def test_basic_consolidation_score_node(self): - model = self.fake_cluster.generate_scenario_1() - self.m_model.return_value = model - node_1_score = 0.023333333333333317 - self.assertEqual(node_1_score, self.strategy.calculate_score_node( - model.get_node_by_uuid("Node_1"))) - node_2_score = 0.26666666666666666 - self.assertEqual(node_2_score, self.strategy.calculate_score_node( - model.get_node_by_uuid("Node_2"))) - node_0_score = 0.023333333333333317 - self.assertEqual(node_0_score, self.strategy.calculate_score_node( - model.get_node_by_uuid("Node_0"))) - - def test_basic_consolidation_score_instance(self): - model = self.fake_cluster.generate_scenario_1() - self.m_model.return_value = model - instance_0 = model.get_instance_by_uuid("INSTANCE_0") - instance_0_score = 0.023333333333333317 - self.assertEqual( - instance_0_score, - self.strategy.calculate_score_instance(instance_0)) - - instance_1 = model.get_instance_by_uuid("INSTANCE_1") - instance_1_score = 0.023333333333333317 - self.assertEqual( - instance_1_score, - self.strategy.calculate_score_instance(instance_1)) - instance_2 = model.get_instance_by_uuid("INSTANCE_2") - instance_2_score = 0.033333333333333326 - self.assertEqual( - instance_2_score, - self.strategy.calculate_score_instance(instance_2)) - instance_6 = model.get_instance_by_uuid("INSTANCE_6") - instance_6_score = 0.02666666666666669 - self.assertEqual( - instance_6_score, - self.strategy.calculate_score_instance(instance_6)) - instance_7 = model.get_instance_by_uuid("INSTANCE_7") - 
instance_7_score = 0.013333333333333345 - self.assertEqual( - instance_7_score, - self.strategy.calculate_score_instance(instance_7)) - - def test_basic_consolidation_score_instance_disk(self): - model = self.fake_cluster.generate_scenario_5_with_instance_disk_0() - self.m_model.return_value = model - instance_0 = model.get_instance_by_uuid("INSTANCE_0") - instance_0_score = 0.023333333333333355 - self.assertEqual( - instance_0_score, - self.strategy.calculate_score_instance(instance_0)) - - def test_basic_consolidation_weight(self): - model = self.fake_cluster.generate_scenario_1() - self.m_model.return_value = model - instance_0 = model.get_instance_by_uuid("INSTANCE_0") - cores = 16 - # 80 Go - disk = 80 - # mem 8 Go - mem = 8 - instance_0_weight_assert = 3.1999999999999997 - self.assertEqual( - instance_0_weight_assert, - self.strategy.calculate_weight(instance_0, cores, disk, mem)) - - def test_exception_model(self): - self.m_model.return_value = None - self.assertRaises( - exception.ClusterStateNotDefined, self.strategy.execute) - - def test_exception_cluster_empty(self): - model = model_root.ModelRoot() - self.m_model.return_value = model - self.assertRaises(exception.ClusterEmpty, self.strategy.execute) - - def test_check_migration(self): - model = self.fake_cluster.generate_scenario_3_with_2_nodes() - self.m_model.return_value = model - - all_instances = model.get_all_instances() - all_nodes = model.get_all_compute_nodes() - instance0 = all_instances[list(all_instances.keys())[0]] - node0 = all_nodes[list(all_nodes.keys())[0]] - - self.strategy.check_migration(node0, node0, instance0) - - def test_threshold(self): - model = self.fake_cluster.generate_scenario_3_with_2_nodes() - self.m_model.return_value = model - - all_nodes = model.get_all_compute_nodes() - node0 = all_nodes[list(all_nodes.keys())[0]] - - self.assertFalse(self.strategy.check_threshold( - node0, 1000, 1000, 1000)) - - def test_basic_consolidation_works_on_model_copy(self): - model = 
self.fake_cluster.generate_scenario_3_with_2_nodes() - self.m_model.return_value = copy.deepcopy(model) - - self.assertTrue(model_root.ModelRoot.is_isomorphic( - model, self.strategy.compute_model)) - self.assertIsNot(model, self.strategy.compute_model) - - def test_basic_consolidation_migration(self): - model = self.fake_cluster.generate_scenario_3_with_2_nodes() - self.m_model.return_value = model - - solution = self.strategy.execute() - - actions_counter = collections.Counter( - [action.get('action_type') for action in solution.actions]) - - expected_num_migrations = 1 - expected_power_state = 1 - - num_migrations = actions_counter.get("migrate", 0) - num_node_state_change = actions_counter.get( - "change_nova_service_state", 0) - self.assertEqual(expected_num_migrations, num_migrations) - self.assertEqual(expected_power_state, num_node_state_change) - - def test_basic_consolidation_execute_scenario_8_with_4_nodes(self): - model = self.fake_cluster.generate_scenario_8_with_4_nodes() - self.m_model.return_value = model - - solution = self.strategy.execute() - - actions_counter = collections.Counter( - [action.get('action_type') for action in solution.actions]) - - expected_num_migrations = 5 - expected_power_state = 3 - expected_global_efficacy = 75 - - num_migrations = actions_counter.get("migrate", 0) - num_node_state_change = actions_counter.get( - "change_nova_service_state", 0) - - global_efficacy_value = solution.global_efficacy.get("value", 0) - - self.assertEqual(expected_num_migrations, num_migrations) - self.assertEqual(expected_power_state, num_node_state_change) - self.assertEqual(expected_global_efficacy, global_efficacy_value) - - def test_exception_stale_cdm(self): - self.fake_cluster.set_cluster_data_model_as_stale() - self.m_model.return_value = self.fake_cluster.cluster_data_model - - self.assertRaises( - exception.ClusterStateNotDefined, - self.strategy.execute) - - # calculate_weight - def test_execute_no_workload(self): - model = ( - 
self.fake_cluster - .generate_scenario_4_with_1_node_no_instance()) - self.m_model.return_value = model - - with mock.patch.object( - strategies.BasicConsolidation, 'calculate_weight' - ) as mock_score_call: - mock_score_call.return_value = 0 - solution = self.strategy.execute() - self.assertEqual(0, solution.efficacy.global_efficacy.value) - - def test_check_parameters(self): - model = self.fake_cluster.generate_scenario_3_with_2_nodes() - self.m_model.return_value = model - solution = self.strategy.execute() - loader = default.DefaultActionLoader() - for action in solution.actions: - loaded_action = loader.load(action['action_type']) - loaded_action.input_parameters = action['input_parameters'] - loaded_action.validate_parameters() - - def test_periods(self): - model = self.fake_cluster.generate_scenario_1() - self.m_model.return_value = model - node_1 = model.get_node_by_uuid("Node_1") - p_ceilometer = mock.patch.object( - strategies.BasicConsolidation, "ceilometer") - m_ceilometer = p_ceilometer.start() - self.addCleanup(p_ceilometer.stop) - p_monasca = mock.patch.object(strategies.BasicConsolidation, "monasca") - m_monasca = p_monasca.start() - self.addCleanup(p_monasca.stop) - p_gnocchi = mock.patch.object(strategies.BasicConsolidation, "gnocchi") - m_gnocchi = p_gnocchi.start() - self.addCleanup(p_gnocchi.stop) - datetime_patcher = mock.patch.object( - datetime, 'datetime', - mock.Mock(wraps=datetime.datetime) - ) - mocked_datetime = datetime_patcher.start() - mocked_datetime.utcnow.return_value = datetime.datetime( - 2017, 3, 19, 18, 53, 11, 657417) - self.addCleanup(datetime_patcher.stop) - m_monasca.return_value = mock.Mock( - statistic_aggregation=self.fake_metrics.mock_get_statistics) - m_ceilometer.return_value = mock.Mock( - statistic_aggregation=self.fake_metrics.mock_get_statistics) - m_gnocchi.return_value = mock.Mock( - statistic_aggregation=self.fake_metrics.mock_get_statistics) - self.strategy.calculate_score_node(node_1) - resource_id = "%s_%s" 
% (node_1.uuid, node_1.hostname) - if self.strategy.config.datasource == "ceilometer": - m_ceilometer.statistic_aggregation.assert_called_with( - aggregate='avg', meter_name='compute.node.cpu.percent', - period=7200, resource_id=resource_id) - elif self.strategy.config.datasource == "monasca": - m_monasca.statistic_aggregation.assert_called_with( - aggregate='avg', meter_name='cpu.percent', - period=7200, dimensions={'hostname': 'Node_1'}) - elif self.strategy.config.datasource == "gnocchi": - stop_time = datetime.datetime.utcnow() - start_time = stop_time - datetime.timedelta( - seconds=int('7200')) - m_gnocchi.statistic_aggregation.assert_called_with( - resource_id=resource_id, metric='compute.node.cpu.percent', - granularity=300, start_time=start_time, stop_time=stop_time, - aggregation='mean') - - self.strategy.input_parameters.update({"period": 600}) - self.strategy.calculate_score_node(node_1) - if self.strategy.config.datasource == "ceilometer": - m_ceilometer.statistic_aggregation.assert_called_with( - aggregate='avg', meter_name='compute.node.cpu.percent', - period=600, resource_id=resource_id) - elif self.strategy.config.datasource == "monasca": - m_monasca.statistic_aggregation.assert_called_with( - aggregate='avg', meter_name='cpu.percent', - period=600, dimensions={'hostname': 'Node_1'}) - elif self.strategy.config.datasource == "gnocchi": - stop_time = datetime.datetime.utcnow() - start_time = stop_time - datetime.timedelta( - seconds=int('600')) - m_gnocchi.statistic_aggregation.assert_called_with( - resource_id=resource_id, metric='compute.node.cpu.percent', - granularity=300, start_time=start_time, stop_time=stop_time, - aggregation='mean') diff --git a/watcher/tests/decision_engine/strategy/strategies/test_dummy_strategy.py b/watcher/tests/decision_engine/strategy/strategies/test_dummy_strategy.py deleted file mode 100644 index 9ce0a7c..0000000 --- a/watcher/tests/decision_engine/strategy/strategies/test_dummy_strategy.py +++ /dev/null @@ -1,72 
+0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import mock - -from watcher.applier.loading import default -from watcher.common import utils -from watcher.decision_engine.model import model_root -from watcher.decision_engine.strategy import strategies -from watcher.tests import base -from watcher.tests.decision_engine.model import faker_cluster_state - - -class TestDummyStrategy(base.TestCase): - - def setUp(self): - super(TestDummyStrategy, self).setUp() - # fake cluster - self.fake_cluster = faker_cluster_state.FakerModelCollector() - - p_model = mock.patch.object( - strategies.DummyStrategy, "compute_model", - new_callable=mock.PropertyMock) - self.m_model = p_model.start() - self.addCleanup(p_model.stop) - - p_audit_scope = mock.patch.object( - strategies.DummyStrategy, "audit_scope", - new_callable=mock.PropertyMock - ) - self.m_audit_scope = p_audit_scope.start() - self.addCleanup(p_audit_scope.stop) - - self.m_audit_scope.return_value = mock.Mock() - - self.m_model.return_value = model_root.ModelRoot() - self.strategy = strategies.DummyStrategy(config=mock.Mock()) - - self.m_model.return_value = model_root.ModelRoot() - self.strategy = strategies.DummyStrategy(config=mock.Mock()) - - def test_dummy_strategy(self): - dummy = strategies.DummyStrategy(config=mock.Mock()) - dummy.input_parameters = utils.Struct() - dummy.input_parameters.update({'para1': 4.0, 'para2': 'Hi'}) - solution = 
dummy.execute() - self.assertEqual(3, len(solution.actions)) - - def test_check_parameters(self): - model = self.fake_cluster.generate_scenario_3_with_2_nodes() - self.m_model.return_value = model - self.strategy.input_parameters = utils.Struct() - self.strategy.input_parameters.update({'para1': 4.0, 'para2': 'Hi'}) - solution = self.strategy.execute() - loader = default.DefaultActionLoader() - for action in solution.actions: - loaded_action = loader.load(action['action_type']) - loaded_action.input_parameters = action['input_parameters'] - loaded_action.validate_parameters() diff --git a/watcher/tests/decision_engine/strategy/strategies/test_dummy_with_scorer.py b/watcher/tests/decision_engine/strategy/strategies/test_dummy_with_scorer.py deleted file mode 100644 index cb6fa28..0000000 --- a/watcher/tests/decision_engine/strategy/strategies/test_dummy_with_scorer.py +++ /dev/null @@ -1,60 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import mock - -from watcher.applier.loading import default -from watcher.common import utils -from watcher.decision_engine.model import model_root -from watcher.decision_engine.strategy import strategies -from watcher.tests import base -from watcher.tests.decision_engine.model import faker_cluster_state - - -class TestDummyWithScorer(base.TestCase): - - def setUp(self): - super(TestDummyWithScorer, self).setUp() - # fake cluster - self.fake_cluster = faker_cluster_state.FakerModelCollector() - - p_model = mock.patch.object( - strategies.DummyWithScorer, "compute_model", - new_callable=mock.PropertyMock) - self.m_model = p_model.start() - self.addCleanup(p_model.stop) - - self.m_model.return_value = model_root.ModelRoot() - self.strategy = strategies.DummyWithScorer(config=mock.Mock()) - - def test_dummy_with_scorer(self): - dummy = strategies.DummyWithScorer(config=mock.Mock()) - dummy.input_parameters = utils.Struct() - dummy.input_parameters.update({'param1': 4.0, 'param2': 'Hi'}) - solution = dummy.execute() - self.assertEqual(4, len(solution.actions)) - - def test_check_parameters(self): - model = self.fake_cluster.generate_scenario_3_with_2_nodes() - self.m_model.return_value = model - self.strategy.input_parameters = utils.Struct() - self.strategy.input_parameters.update({'param1': 4.0, 'param2': 'Hi'}) - solution = self.strategy.execute() - loader = default.DefaultActionLoader() - for action in solution.actions: - loaded_action = loader.load(action['action_type']) - loaded_action.input_parameters = action['input_parameters'] - loaded_action.validate_parameters() diff --git a/watcher/tests/decision_engine/strategy/strategies/test_noisy_neighbor.py b/watcher/tests/decision_engine/strategy/strategies/test_noisy_neighbor.py deleted file mode 100644 index 349ce1b..0000000 --- a/watcher/tests/decision_engine/strategy/strategies/test_noisy_neighbor.py +++ /dev/null @@ -1,179 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2017 Intel Corp -# -# Authors: 
Prudhvi Rao Shedimbi -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import collections -import mock - -from watcher.applier.loading import default -from watcher.common import exception -from watcher.common import utils -from watcher.decision_engine.model import model_root -from watcher.decision_engine.strategy import strategies -from watcher.tests import base -from watcher.tests.decision_engine.model import ceilometer_metrics -from watcher.tests.decision_engine.model import faker_cluster_state - - -class TestNoisyNeighbor(base.TestCase): - - def setUp(self): - super(TestNoisyNeighbor, self).setUp() - # fake metrics - self.fake_metrics = ceilometer_metrics.FakeCeilometerMetrics() - # fake cluster - self.fake_cluster = faker_cluster_state.FakerModelCollector() - - p_model = mock.patch.object( - strategies.NoisyNeighbor, "compute_model", - new_callable=mock.PropertyMock) - self.m_model = p_model.start() - self.addCleanup(p_model.stop) - - p_ceilometer = mock.patch.object( - strategies.NoisyNeighbor, "ceilometer", - new_callable=mock.PropertyMock) - self.m_ceilometer = p_ceilometer.start() - self.addCleanup(p_ceilometer.stop) - - p_audit_scope = mock.patch.object( - strategies.NoisyNeighbor, "audit_scope", - new_callable=mock.PropertyMock - ) - self.m_audit_scope = p_audit_scope.start() - self.addCleanup(p_audit_scope.stop) - - self.m_audit_scope.return_value = mock.Mock() - - self.m_model.return_value = model_root.ModelRoot() - self.m_ceilometer.return_value = 
mock.Mock( - statistic_aggregation=self.fake_metrics.mock_get_statistics_nn) - self.strategy = strategies.NoisyNeighbor(config=mock.Mock()) - - self.strategy.input_parameters = utils.Struct() - self.strategy.input_parameters.update({'cache_threshold': 35}) - self.strategy.threshold = 35 - self.strategy.input_parameters.update({'period': 100}) - self.strategy.threshold = 100 - - def test_calc_used_resource(self): - model = self.fake_cluster.generate_scenario_3_with_2_nodes() - self.m_model.return_value = model - node = model.get_node_by_uuid('Node_0') - cores_used, mem_used, disk_used = self.strategy.calc_used_resource( - node) - - self.assertEqual((10, 2, 20), (cores_used, mem_used, disk_used)) - - def test_group_hosts(self): - self.strategy.cache_threshold = 35 - self.strategy.period = 100 - model = self.fake_cluster.generate_scenario_7_with_2_nodes() - self.m_model.return_value = model - node_uuid = 'Node_1' - n1, n2 = self.strategy.group_hosts() - self.assertTrue(node_uuid in n1) - self.assertEqual(n1[node_uuid]['priority_vm'].uuid, 'INSTANCE_3') - self.assertEqual(n1[node_uuid]['noisy_vm'].uuid, 'INSTANCE_4') - self.assertEqual('Node_0', n2[0].uuid) - - def test_find_priority_instance(self): - self.strategy.cache_threshold = 35 - self.strategy.period = 100 - model = self.fake_cluster.generate_scenario_7_with_2_nodes() - self.m_model.return_value = model - potential_prio_inst = model.get_instance_by_uuid('INSTANCE_3') - inst_res = self.strategy.find_priority_instance(potential_prio_inst) - self.assertEqual('INSTANCE_3', inst_res.uuid) - - def test_find_noisy_instance(self): - self.strategy.cache_threshold = 35 - self.strategy.period = 100 - model = self.fake_cluster.generate_scenario_7_with_2_nodes() - self.m_model.return_value = model - potential_noisy_inst = model.get_instance_by_uuid('INSTANCE_4') - inst_res = self.strategy.find_noisy_instance(potential_noisy_inst) - self.assertEqual('INSTANCE_4', inst_res.uuid) - - def test_filter_destination_hosts(self): - 
model = self.fake_cluster.generate_scenario_7_with_2_nodes() - self.m_model.return_value = model - self.strategy.cache_threshold = 35 - self.strategy.period = 100 - n1, n2 = self.strategy.group_hosts() - mig_source_node = max(n1.keys(), key=lambda a: - n1[a]['priority_vm']) - instance_to_mig = n1[mig_source_node]['noisy_vm'] - dest_hosts = self.strategy.filter_dest_servers( - n2, instance_to_mig) - - self.assertEqual(1, len(dest_hosts)) - self.assertEqual('Node_0', dest_hosts[0].uuid) - - def test_exception_model(self): - self.m_model.return_value = None - self.assertRaises( - exception.ClusterStateNotDefined, self.strategy.execute) - - def test_exception_cluster_empty(self): - model = model_root.ModelRoot() - self.m_model.return_value = model - self.assertRaises(exception.ClusterEmpty, self.strategy.execute) - - def test_exception_stale_cdm(self): - self.fake_cluster.set_cluster_data_model_as_stale() - self.m_model.return_value = self.fake_cluster.cluster_data_model - - self.assertRaises( - exception.ClusterStateNotDefined, - self.strategy.execute) - - def test_execute_cluster_empty(self): - model = model_root.ModelRoot() - self.m_model.return_value = model - self.assertRaises(exception.ClusterEmpty, self.strategy.execute) - - def test_execute_no_workload(self): - self.strategy.cache_threshold = 35 - self.strategy.period = 100 - model = self.fake_cluster.generate_scenario_4_with_1_node_no_instance() - self.m_model.return_value = model - - solution = self.strategy.execute() - self.assertEqual([], solution.actions) - - def test_execute(self): - self.strategy.cache_threshold = 35 - self.strategy.period = 100 - model = self.fake_cluster.generate_scenario_7_with_2_nodes() - self.m_model.return_value = model - solution = self.strategy.execute() - actions_counter = collections.Counter( - [action.get('action_type') for action in solution.actions]) - - num_migrations = actions_counter.get("migrate", 0) - self.assertEqual(1, num_migrations) - - def 
test_check_parameters(self): - model = self.fake_cluster.generate_scenario_3_with_2_nodes() - self.m_model.return_value = model - solution = self.strategy.execute() - loader = default.DefaultActionLoader() - for action in solution.actions: - loaded_action = loader.load(action['action_type']) - loaded_action.input_parameters = action['input_parameters'] - loaded_action.validate_parameters() diff --git a/watcher/tests/decision_engine/strategy/strategies/test_outlet_temp_control.py b/watcher/tests/decision_engine/strategy/strategies/test_outlet_temp_control.py deleted file mode 100644 index 596bbbc..0000000 --- a/watcher/tests/decision_engine/strategy/strategies/test_outlet_temp_control.py +++ /dev/null @@ -1,207 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 Intel Corp -# -# Authors: Zhenzan Zhou -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import collections -import datetime -import mock - -from watcher.applier.loading import default -from watcher.common import exception -from watcher.common import utils -from watcher.decision_engine.model import model_root -from watcher.decision_engine.strategy import strategies -from watcher.tests import base -from watcher.tests.decision_engine.model import ceilometer_metrics -from watcher.tests.decision_engine.model import faker_cluster_state -from watcher.tests.decision_engine.model import gnocchi_metrics - - -class TestOutletTempControl(base.TestCase): - - scenarios = [ - ("Ceilometer", - {"datasource": "ceilometer", - "fake_datasource_cls": ceilometer_metrics.FakeCeilometerMetrics}), - ("Gnocchi", - {"datasource": "gnocchi", - "fake_datasource_cls": gnocchi_metrics.FakeGnocchiMetrics}), - ] - - def setUp(self): - super(TestOutletTempControl, self).setUp() - # fake metrics - self.fake_metrics = self.fake_datasource_cls() - - # fake cluster - self.fake_cluster = faker_cluster_state.FakerModelCollector() - - p_model = mock.patch.object( - strategies.OutletTempControl, "compute_model", - new_callable=mock.PropertyMock) - self.m_model = p_model.start() - self.addCleanup(p_model.stop) - - p_datasource = mock.patch.object( - strategies.OutletTempControl, self.datasource, - new_callable=mock.PropertyMock) - self.m_datasource = p_datasource.start() - self.addCleanup(p_datasource.stop) - - p_audit_scope = mock.patch.object( - strategies.OutletTempControl, "audit_scope", - new_callable=mock.PropertyMock - ) - self.m_audit_scope = p_audit_scope.start() - self.addCleanup(p_audit_scope.stop) - - self.m_audit_scope.return_value = mock.Mock() - - self.m_model.return_value = model_root.ModelRoot() - self.m_datasource.return_value = mock.Mock( - statistic_aggregation=self.fake_metrics.mock_get_statistics) - self.strategy = strategies.OutletTempControl( - config=mock.Mock(datasource=self.datasource)) - - self.strategy.input_parameters = utils.Struct() - 
self.strategy.input_parameters.update({'threshold': 34.3}) - self.strategy.threshold = 34.3 - - def test_calc_used_resource(self): - model = self.fake_cluster.generate_scenario_3_with_2_nodes() - self.m_model.return_value = model - node = model.get_node_by_uuid('Node_0') - cores_used, mem_used, disk_used = self.strategy.calc_used_resource( - node) - - self.assertEqual((10, 2, 20), (cores_used, mem_used, disk_used)) - - def test_group_hosts_by_outlet_temp(self): - model = self.fake_cluster.generate_scenario_3_with_2_nodes() - self.m_model.return_value = model - n1, n2 = self.strategy.group_hosts_by_outlet_temp() - self.assertEqual('Node_1', n1[0]['node'].uuid) - self.assertEqual('Node_0', n2[0]['node'].uuid) - - def test_choose_instance_to_migrate(self): - model = self.fake_cluster.generate_scenario_3_with_2_nodes() - self.m_model.return_value = model - n1, n2 = self.strategy.group_hosts_by_outlet_temp() - instance_to_mig = self.strategy.choose_instance_to_migrate(n1) - self.assertEqual('Node_1', instance_to_mig[0].uuid) - self.assertEqual('a4cab39b-9828-413a-bf88-f76921bf1517', - instance_to_mig[1].uuid) - - def test_filter_dest_servers(self): - model = self.fake_cluster.generate_scenario_3_with_2_nodes() - self.m_model.return_value = model - n1, n2 = self.strategy.group_hosts_by_outlet_temp() - instance_to_mig = self.strategy.choose_instance_to_migrate(n1) - dest_hosts = self.strategy.filter_dest_servers(n2, instance_to_mig[1]) - self.assertEqual(1, len(dest_hosts)) - self.assertEqual('Node_0', dest_hosts[0]['node'].uuid) - - def test_exception_model(self): - self.m_model.return_value = None - self.assertRaises( - exception.ClusterStateNotDefined, self.strategy.execute) - - def test_exception_cluster_empty(self): - model = model_root.ModelRoot() - self.m_model.return_value = model - self.assertRaises(exception.ClusterEmpty, self.strategy.execute) - - def test_exception_stale_cdm(self): - self.fake_cluster.set_cluster_data_model_as_stale() - 
self.m_model.return_value = self.fake_cluster.cluster_data_model - - self.assertRaises( - exception.ClusterStateNotDefined, - self.strategy.execute) - - def test_execute_cluster_empty(self): - model = model_root.ModelRoot() - self.m_model.return_value = model - self.assertRaises(exception.ClusterEmpty, self.strategy.execute) - - def test_execute_no_workload(self): - model = self.fake_cluster.generate_scenario_4_with_1_node_no_instance() - self.m_model.return_value = model - - solution = self.strategy.execute() - self.assertEqual([], solution.actions) - - def test_execute(self): - model = self.fake_cluster.generate_scenario_3_with_2_nodes() - self.m_model.return_value = model - solution = self.strategy.execute() - actions_counter = collections.Counter( - [action.get('action_type') for action in solution.actions]) - - num_migrations = actions_counter.get("migrate", 0) - self.assertEqual(1, num_migrations) - - def test_check_parameters(self): - model = self.fake_cluster.generate_scenario_3_with_2_nodes() - self.m_model.return_value = model - solution = self.strategy.execute() - loader = default.DefaultActionLoader() - for action in solution.actions: - loaded_action = loader.load(action['action_type']) - loaded_action.input_parameters = action['input_parameters'] - loaded_action.validate_parameters() - - def test_periods(self): - model = self.fake_cluster.generate_scenario_3_with_2_nodes() - self.m_model.return_value = model - p_ceilometer = mock.patch.object( - strategies.OutletTempControl, "ceilometer") - m_ceilometer = p_ceilometer.start() - self.addCleanup(p_ceilometer.stop) - p_gnocchi = mock.patch.object(strategies.OutletTempControl, "gnocchi") - m_gnocchi = p_gnocchi.start() - self.addCleanup(p_gnocchi.stop) - datetime_patcher = mock.patch.object( - datetime, 'datetime', - mock.Mock(wraps=datetime.datetime) - ) - mocked_datetime = datetime_patcher.start() - mocked_datetime.utcnow.return_value = datetime.datetime( - 2017, 3, 19, 18, 53, 11, 657417) - 
self.addCleanup(datetime_patcher.stop) - m_ceilometer.statistic_aggregation = mock.Mock( - side_effect=self.fake_metrics.mock_get_statistics) - m_gnocchi.statistic_aggregation = mock.Mock( - side_effect=self.fake_metrics.mock_get_statistics) - node = model.get_node_by_uuid('Node_0') - self.strategy.input_parameters.update({'threshold': 35.0}) - self.strategy.threshold = 35.0 - self.strategy.group_hosts_by_outlet_temp() - if self.strategy.config.datasource == "ceilometer": - m_ceilometer.statistic_aggregation.assert_any_call( - aggregate='avg', - meter_name='hardware.ipmi.node.outlet_temperature', - period=30, resource_id=node.uuid) - elif self.strategy.config.datasource == "gnocchi": - stop_time = datetime.datetime.utcnow() - start_time = stop_time - datetime.timedelta( - seconds=int('30')) - m_gnocchi.statistic_aggregation.assert_called_with( - resource_id=mock.ANY, - metric='hardware.ipmi.node.outlet_temperature', - granularity=300, start_time=start_time, stop_time=stop_time, - aggregation='mean') diff --git a/watcher/tests/decision_engine/strategy/strategies/test_uniform_airflow.py b/watcher/tests/decision_engine/strategy/strategies/test_uniform_airflow.py deleted file mode 100644 index 63076a1..0000000 --- a/watcher/tests/decision_engine/strategy/strategies/test_uniform_airflow.py +++ /dev/null @@ -1,249 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 Intel Corp -# -# Authors: Junjie-Huang -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import collections -import datetime -import mock - -from watcher.applier.loading import default -from watcher.common import exception -from watcher.common import utils -from watcher.decision_engine.model import model_root -from watcher.decision_engine.strategy import strategies -from watcher.tests import base -from watcher.tests.decision_engine.model import ceilometer_metrics -from watcher.tests.decision_engine.model import faker_cluster_state -from watcher.tests.decision_engine.model import gnocchi_metrics - - -class TestUniformAirflow(base.TestCase): - - scenarios = [ - ("Ceilometer", - {"datasource": "ceilometer", - "fake_datasource_cls": ceilometer_metrics.FakeCeilometerMetrics}), - ("Gnocchi", - {"datasource": "gnocchi", - "fake_datasource_cls": gnocchi_metrics.FakeGnocchiMetrics}), - ] - - def setUp(self): - super(TestUniformAirflow, self).setUp() - # fake metrics - self.fake_metrics = self.fake_datasource_cls() - # fake cluster - self.fake_cluster = faker_cluster_state.FakerModelCollector() - - p_model = mock.patch.object( - strategies.UniformAirflow, "compute_model", - new_callable=mock.PropertyMock) - self.m_model = p_model.start() - self.addCleanup(p_model.stop) - - p_datasource = mock.patch.object( - strategies.UniformAirflow, self.datasource, - new_callable=mock.PropertyMock) - self.m_datasource = p_datasource.start() - self.addCleanup(p_datasource.stop) - - p_audit_scope = mock.patch.object( - strategies.UniformAirflow, "audit_scope", - new_callable=mock.PropertyMock - ) - self.m_audit_scope = p_audit_scope.start() - self.addCleanup(p_audit_scope.stop) - - self.m_audit_scope.return_value = mock.Mock() - - self.m_model.return_value = model_root.ModelRoot() - self.m_datasource.return_value = mock.Mock( - statistic_aggregation=self.fake_metrics.mock_get_statistics) - self.strategy = strategies.UniformAirflow( - config=mock.Mock(datasource=self.datasource)) - self.strategy.input_parameters = utils.Struct() - 
self.strategy.input_parameters.update({'threshold_airflow': 400.0, - 'threshold_inlet_t': 28.0, - 'threshold_power': 350.0, - 'period': 300}) - self.strategy.threshold_airflow = 400 - self.strategy.threshold_inlet_t = 28 - self.strategy.threshold_power = 350 - self._period = 300 - - def test_calc_used_resource(self): - model = self.fake_cluster.generate_scenario_7_with_2_nodes() - self.m_model.return_value = model - node = model.get_node_by_uuid('Node_0') - cores_used, mem_used, disk_used = ( - self.strategy.calculate_used_resource(node)) - self.assertEqual((cores_used, mem_used, disk_used), (25, 4, 40)) - - def test_group_hosts_by_airflow(self): - model = self.fake_cluster.generate_scenario_7_with_2_nodes() - self.m_model.return_value = model - self.strategy.threshold_airflow = 300 - n1, n2 = self.strategy.group_hosts_by_airflow() - # print n1, n2, avg, w_map - self.assertEqual(n1[0]['node'].uuid, 'Node_0') - self.assertEqual(n2[0]['node'].uuid, 'Node_1') - - def test_choose_instance_to_migrate(self): - model = self.fake_cluster.generate_scenario_7_with_2_nodes() - self.m_model.return_value = model - self.strategy.threshold_airflow = 300 - self.strategy.threshold_inlet_t = 22 - n1, n2 = self.strategy.group_hosts_by_airflow() - instance_to_mig = self.strategy.choose_instance_to_migrate(n1) - - self.assertEqual(instance_to_mig[0].uuid, 'Node_0') - self.assertEqual(len(instance_to_mig[1]), 1) - self.assertIn(instance_to_mig[1][0].uuid, - {'cae81432-1631-4d4e-b29c-6f3acdcde906', - '73b09e16-35b7-4922-804e-e8f5d9b740fc'}) - - def test_choose_instance_to_migrate_all(self): - model = self.fake_cluster.generate_scenario_7_with_2_nodes() - self.m_model.return_value = model - self.strategy.threshold_airflow = 300 - self.strategy.threshold_inlet_t = 25 - n1, n2 = self.strategy.group_hosts_by_airflow() - instance_to_mig = self.strategy.choose_instance_to_migrate(n1) - - self.assertEqual(instance_to_mig[0].uuid, 'Node_0') - self.assertEqual(len(instance_to_mig[1]), 2) - 
self.assertEqual({'cae81432-1631-4d4e-b29c-6f3acdcde906', - '73b09e16-35b7-4922-804e-e8f5d9b740fc'}, - {inst.uuid for inst in instance_to_mig[1]}) - - def test_choose_instance_notfound(self): - model = self.fake_cluster.generate_scenario_7_with_2_nodes() - self.m_model.return_value = model - self.strategy.threshold_airflow = 300 - self.strategy.threshold_inlet_t = 22 - n1, n2 = self.strategy.group_hosts_by_airflow() - instances = model.get_all_instances() - [model.remove_instance(inst) for inst in instances.values()] - instance_to_mig = self.strategy.choose_instance_to_migrate(n1) - self.assertIsNone(instance_to_mig) - - def test_filter_destination_hosts(self): - model = self.fake_cluster.generate_scenario_7_with_2_nodes() - self.m_model.return_value = model - self.strategy.threshold_airflow = 300 - self.strategy.threshold_inlet_t = 22 - n1, n2 = self.strategy.group_hosts_by_airflow() - instance_to_mig = self.strategy.choose_instance_to_migrate(n1) - dest_hosts = self.strategy.filter_destination_hosts( - n2, instance_to_mig[1]) - - self.assertEqual(len(dest_hosts), 1) - self.assertEqual(dest_hosts[0]['node'].uuid, 'Node_1') - self.assertIn(instance_to_mig[1][0].uuid, - {'cae81432-1631-4d4e-b29c-6f3acdcde906', - '73b09e16-35b7-4922-804e-e8f5d9b740fc'}) - - def test_exception_model(self): - self.m_model.return_value = None - self.assertRaises( - exception.ClusterStateNotDefined, self.strategy.execute) - - def test_exception_cluster_empty(self): - model = model_root.ModelRoot() - self.m_model.return_value = model - self.assertRaises(exception.ClusterEmpty, self.strategy.execute) - - def test_exception_stale_cdm(self): - self.fake_cluster.set_cluster_data_model_as_stale() - self.m_model.return_value = self.fake_cluster.cluster_data_model - - self.assertRaises( - exception.ClusterStateNotDefined, - self.strategy.execute) - - def test_execute_cluster_empty(self): - model = model_root.ModelRoot() - self.m_model.return_value = model - 
self.assertRaises(exception.ClusterEmpty, self.strategy.execute) - - def test_execute_no_workload(self): - self.strategy.threshold_airflow = 300 - self.strategy.threshold_inlet_t = 25 - self.strategy.threshold_power = 300 - model = self.fake_cluster.generate_scenario_4_with_1_node_no_instance() - self.m_model.return_value = model - solution = self.strategy.execute() - self.assertEqual([], solution.actions) - - def test_execute(self): - self.strategy.threshold_airflow = 300 - self.strategy.threshold_inlet_t = 25 - self.strategy.threshold_power = 300 - model = self.fake_cluster.generate_scenario_7_with_2_nodes() - self.m_model.return_value = model - solution = self.strategy.execute() - actions_counter = collections.Counter( - [action.get('action_type') for action in solution.actions]) - - num_migrations = actions_counter.get("migrate", 0) - self.assertEqual(num_migrations, 2) - - def test_check_parameters(self): - model = self.fake_cluster.generate_scenario_7_with_2_nodes() - self.m_model.return_value = model - solution = self.strategy.execute() - loader = default.DefaultActionLoader() - for action in solution.actions: - loaded_action = loader.load(action['action_type']) - loaded_action.input_parameters = action['input_parameters'] - loaded_action.validate_parameters() - - def test_periods(self): - model = self.fake_cluster.generate_scenario_7_with_2_nodes() - self.m_model.return_value = model - p_ceilometer = mock.patch.object( - strategies.UniformAirflow, "ceilometer") - m_ceilometer = p_ceilometer.start() - self.addCleanup(p_ceilometer.stop) - p_gnocchi = mock.patch.object(strategies.UniformAirflow, "gnocchi") - m_gnocchi = p_gnocchi.start() - self.addCleanup(p_gnocchi.stop) - datetime_patcher = mock.patch.object( - datetime, 'datetime', - mock.Mock(wraps=datetime.datetime) - ) - mocked_datetime = datetime_patcher.start() - mocked_datetime.utcnow.return_value = datetime.datetime( - 2017, 3, 19, 18, 53, 11, 657417) - self.addCleanup(datetime_patcher.stop) - 
m_ceilometer.statistic_aggregation = mock.Mock( - side_effect=self.fake_metrics.mock_get_statistics) - m_gnocchi.statistic_aggregation = mock.Mock( - side_effect=self.fake_metrics.mock_get_statistics) - self.strategy.group_hosts_by_airflow() - if self.strategy.config.datasource == "ceilometer": - m_ceilometer.statistic_aggregation.assert_any_call( - aggregate='avg', meter_name='hardware.ipmi.node.airflow', - period=300, resource_id=mock.ANY) - elif self.strategy.config.datasource == "gnocchi": - stop_time = datetime.datetime.utcnow() - start_time = stop_time - datetime.timedelta( - seconds=int('300')) - m_gnocchi.statistic_aggregation.assert_called_with( - resource_id=mock.ANY, metric='hardware.ipmi.node.airflow', - granularity=300, start_time=start_time, stop_time=stop_time, - aggregation='mean') diff --git a/watcher/tests/decision_engine/strategy/strategies/test_vm_workload_consolidation.py b/watcher/tests/decision_engine/strategy/strategies/test_vm_workload_consolidation.py deleted file mode 100644 index 0f83824..0000000 --- a/watcher/tests/decision_engine/strategy/strategies/test_vm_workload_consolidation.py +++ /dev/null @@ -1,346 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Authors: Vojtech CIMA -# Bruno GRAZIOLI -# Sean MURPHY -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - -import datetime -import mock - -from watcher.common import exception -from watcher.decision_engine.model import model_root -from watcher.decision_engine.strategy import strategies -from watcher.tests import base -from watcher.tests.decision_engine.model import faker_cluster_and_metrics - - -class TestVMWorkloadConsolidation(base.TestCase): - - scenarios = [ - ("Ceilometer", - {"datasource": "ceilometer", - "fake_datasource_cls": - faker_cluster_and_metrics.FakeCeilometerMetrics}), - ("Gnocchi", - {"datasource": "gnocchi", - "fake_datasource_cls": - faker_cluster_and_metrics.FakeGnocchiMetrics}), - ] - - def setUp(self): - super(TestVMWorkloadConsolidation, self).setUp() - - # fake cluster - self.fake_cluster = faker_cluster_and_metrics.FakerModelCollector() - - p_model = mock.patch.object( - strategies.VMWorkloadConsolidation, "compute_model", - new_callable=mock.PropertyMock) - self.m_model = p_model.start() - self.addCleanup(p_model.stop) - - p_datasource = mock.patch.object( - strategies.VMWorkloadConsolidation, self.datasource, - new_callable=mock.PropertyMock) - self.m_datasource = p_datasource.start() - self.addCleanup(p_datasource.stop) - - p_audit_scope = mock.patch.object( - strategies.VMWorkloadConsolidation, "audit_scope", - new_callable=mock.PropertyMock - ) - self.m_audit_scope = p_audit_scope.start() - self.addCleanup(p_audit_scope.stop) - - self.m_audit_scope.return_value = mock.Mock() - - # fake metrics - self.fake_metrics = self.fake_datasource_cls( - self.m_model.return_value) - - self.m_model.return_value = model_root.ModelRoot() - self.m_datasource.return_value = mock.Mock( - statistic_aggregation=self.fake_metrics.mock_get_statistics) - self.strategy = strategies.VMWorkloadConsolidation( - config=mock.Mock(datasource=self.datasource)) - - def test_exception_stale_cdm(self): - self.fake_cluster.set_cluster_data_model_as_stale() - self.m_model.return_value = self.fake_cluster.cluster_data_model - - self.assertRaises( - 
exception.ClusterStateNotDefined, - self.strategy.execute) - - def test_get_instance_utilization(self): - model = self.fake_cluster.generate_scenario_1() - self.m_model.return_value = model - self.fake_metrics.model = model - instance_0 = model.get_instance_by_uuid("INSTANCE_0") - instance_util = dict(cpu=1.0, ram=1, disk=10) - self.assertEqual( - instance_util, - self.strategy.get_instance_utilization(instance_0)) - - def test_get_node_utilization(self): - model = self.fake_cluster.generate_scenario_1() - self.m_model.return_value = model - self.fake_metrics.model = model - node_0 = model.get_node_by_uuid("Node_0") - node_util = dict(cpu=1.0, ram=1, disk=10) - self.assertEqual( - node_util, - self.strategy.get_node_utilization(node_0)) - - def test_get_node_capacity(self): - model = self.fake_cluster.generate_scenario_1() - self.m_model.return_value = model - self.fake_metrics.model = model - node_0 = model.get_node_by_uuid("Node_0") - node_util = dict(cpu=40, ram=64, disk=250) - self.assertEqual(node_util, self.strategy.get_node_capacity(node_0)) - - def test_get_relative_node_utilization(self): - model = self.fake_cluster.generate_scenario_1() - self.m_model.return_value = model - self.fake_metrics.model = model - node = model.get_node_by_uuid('Node_0') - rhu = self.strategy.get_relative_node_utilization(node) - expected_rhu = {'disk': 0.04, 'ram': 0.015625, 'cpu': 0.025} - self.assertEqual(expected_rhu, rhu) - - def test_get_relative_cluster_utilization(self): - model = self.fake_cluster.generate_scenario_1() - self.m_model.return_value = model - self.fake_metrics.model = model - cru = self.strategy.get_relative_cluster_utilization() - expected_cru = {'cpu': 0.05, 'disk': 0.05, 'ram': 0.0234375} - self.assertEqual(expected_cru, cru) - - def test_add_migration(self): - model = self.fake_cluster.generate_scenario_1() - self.m_model.return_value = model - self.fake_metrics.model = model - n1 = model.get_node_by_uuid('Node_0') - n2 = 
model.get_node_by_uuid('Node_1') - instance_uuid = 'INSTANCE_0' - instance = model.get_instance_by_uuid(instance_uuid) - self.strategy.add_migration(instance, n1, n2) - self.assertEqual(1, len(self.strategy.solution.actions)) - expected = {'action_type': 'migrate', - 'input_parameters': {'destination_node': n2.uuid, - 'source_node': n1.uuid, - 'migration_type': 'live', - 'resource_id': instance_uuid}} - self.assertEqual(expected, self.strategy.solution.actions[0]) - - def test_is_overloaded(self): - model = self.fake_cluster.generate_scenario_1() - self.m_model.return_value = model - self.fake_metrics.model = model - n1 = model.get_node_by_uuid('Node_0') - cc = {'cpu': 1.0, 'ram': 1.0, 'disk': 1.0} - res = self.strategy.is_overloaded(n1, cc) - self.assertFalse(res) - - cc = {'cpu': 0.025, 'ram': 1.0, 'disk': 1.0} - res = self.strategy.is_overloaded(n1, cc) - self.assertFalse(res) - - cc = {'cpu': 0.024, 'ram': 1.0, 'disk': 1.0} - res = self.strategy.is_overloaded(n1, cc) - self.assertTrue(res) - - def test_instance_fits(self): - model = self.fake_cluster.generate_scenario_1() - self.m_model.return_value = model - self.fake_metrics.model = model - n = model.get_node_by_uuid('Node_1') - instance0 = model.get_instance_by_uuid('INSTANCE_0') - cc = {'cpu': 1.0, 'ram': 1.0, 'disk': 1.0} - res = self.strategy.instance_fits(instance0, n, cc) - self.assertTrue(res) - - cc = {'cpu': 0.025, 'ram': 1.0, 'disk': 1.0} - res = self.strategy.instance_fits(instance0, n, cc) - self.assertFalse(res) - - def test_add_action_enable_compute_node(self): - model = self.fake_cluster.generate_scenario_1() - self.m_model.return_value = model - self.fake_metrics.model = model - n = model.get_node_by_uuid('Node_0') - self.strategy.add_action_enable_compute_node(n) - expected = [{'action_type': 'change_nova_service_state', - 'input_parameters': {'state': 'enabled', - 'resource_id': 'Node_0'}}] - self.assertEqual(expected, self.strategy.solution.actions) - - def 
test_add_action_disable_node(self): - model = self.fake_cluster.generate_scenario_1() - self.m_model.return_value = model - self.fake_metrics.model = model - n = model.get_node_by_uuid('Node_0') - self.strategy.add_action_disable_node(n) - expected = [{'action_type': 'change_nova_service_state', - 'input_parameters': {'state': 'disabled', - 'resource_id': 'Node_0'}}] - self.assertEqual(expected, self.strategy.solution.actions) - - def test_disable_unused_nodes(self): - model = self.fake_cluster.generate_scenario_1() - self.m_model.return_value = model - self.fake_metrics.model = model - n1 = model.get_node_by_uuid('Node_0') - n2 = model.get_node_by_uuid('Node_1') - instance_uuid = 'INSTANCE_0' - instance = model.get_instance_by_uuid(instance_uuid) - self.strategy.disable_unused_nodes() - self.assertEqual(0, len(self.strategy.solution.actions)) - - # Migrate VM to free the node - self.strategy.add_migration(instance, n1, n2) - - self.strategy.disable_unused_nodes() - expected = {'action_type': 'change_nova_service_state', - 'input_parameters': {'state': 'disabled', - 'resource_id': 'Node_0'}} - self.assertEqual(2, len(self.strategy.solution.actions)) - self.assertEqual(expected, self.strategy.solution.actions[1]) - - def test_offload_phase(self): - model = self.fake_cluster.generate_scenario_1() - self.m_model.return_value = model - self.fake_metrics.model = model - cc = {'cpu': 1.0, 'ram': 1.0, 'disk': 1.0} - self.strategy.offload_phase(cc) - expected = [] - self.assertEqual(expected, self.strategy.solution.actions) - - def test_consolidation_phase(self): - model = self.fake_cluster.generate_scenario_1() - self.m_model.return_value = model - self.fake_metrics.model = model - n1 = model.get_node_by_uuid('Node_0') - n2 = model.get_node_by_uuid('Node_1') - instance_uuid = 'INSTANCE_0' - cc = {'cpu': 1.0, 'ram': 1.0, 'disk': 1.0} - self.strategy.consolidation_phase(cc) - expected = [{'action_type': 'migrate', - 'input_parameters': {'destination_node': n2.uuid, - 
'source_node': n1.uuid, - 'migration_type': 'live', - 'resource_id': instance_uuid}}] - self.assertEqual(expected, self.strategy.solution.actions) - - def test_strategy(self): - model = self.fake_cluster.generate_scenario_2() - self.m_model.return_value = model - self.fake_metrics.model = model - n1 = model.get_node_by_uuid('Node_0') - cc = {'cpu': 1.0, 'ram': 1.0, 'disk': 1.0} - self.strategy.offload_phase(cc) - self.strategy.consolidation_phase(cc) - self.strategy.optimize_solution() - n2 = self.strategy.solution.actions[0][ - 'input_parameters']['destination_node'] - expected = [{'action_type': 'migrate', - 'input_parameters': {'destination_node': n2, - 'source_node': n1.uuid, - 'migration_type': 'live', - 'resource_id': 'INSTANCE_3'}}, - {'action_type': 'migrate', - 'input_parameters': {'destination_node': n2, - 'source_node': n1.uuid, - 'migration_type': 'live', - 'resource_id': 'INSTANCE_1'}}] - - self.assertEqual(expected, self.strategy.solution.actions) - - def test_strategy2(self): - model = self.fake_cluster.generate_scenario_3() - self.m_model.return_value = model - self.fake_metrics.model = model - n1 = model.get_node_by_uuid('Node_0') - n2 = model.get_node_by_uuid('Node_1') - cc = {'cpu': 1.0, 'ram': 1.0, 'disk': 1.0} - self.strategy.offload_phase(cc) - expected = [{'action_type': 'migrate', - 'input_parameters': {'destination_node': n2.uuid, - 'migration_type': 'live', - 'resource_id': 'INSTANCE_6', - 'source_node': n1.uuid}}, - {'action_type': 'migrate', - 'input_parameters': {'destination_node': n2.uuid, - 'migration_type': 'live', - 'resource_id': 'INSTANCE_7', - 'source_node': n1.uuid}}, - {'action_type': 'migrate', - 'input_parameters': {'destination_node': n2.uuid, - 'migration_type': 'live', - 'resource_id': 'INSTANCE_8', - 'source_node': n1.uuid}}] - self.assertEqual(expected, self.strategy.solution.actions) - self.strategy.consolidation_phase(cc) - expected.append({'action_type': 'migrate', - 'input_parameters': {'destination_node': n1.uuid, 
- 'migration_type': 'live', - 'resource_id': 'INSTANCE_7', - 'source_node': n2.uuid}}) - self.assertEqual(expected, self.strategy.solution.actions) - self.strategy.optimize_solution() - del expected[3] - del expected[1] - self.assertEqual(expected, self.strategy.solution.actions) - - def test_periods(self): - model = self.fake_cluster.generate_scenario_1() - self.m_model.return_value = model - p_ceilometer = mock.patch.object( - strategies.VMWorkloadConsolidation, "ceilometer") - m_ceilometer = p_ceilometer.start() - self.addCleanup(p_ceilometer.stop) - p_gnocchi = mock.patch.object( - strategies.VMWorkloadConsolidation, "gnocchi") - m_gnocchi = p_gnocchi.start() - self.addCleanup(p_gnocchi.stop) - datetime_patcher = mock.patch.object( - datetime, 'datetime', - mock.Mock(wraps=datetime.datetime) - ) - mocked_datetime = datetime_patcher.start() - mocked_datetime.utcnow.return_value = datetime.datetime( - 2017, 3, 19, 18, 53, 11, 657417) - self.addCleanup(datetime_patcher.stop) - m_ceilometer.return_value = mock.Mock( - statistic_aggregation=self.fake_metrics.mock_get_statistics) - m_gnocchi.return_value = mock.Mock( - statistic_aggregation=self.fake_metrics.mock_get_statistics) - instance0 = model.get_instance_by_uuid("INSTANCE_0") - self.strategy.get_instance_utilization(instance0) - if self.strategy.config.datasource == "ceilometer": - m_ceilometer.statistic_aggregation.assert_any_call( - aggregate='avg', meter_name='disk.root.size', - period=3600, resource_id=instance0.uuid) - elif self.strategy.config.datasource == "gnocchi": - stop_time = datetime.datetime.utcnow() - start_time = stop_time - datetime.timedelta( - seconds=int('3600')) - m_gnocchi.statistic_aggregation.assert_called_with( - resource_id=instance0.uuid, metric='disk.root.size', - granularity=300, start_time=start_time, stop_time=stop_time, - aggregation='mean') diff --git a/watcher/tests/decision_engine/strategy/strategies/test_workload_balance.py 
b/watcher/tests/decision_engine/strategy/strategies/test_workload_balance.py deleted file mode 100644 index 36e06e6..0000000 --- a/watcher/tests/decision_engine/strategy/strategies/test_workload_balance.py +++ /dev/null @@ -1,217 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 Intel Corp -# -# Authors: Junjie-Huang -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import collections -import datetime -import mock - -from watcher.applier.loading import default -from watcher.common import exception -from watcher.common import utils -from watcher.decision_engine.model import model_root -from watcher.decision_engine.strategy import strategies -from watcher.tests import base -from watcher.tests.decision_engine.model import ceilometer_metrics -from watcher.tests.decision_engine.model import faker_cluster_state -from watcher.tests.decision_engine.model import gnocchi_metrics - - -class TestWorkloadBalance(base.TestCase): - - scenarios = [ - ("Ceilometer", - {"datasource": "ceilometer", - "fake_datasource_cls": ceilometer_metrics.FakeCeilometerMetrics}), - ("Gnocchi", - {"datasource": "gnocchi", - "fake_datasource_cls": gnocchi_metrics.FakeGnocchiMetrics}), - ] - - def setUp(self): - super(TestWorkloadBalance, self).setUp() - # fake metrics - self.fake_metrics = self.fake_datasource_cls() - # fake cluster - self.fake_cluster = faker_cluster_state.FakerModelCollector() - - p_model = mock.patch.object( - strategies.WorkloadBalance, "compute_model", - 
new_callable=mock.PropertyMock) - self.m_model = p_model.start() - self.addCleanup(p_model.stop) - - p_datasource = mock.patch.object( - strategies.WorkloadBalance, self.datasource, - new_callable=mock.PropertyMock) - self.m_datasource = p_datasource.start() - self.addCleanup(p_datasource.stop) - - p_audit_scope = mock.patch.object( - strategies.WorkloadBalance, "audit_scope", - new_callable=mock.PropertyMock - ) - self.m_audit_scope = p_audit_scope.start() - self.addCleanup(p_audit_scope.stop) - - self.m_audit_scope.return_value = mock.Mock() - self.m_datasource.return_value = mock.Mock( - statistic_aggregation=self.fake_metrics.mock_get_statistics_wb) - self.strategy = strategies.WorkloadBalance( - config=mock.Mock(datasource=self.datasource)) - self.strategy.input_parameters = utils.Struct() - self.strategy.input_parameters.update({'threshold': 25.0, - 'period': 300}) - self.strategy.threshold = 25.0 - self.strategy._period = 300 - - def test_calc_used_resource(self): - model = self.fake_cluster.generate_scenario_6_with_2_nodes() - self.m_model.return_value = model - node = model.get_node_by_uuid('Node_0') - cores_used, mem_used, disk_used = ( - self.strategy.calculate_used_resource(node)) - - self.assertEqual((cores_used, mem_used, disk_used), (20, 4, 40)) - - def test_group_hosts_by_cpu_util(self): - model = self.fake_cluster.generate_scenario_6_with_2_nodes() - self.m_model.return_value = model - self.strategy.threshold = 30 - n1, n2, avg, w_map = self.strategy.group_hosts_by_cpu_util() - self.assertEqual(n1[0]['node'].uuid, 'Node_0') - self.assertEqual(n2[0]['node'].uuid, 'Node_1') - self.assertEqual(avg, 8.0) - - def test_choose_instance_to_migrate(self): - model = self.fake_cluster.generate_scenario_6_with_2_nodes() - self.m_model.return_value = model - n1, n2, avg, w_map = self.strategy.group_hosts_by_cpu_util() - instance_to_mig = self.strategy.choose_instance_to_migrate( - n1, avg, w_map) - self.assertEqual(instance_to_mig[0].uuid, 'Node_0') - 
self.assertEqual(instance_to_mig[1].uuid, - "73b09e16-35b7-4922-804e-e8f5d9b740fc") - - def test_choose_instance_notfound(self): - model = self.fake_cluster.generate_scenario_6_with_2_nodes() - self.m_model.return_value = model - n1, n2, avg, w_map = self.strategy.group_hosts_by_cpu_util() - instances = model.get_all_instances() - [model.remove_instance(inst) for inst in instances.values()] - instance_to_mig = self.strategy.choose_instance_to_migrate( - n1, avg, w_map) - self.assertIsNone(instance_to_mig) - - def test_filter_destination_hosts(self): - model = self.fake_cluster.generate_scenario_6_with_2_nodes() - self.m_model.return_value = model - self.strategy.datasource = mock.MagicMock( - statistic_aggregation=self.fake_metrics.mock_get_statistics_wb) - n1, n2, avg, w_map = self.strategy.group_hosts_by_cpu_util() - instance_to_mig = self.strategy.choose_instance_to_migrate( - n1, avg, w_map) - dest_hosts = self.strategy.filter_destination_hosts( - n2, instance_to_mig[1], avg, w_map) - self.assertEqual(len(dest_hosts), 1) - self.assertEqual(dest_hosts[0]['node'].uuid, 'Node_1') - - def test_exception_model(self): - self.m_model.return_value = None - self.assertRaises( - exception.ClusterStateNotDefined, self.strategy.execute) - - def test_exception_cluster_empty(self): - model = model_root.ModelRoot() - self.m_model.return_value = model - self.assertRaises(exception.ClusterEmpty, self.strategy.execute) - - def test_exception_stale_cdm(self): - self.fake_cluster.set_cluster_data_model_as_stale() - self.m_model.return_value = self.fake_cluster.cluster_data_model - - self.assertRaises( - exception.ClusterStateNotDefined, - self.strategy.execute) - - def test_execute_cluster_empty(self): - model = model_root.ModelRoot() - self.m_model.return_value = model - self.assertRaises(exception.ClusterEmpty, self.strategy.execute) - - def test_execute_no_workload(self): - model = self.fake_cluster.generate_scenario_4_with_1_node_no_instance() - self.m_model.return_value = 
model - solution = self.strategy.execute() - self.assertEqual([], solution.actions) - - def test_execute(self): - model = self.fake_cluster.generate_scenario_6_with_2_nodes() - self.m_model.return_value = model - solution = self.strategy.execute() - actions_counter = collections.Counter( - [action.get('action_type') for action in solution.actions]) - - num_migrations = actions_counter.get("migrate", 0) - self.assertEqual(num_migrations, 1) - - def test_check_parameters(self): - model = self.fake_cluster.generate_scenario_6_with_2_nodes() - self.m_model.return_value = model - solution = self.strategy.execute() - loader = default.DefaultActionLoader() - for action in solution.actions: - loaded_action = loader.load(action['action_type']) - loaded_action.input_parameters = action['input_parameters'] - loaded_action.validate_parameters() - - def test_periods(self): - model = self.fake_cluster.generate_scenario_1() - self.m_model.return_value = model - p_ceilometer = mock.patch.object( - strategies.WorkloadBalance, "ceilometer") - m_ceilometer = p_ceilometer.start() - self.addCleanup(p_ceilometer.stop) - p_gnocchi = mock.patch.object(strategies.WorkloadBalance, "gnocchi") - m_gnocchi = p_gnocchi.start() - self.addCleanup(p_gnocchi.stop) - datetime_patcher = mock.patch.object( - datetime, 'datetime', - mock.Mock(wraps=datetime.datetime) - ) - mocked_datetime = datetime_patcher.start() - mocked_datetime.utcnow.return_value = datetime.datetime( - 2017, 3, 19, 18, 53, 11, 657417) - self.addCleanup(datetime_patcher.stop) - m_ceilometer.statistic_aggregation = mock.Mock( - side_effect=self.fake_metrics.mock_get_statistics_wb) - m_gnocchi.statistic_aggregation = mock.Mock( - side_effect=self.fake_metrics.mock_get_statistics_wb) - instance0 = model.get_instance_by_uuid("INSTANCE_0") - self.strategy.group_hosts_by_cpu_util() - if self.strategy.config.datasource == "ceilometer": - m_ceilometer.statistic_aggregation.assert_any_call( - aggregate='avg', meter_name='cpu_util', - 
period=300, resource_id=instance0.uuid) - elif self.strategy.config.datasource == "gnocchi": - stop_time = datetime.datetime.utcnow() - start_time = stop_time - datetime.timedelta( - seconds=int('300')) - m_gnocchi.statistic_aggregation.assert_called_with( - resource_id=mock.ANY, metric='cpu_util', - granularity=300, start_time=start_time, stop_time=stop_time, - aggregation='mean') diff --git a/watcher/tests/decision_engine/strategy/strategies/test_workload_stabilization.py b/watcher/tests/decision_engine/strategy/strategies/test_workload_stabilization.py deleted file mode 100644 index 8c9e656..0000000 --- a/watcher/tests/decision_engine/strategy/strategies/test_workload_stabilization.py +++ /dev/null @@ -1,277 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 Servionica LLC -# -# Authors: Alexander Chadin -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - -import datetime -import mock - -from watcher.common import clients -from watcher.common import utils -from watcher.decision_engine.model import model_root -from watcher.decision_engine.strategy import strategies -from watcher.tests import base -from watcher.tests.decision_engine.model import ceilometer_metrics -from watcher.tests.decision_engine.model import faker_cluster_state -from watcher.tests.decision_engine.model import gnocchi_metrics - - -class TestWorkloadStabilization(base.TestCase): - - scenarios = [ - ("Ceilometer", - {"datasource": "ceilometer", - "fake_datasource_cls": ceilometer_metrics.FakeCeilometerMetrics}), - ("Gnocchi", - {"datasource": "gnocchi", - "fake_datasource_cls": gnocchi_metrics.FakeGnocchiMetrics}), - ] - - def setUp(self): - super(TestWorkloadStabilization, self).setUp() - - # fake metrics - self.fake_metrics = self.fake_datasource_cls() - - # fake cluster - self.fake_cluster = faker_cluster_state.FakerModelCollector() - - self.hosts_load_assert = { - 'Node_0': {'cpu_util': 0.07, 'memory.resident': 7.0, 'vcpus': 40}, - 'Node_1': {'cpu_util': 0.07, 'memory.resident': 5, 'vcpus': 40}, - 'Node_2': {'cpu_util': 0.8, 'memory.resident': 29, 'vcpus': 40}, - 'Node_3': {'cpu_util': 0.05, 'memory.resident': 8, 'vcpus': 40}, - 'Node_4': {'cpu_util': 0.05, 'memory.resident': 4, 'vcpus': 40}} - - p_osc = mock.patch.object( - clients, "OpenStackClients") - self.m_osc = p_osc.start() - self.addCleanup(p_osc.stop) - - p_model = mock.patch.object( - strategies.WorkloadStabilization, "compute_model", - new_callable=mock.PropertyMock) - self.m_model = p_model.start() - self.addCleanup(p_model.stop) - - p_datasource = mock.patch.object( - strategies.WorkloadStabilization, self.datasource, - new_callable=mock.PropertyMock) - self.m_datasource = p_datasource.start() - self.addCleanup(p_datasource.stop) - - p_audit_scope = mock.patch.object( - strategies.WorkloadStabilization, "audit_scope", - new_callable=mock.PropertyMock - ) - self.m_audit_scope = 
p_audit_scope.start() - self.addCleanup(p_audit_scope.stop) - - self.m_model.return_value = model_root.ModelRoot() - self.m_audit_scope.return_value = mock.Mock() - self.m_datasource.return_value = mock.Mock( - statistic_aggregation=self.fake_metrics.mock_get_statistics) - - self.strategy = strategies.WorkloadStabilization( - config=mock.Mock(datasource=self.datasource)) - self.strategy.input_parameters = utils.Struct() - self.strategy.input_parameters.update( - {'metrics': ["cpu_util", "memory.resident"], - 'thresholds': {"cpu_util": 0.2, "memory.resident": 0.2}, - 'weights': {"cpu_util_weight": 1.0, - "memory.resident_weight": 1.0}, - 'instance_metrics': - {"cpu_util": "compute.node.cpu.percent", - "memory.resident": "hardware.memory.used"}, - 'host_choice': 'retry', - 'retry_count': 1, - 'periods': {"instance": 720, "node": 600}}) - self.strategy.metrics = ["cpu_util", "memory.resident"] - self.strategy.thresholds = {"cpu_util": 0.2, "memory.resident": 0.2} - self.strategy.weights = {"cpu_util_weight": 1.0, - "memory.resident_weight": 1.0} - self.strategy.instance_metrics = { - "cpu_util": "compute.node.cpu.percent", - "memory.resident": "hardware.memory.used"} - self.strategy.host_choice = 'retry' - self.strategy.retry_count = 1 - self.strategy.periods = {"instance": 720, "node": 600} - - def test_get_instance_load(self): - model = self.fake_cluster.generate_scenario_1() - self.m_model.return_value = model - instance0 = model.get_instance_by_uuid("INSTANCE_0") - instance_0_dict = { - 'uuid': 'INSTANCE_0', 'vcpus': 10, - 'cpu_util': 0.07, 'memory.resident': 2} - self.assertEqual( - instance_0_dict, self.strategy.get_instance_load(instance0)) - - def test_periods(self): - model = self.fake_cluster.generate_scenario_1() - self.m_model.return_value = model - p_ceilometer = mock.patch.object( - strategies.WorkloadStabilization, "ceilometer") - m_ceilometer = p_ceilometer.start() - self.addCleanup(p_ceilometer.stop) - p_gnocchi = 
mock.patch.object(strategies.WorkloadStabilization, - "gnocchi") - m_gnocchi = p_gnocchi.start() - self.addCleanup(p_gnocchi.stop) - datetime_patcher = mock.patch.object( - datetime, 'datetime', - mock.Mock(wraps=datetime.datetime) - ) - mocked_datetime = datetime_patcher.start() - mocked_datetime.utcnow.return_value = datetime.datetime( - 2017, 3, 19, 18, 53, 11, 657417) - self.addCleanup(datetime_patcher.stop) - m_ceilometer.return_value = mock.Mock( - statistic_aggregation=self.fake_metrics.mock_get_statistics) - m_gnocchi.return_value = mock.Mock( - statistic_aggregation=self.fake_metrics.mock_get_statistics) - instance0 = model.get_instance_by_uuid("INSTANCE_0") - self.strategy.get_instance_load(instance0) - if self.strategy.config.datasource == "ceilometer": - m_ceilometer.statistic_aggregation.assert_called_with( - aggregate='min', meter_name='memory.resident', - period=720, resource_id=instance0.uuid) - elif self.strategy.config.datasource == "gnocchi": - stop_time = datetime.datetime.utcnow() - start_time = stop_time - datetime.timedelta( - seconds=int('720')) - m_gnocchi.statistic_aggregation.assert_called_with( - resource_id=instance0.uuid, metric='memory.resident', - granularity=300, start_time=start_time, stop_time=stop_time, - aggregation='mean') - self.strategy.get_hosts_load() - if self.strategy.config.datasource == "ceilometer": - m_ceilometer.statistic_aggregation.assert_called_with( - aggregate='avg', meter_name='hardware.memory.used', - period=600, resource_id=mock.ANY) - elif self.strategy.config.datasource == "gnocchi": - stop_time = datetime.datetime.utcnow() - start_time = stop_time - datetime.timedelta( - seconds=int('600')) - m_gnocchi.statistic_aggregation.assert_called_with( - resource_id=mock.ANY, metric='hardware.memory.used', - granularity=300, start_time=start_time, stop_time=stop_time, - aggregation='mean') - - def test_normalize_hosts_load(self): - self.m_model.return_value = self.fake_cluster.generate_scenario_1() - fake_hosts = 
{'Node_0': {'cpu_util': 0.07, 'memory.resident': 7}, - 'Node_1': {'cpu_util': 0.05, 'memory.resident': 5}} - normalized_hosts = {'Node_0': - {'cpu_util': 0.07, - 'memory.resident': 0.05303030303030303}, - 'Node_1': - {'cpu_util': 0.05, - 'memory.resident': 0.03787878787878788}} - self.assertEqual( - normalized_hosts, - self.strategy.normalize_hosts_load(fake_hosts)) - - def test_get_available_nodes(self): - self.m_model.return_value = self.fake_cluster. \ - generate_scenario_9_with_3_active_plus_1_disabled_nodes() - self.assertEqual(3, len(self.strategy.get_available_nodes())) - - def test_get_hosts_load(self): - self.m_model.return_value = self.fake_cluster.generate_scenario_1() - self.assertEqual(self.strategy.get_hosts_load(), - self.hosts_load_assert) - - def test_get_sd(self): - test_cpu_sd = 0.296 - test_ram_sd = 9.3 - self.assertEqual( - round(self.strategy.get_sd( - self.hosts_load_assert, 'cpu_util'), 3), - test_cpu_sd) - self.assertEqual( - round(self.strategy.get_sd( - self.hosts_load_assert, 'memory.resident'), 1), - test_ram_sd) - - def test_calculate_weighted_sd(self): - sd_case = [0.5, 0.75] - self.assertEqual(self.strategy.calculate_weighted_sd(sd_case), 1.25) - - def test_calculate_migration_case(self): - model = self.fake_cluster.generate_scenario_1() - self.m_model.return_value = model - instance = model.get_instance_by_uuid("INSTANCE_5") - src_node = model.get_node_by_uuid("Node_2") - dst_node = model.get_node_by_uuid("Node_1") - result = self.strategy.calculate_migration_case( - self.hosts_load_assert, instance, - src_node, dst_node)[-1][dst_node.uuid] - result['cpu_util'] = round(result['cpu_util'], 3) - self.assertEqual(result, {'cpu_util': 0.095, 'memory.resident': 21.0, - 'vcpus': 40}) - - def test_simulate_migrations(self): - model = self.fake_cluster.generate_scenario_1() - self.m_model.return_value = model - self.strategy.host_choice = 'retry' - self.assertEqual( - 8, - len(self.strategy.simulate_migrations(self.hosts_load_assert))) - - 
def test_check_threshold(self): - self.m_model.return_value = self.fake_cluster.generate_scenario_1() - self.strategy.thresholds = {'cpu_util': 0.001, 'memory.resident': 0.2} - self.strategy.simulate_migrations = mock.Mock(return_value=True) - self.assertTrue(self.strategy.check_threshold()) - - def test_execute_one_migration(self): - self.m_model.return_value = self.fake_cluster.generate_scenario_1() - self.strategy.thresholds = {'cpu_util': 0.001, 'memory.resident': 0.2} - self.strategy.simulate_migrations = mock.Mock( - return_value=[ - {'instance': 'INSTANCE_4', 's_host': 'Node_2', - 'host': 'Node_1'}] - ) - with mock.patch.object(self.strategy, 'migrate') as mock_migration: - self.strategy.do_execute() - mock_migration.assert_called_once_with( - 'INSTANCE_4', 'Node_2', 'Node_1') - - def test_execute_multiply_migrations(self): - self.m_model.return_value = self.fake_cluster.generate_scenario_1() - self.strategy.thresholds = {'cpu_util': 0.00001, - 'memory.resident': 0.0001} - self.strategy.simulate_migrations = mock.Mock( - return_value=[ - {'instance': 'INSTANCE_4', 's_host': 'Node_2', - 'host': 'Node_1'}, - {'instance': 'INSTANCE_3', 's_host': 'Node_2', - 'host': 'Node_3'}] - ) - with mock.patch.object(self.strategy, 'migrate') as mock_migrate: - self.strategy.do_execute() - self.assertEqual(mock_migrate.call_count, 2) - - def test_execute_nothing_to_migrate(self): - self.m_model.return_value = self.fake_cluster.generate_scenario_1() - self.strategy.thresholds = {'cpu_util': 0.042, - 'memory.resident': 0.0001} - self.strategy.simulate_migrations = mock.Mock(return_value=False) - with mock.patch.object(self.strategy, 'migrate') as mock_migrate: - self.strategy.execute() - mock_migrate.assert_not_called() diff --git a/watcher/tests/decision_engine/test_gmr.py b/watcher/tests/decision_engine/test_gmr.py deleted file mode 100644 index d686a6d..0000000 --- a/watcher/tests/decision_engine/test_gmr.py +++ /dev/null @@ -1,36 +0,0 @@ -# -*- encoding: utf-8 -*- -# 
Copyright (c) 2016 b<>com -# -# Authors: Vincent FRANCOISE -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import mock - -from watcher.decision_engine import gmr -from watcher.decision_engine.model.collector import manager -from watcher.tests import base - - -class TestGmrPlugin(base.TestCase): - - @mock.patch.object(manager.CollectorManager, "get_collectors") - def test_show_models(self, m_get_collectors): - m_to_string = mock.Mock(return_value="") - m_get_collectors.return_value = { - "test_model": mock.Mock( - cluster_data_model=mock.Mock(to_string=m_to_string))} - output = gmr.show_models() - self.assertEqual(1, m_to_string.call_count) - self.assertIn("", output) diff --git a/watcher/tests/decision_engine/test_rpcapi.py b/watcher/tests/decision_engine/test_rpcapi.py deleted file mode 100644 index 61b9c2f..0000000 --- a/watcher/tests/decision_engine/test_rpcapi.py +++ /dev/null @@ -1,51 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2015 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -import mock -import oslo_messaging as om -from watcher.common import exception -from watcher.common import utils -from watcher.decision_engine import rpcapi -from watcher.tests import base - - -class TestDecisionEngineAPI(base.TestCase): - - def setUp(self): - super(TestDecisionEngineAPI, self).setUp() - - api = rpcapi.DecisionEngineAPI() - - def test_get_api_version(self): - with mock.patch.object(om.RPCClient, 'call') as mock_call: - expected_context = self.context - self.api.check_api_version(expected_context) - mock_call.assert_called_once_with( - expected_context, 'check_api_version', - api_version=rpcapi.DecisionEngineAPI().api_version) - - def test_execute_audit_throw_exception(self): - audit_uuid = "uuid" - self.assertRaises(exception.InvalidUuidOrName, - self.api.trigger_audit, - audit_uuid) - - def test_execute_audit_without_error(self): - with mock.patch.object(om.RPCClient, 'cast') as mock_cast: - audit_uuid = utils.generate_uuid() - self.api.trigger_audit(self.context, audit_uuid) - mock_cast.assert_called_once_with( - self.context, 'trigger_audit', audit_uuid=audit_uuid) diff --git a/watcher/tests/decision_engine/test_scheduling.py b/watcher/tests/decision_engine/test_scheduling.py deleted file mode 100644 index d4a057c..0000000 --- a/watcher/tests/decision_engine/test_scheduling.py +++ /dev/null @@ -1,86 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Authors: Vincent FRANCOISE -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -from apscheduler.schedulers import background -from apscheduler.triggers import interval as interval_trigger -import eventlet -import mock - -from watcher.decision_engine.loading import default as default_loading -from watcher.decision_engine import scheduling -from watcher.tests import base -from watcher.tests.decision_engine.model import faker_cluster_state - - -class TestDecisionEngineSchedulingService(base.TestCase): - - @mock.patch.object( - default_loading.ClusterDataModelCollectorLoader, 'load') - @mock.patch.object( - default_loading.ClusterDataModelCollectorLoader, 'list_available') - @mock.patch.object(background.BackgroundScheduler, 'start') - def test_start_de_scheduling_service(self, m_start, m_list_available, - m_load): - m_list_available.return_value = { - 'fake': faker_cluster_state.FakerModelCollector} - fake_collector = faker_cluster_state.FakerModelCollector( - config=mock.Mock(period=777)) - m_load.return_value = fake_collector - - scheduler = scheduling.DecisionEngineSchedulingService() - - scheduler.start() - - m_start.assert_called_once_with(scheduler) - jobs = scheduler.get_jobs() - self.assertEqual(2, len(jobs)) - - job = jobs[0] - self.assertTrue(bool(fake_collector.cluster_data_model)) - - self.assertIsInstance(job.trigger, interval_trigger.IntervalTrigger) - - @mock.patch.object( - default_loading.ClusterDataModelCollectorLoader, 'load') - @mock.patch.object( - default_loading.ClusterDataModelCollectorLoader, 'list_available') - @mock.patch.object(background.BackgroundScheduler, 'start') - def test_execute_sync_job_fails(self, m_start, m_list_available, - m_load): - fake_config = mock.Mock(period=.01) - fake_collector = faker_cluster_state.FakerModelCollector( - config=fake_config) - fake_collector.synchronize = mock.Mock( - side_effect=lambda: eventlet.sleep(.5)) - m_list_available.return_value = { - 'fake': 
faker_cluster_state.FakerModelCollector} - m_load.return_value = fake_collector - - scheduler = scheduling.DecisionEngineSchedulingService() - - scheduler.start() - - m_start.assert_called_once_with(scheduler) - jobs = scheduler.get_jobs() - self.assertEqual(2, len(jobs)) - - job = jobs[0] - job.func() - self.assertFalse(bool(fake_collector.cluster_data_model)) - - self.assertIsInstance(job.trigger, interval_trigger.IntervalTrigger) diff --git a/watcher/tests/decision_engine/test_sync.py b/watcher/tests/decision_engine/test_sync.py deleted file mode 100644 index 3a2f23e..0000000 --- a/watcher/tests/decision_engine/test_sync.py +++ /dev/null @@ -1,661 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import mock - -from watcher.common import context -from watcher.common import utils -from watcher.decision_engine.loading import default -from watcher.decision_engine import sync -from watcher import objects -from watcher.tests.db import base -from watcher.tests.decision_engine import fake_goals -from watcher.tests.decision_engine import fake_strategies - - -class TestSyncer(base.DbTestCase): - - def setUp(self): - super(TestSyncer, self).setUp() - self.ctx = context.make_context() - - # This mock simulates the strategies discovery done in discover() - self.m_available_strategies = mock.Mock(return_value={ - fake_strategies.FakeDummy1Strategy1.get_name(): - fake_strategies.FakeDummy1Strategy1, - fake_strategies.FakeDummy1Strategy2.get_name(): - fake_strategies.FakeDummy1Strategy2, - fake_strategies.FakeDummy2Strategy3.get_name(): - fake_strategies.FakeDummy2Strategy3, - fake_strategies.FakeDummy2Strategy4.get_name(): - fake_strategies.FakeDummy2Strategy4, - }) - - self.m_available_goals = mock.Mock(return_value={ - fake_goals.FakeDummy1.get_name(): fake_goals.FakeDummy1, - fake_goals.FakeDummy2.get_name(): fake_goals.FakeDummy2, - }) - - self.goal1_spec = fake_goals.FakeDummy1( - config=mock.Mock()).get_efficacy_specification() - self.goal2_spec = fake_goals.FakeDummy2( - config=mock.Mock()).get_efficacy_specification() - - p_goals_load = mock.patch.object( - default.DefaultGoalLoader, 'load', - side_effect=lambda goal: self.m_available_goals()[goal]()) - p_goals = mock.patch.object( - default.DefaultGoalLoader, 'list_available', - self.m_available_goals) - p_strategies = mock.patch.object( - default.DefaultStrategyLoader, 'list_available', - self.m_available_strategies) - - p_goals.start() - p_goals_load.start() - p_strategies.start() - - self.syncer = sync.Syncer() - self.addCleanup(p_goals.stop) - self.addCleanup(p_goals_load.stop) - self.addCleanup(p_strategies.stop) - - @staticmethod - def _find_created_modified_unmodified_ids(befores, afters): - created = 
{ - a_item.id: a_item for a_item in afters - if a_item.uuid not in (b_item.uuid for b_item in befores) - } - - modified = { - a_item.id: a_item for a_item in afters - if a_item.as_dict() not in ( - b_items.as_dict() for b_items in befores) - } - - unmodified = { - a_item.id: a_item for a_item in afters - if a_item.as_dict() in ( - b_items.as_dict() for b_items in befores) - } - - return created, modified, unmodified - - @mock.patch.object(objects.Strategy, "soft_delete") - @mock.patch.object(objects.Strategy, "save") - @mock.patch.object(objects.Strategy, "create") - @mock.patch.object(objects.Strategy, "list") - @mock.patch.object(objects.Goal, "get_by_name") - @mock.patch.object(objects.Goal, "soft_delete") - @mock.patch.object(objects.Goal, "save") - @mock.patch.object(objects.Goal, "create") - @mock.patch.object(objects.Goal, "list") - def test_sync_empty_db( - self, m_g_list, m_g_create, m_g_save, m_g_soft_delete, - m_g_get_by_name, m_s_list, m_s_create, m_s_save, m_s_soft_delete): - m_g_get_by_name.side_effect = [ - objects.Goal(self.ctx, id=i) for i in range(1, 10)] - m_g_list.return_value = [] - m_s_list.return_value = [] - - self.syncer.sync() - - self.assertEqual(2, m_g_create.call_count) - self.assertEqual(0, m_g_save.call_count) - self.assertEqual(0, m_g_soft_delete.call_count) - - self.assertEqual(4, m_s_create.call_count) - self.assertEqual(0, m_s_save.call_count) - self.assertEqual(0, m_s_soft_delete.call_count) - - @mock.patch.object(objects.Strategy, "soft_delete") - @mock.patch.object(objects.Strategy, "save") - @mock.patch.object(objects.Strategy, "create") - @mock.patch.object(objects.Strategy, "list") - @mock.patch.object(objects.Goal, "get_by_name") - @mock.patch.object(objects.Goal, "soft_delete") - @mock.patch.object(objects.Goal, "save") - @mock.patch.object(objects.Goal, "create") - @mock.patch.object(objects.Goal, "list") - def test_sync_with_existing_goal( - self, m_g_list, m_g_create, m_g_save, m_g_soft_delete, - m_g_get_by_name, 
m_s_list, m_s_create, m_s_save, m_s_soft_delete): - m_g_get_by_name.side_effect = [ - objects.Goal(self.ctx, id=i) for i in range(1, 10)] - m_g_list.return_value = [ - objects.Goal(self.ctx, id=1, uuid=utils.generate_uuid(), - name="dummy_1", display_name="Dummy 1", - efficacy_specification=( - self.goal1_spec.serialize_indicators_specs())) - ] - m_s_list.return_value = [] - - self.syncer.sync() - - self.assertEqual(1, m_g_create.call_count) - self.assertEqual(0, m_g_save.call_count) - self.assertEqual(0, m_g_soft_delete.call_count) - - self.assertEqual(4, m_s_create.call_count) - self.assertEqual(0, m_s_save.call_count) - self.assertEqual(0, m_s_soft_delete.call_count) - - @mock.patch.object(objects.Strategy, "soft_delete") - @mock.patch.object(objects.Strategy, "save") - @mock.patch.object(objects.Strategy, "create") - @mock.patch.object(objects.Strategy, "list") - @mock.patch.object(objects.Goal, "get_by_name") - @mock.patch.object(objects.Goal, "soft_delete") - @mock.patch.object(objects.Goal, "save") - @mock.patch.object(objects.Goal, "create") - @mock.patch.object(objects.Goal, "list") - def test_sync_with_existing_strategy( - self, m_g_list, m_g_create, m_g_save, m_g_soft_delete, - m_g_get_by_name, m_s_list, m_s_create, m_s_save, m_s_soft_delete): - m_g_get_by_name.side_effect = [ - objects.Goal(self.ctx, id=i) for i in range(1, 10)] - m_g_list.return_value = [ - objects.Goal(self.ctx, id=1, uuid=utils.generate_uuid(), - name="dummy_1", display_name="Dummy 1", - efficacy_specification=( - self.goal1_spec.serialize_indicators_specs())) - ] - m_s_list.return_value = [ - objects.Strategy(self.ctx, id=1, name="strategy_1", - goal_id=1, display_name="Strategy 1", - parameters_spec='{}') - ] - self.syncer.sync() - - self.assertEqual(1, m_g_create.call_count) - self.assertEqual(0, m_g_save.call_count) - self.assertEqual(0, m_g_soft_delete.call_count) - - self.assertEqual(3, m_s_create.call_count) - self.assertEqual(0, m_s_save.call_count) - self.assertEqual(0, 
m_s_soft_delete.call_count) - - @mock.patch.object(objects.Strategy, "soft_delete") - @mock.patch.object(objects.Strategy, "save") - @mock.patch.object(objects.Strategy, "create") - @mock.patch.object(objects.Strategy, "list") - @mock.patch.object(objects.Goal, "get_by_name") - @mock.patch.object(objects.Goal, "soft_delete") - @mock.patch.object(objects.Goal, "save") - @mock.patch.object(objects.Goal, "create") - @mock.patch.object(objects.Goal, "list") - def test_sync_with_modified_goal( - self, m_g_list, m_g_create, m_g_save, m_g_soft_delete, - m_g_get_by_name, m_s_list, m_s_create, m_s_save, m_s_soft_delete): - m_g_get_by_name.side_effect = [ - objects.Goal(self.ctx, id=i) for i in range(1, 10)] - m_g_list.return_value = [objects.Goal( - self.ctx, id=1, uuid=utils.generate_uuid(), - name="dummy_2", display_name="original", - efficacy_specification=self.goal2_spec.serialize_indicators_specs() - )] - m_s_list.return_value = [] - self.syncer.sync() - - self.assertEqual(2, m_g_create.call_count) - self.assertEqual(0, m_g_save.call_count) - self.assertEqual(1, m_g_soft_delete.call_count) - - self.assertEqual(4, m_s_create.call_count) - self.assertEqual(0, m_s_save.call_count) - self.assertEqual(0, m_s_soft_delete.call_count) - - @mock.patch.object(objects.Strategy, "soft_delete") - @mock.patch.object(objects.Strategy, "save") - @mock.patch.object(objects.Strategy, "create") - @mock.patch.object(objects.Strategy, "list") - @mock.patch.object(objects.Goal, "get_by_name") - @mock.patch.object(objects.Goal, "soft_delete") - @mock.patch.object(objects.Goal, "save") - @mock.patch.object(objects.Goal, "create") - @mock.patch.object(objects.Goal, "list") - def test_sync_with_modified_strategy( - self, m_g_list, m_g_create, m_g_save, m_g_soft_delete, - m_g_get_by_name, m_s_list, m_s_create, m_s_save, m_s_soft_delete): - m_g_get_by_name.side_effect = [ - objects.Goal(self.ctx, id=i) for i in range(1, 10)] - m_g_list.return_value = [ - objects.Goal(self.ctx, id=1, 
uuid=utils.generate_uuid(), - name="dummy_1", display_name="Dummy 1", - efficacy_specification=( - self.goal1_spec.serialize_indicators_specs())) - ] - m_s_list.return_value = [ - objects.Strategy(self.ctx, id=1, name="strategy_1", - goal_id=1, display_name="original", - parameters_spec='{}') - ] - self.syncer.sync() - - self.assertEqual(1, m_g_create.call_count) - self.assertEqual(0, m_g_save.call_count) - self.assertEqual(0, m_g_soft_delete.call_count) - - self.assertEqual(4, m_s_create.call_count) - self.assertEqual(0, m_s_save.call_count) - self.assertEqual(1, m_s_soft_delete.call_count) - - def test_end2end_sync_goals_with_modified_goal_and_strategy(self): - # ### Setup ### # - - # Here, we simulate goals and strategies already discovered in the past - # that were saved in DB - - # Should stay unmodified after sync() - goal1 = objects.Goal( - self.ctx, id=1, uuid=utils.generate_uuid(), - name="dummy_1", display_name="Dummy 1", - efficacy_specification=( - self.goal1_spec.serialize_indicators_specs())) - # Should be modified by the sync() - goal2 = objects.Goal( - self.ctx, id=2, uuid=utils.generate_uuid(), - name="dummy_2", display_name="Original", - efficacy_specification=self.goal2_spec.serialize_indicators_specs() - ) - goal1.create() - goal2.create() - - # Should stay unmodified after sync() - strategy1 = objects.Strategy( - self.ctx, id=1, name="strategy_1", uuid=utils.generate_uuid(), - display_name="Strategy 1", goal_id=goal1.id) - # Should be modified after sync() because its related goal has been - # modified - strategy2 = objects.Strategy( - self.ctx, id=2, name="strategy_2", uuid=utils.generate_uuid(), - display_name="Strategy 2", goal_id=goal2.id) - # Should be modified after sync() because its strategy name has been - # modified - strategy3 = objects.Strategy( - self.ctx, id=3, name="strategy_3", uuid=utils.generate_uuid(), - display_name="Original", goal_id=goal1.id) - # Should be modified after sync() because both its related goal - # and its 
strategy name have been modified - strategy4 = objects.Strategy( - self.ctx, id=4, name="strategy_4", uuid=utils.generate_uuid(), - display_name="Original", goal_id=goal2.id) - strategy1.create() - strategy2.create() - strategy3.create() - strategy4.create() - - # Here we simulate audit_templates that were already created in the - # past and hence saved within the Watcher DB - - # Should stay unmodified after sync() - audit_template1 = objects.AuditTemplate( - self.ctx, id=1, name="Synced AT1", uuid=utils.generate_uuid(), - goal_id=goal1.id, strategy_id=strategy1.id) - # Should be modified by the sync() because its associated goal - # has been modified (compared to the defined fake goals) - audit_template2 = objects.AuditTemplate( - self.ctx, id=2, name="Synced AT2", uuid=utils.generate_uuid(), - goal_id=goal2.id, strategy_id=strategy2.id) - # Should be modified by the sync() because its associated strategy - # has been modified (compared to the defined fake strategies) - audit_template3 = objects.AuditTemplate( - self.ctx, id=3, name="Synced AT3", uuid=utils.generate_uuid(), - goal_id=goal1.id, strategy_id=strategy3.id) - # Modified because of both because its associated goal and associated - # strategy should be modified - audit_template4 = objects.AuditTemplate( - self.ctx, id=4, name="Synced AT4", uuid=utils.generate_uuid(), - goal_id=goal2.id, strategy_id=strategy4.id) - audit_template1.create() - audit_template2.create() - audit_template3.create() - audit_template4.create() - - # Should stay unmodified after sync() - audit1 = objects.Audit( - self.ctx, id=1, uuid=utils.generate_uuid(), - audit_type=objects.audit.AuditType.ONESHOT.value, - state=objects.audit.State.PENDING, - goal_id=goal1.id, strategy_id=strategy1.id, auto_trigger=False) - # Should be modified by the sync() because its associated goal - # has been modified (compared to the defined fake goals) - audit2 = objects.Audit( - self.ctx, id=2, uuid=utils.generate_uuid(), - 
audit_type=objects.audit.AuditType.ONESHOT.value, - state=objects.audit.State.PENDING, - goal_id=goal2.id, strategy_id=strategy2.id, auto_trigger=False) - # Should be modified by the sync() because its associated strategy - # has been modified (compared to the defined fake strategies) - audit3 = objects.Audit( - self.ctx, id=3, uuid=utils.generate_uuid(), - audit_type=objects.audit.AuditType.ONESHOT.value, - state=objects.audit.State.PENDING, - goal_id=goal1.id, strategy_id=strategy3.id, auto_trigger=False) - # Modified because of both because its associated goal and associated - # strategy should be modified (compared to the defined fake - # goals/strategies) - audit4 = objects.Audit( - self.ctx, id=4, uuid=utils.generate_uuid(), - audit_type=objects.audit.AuditType.ONESHOT.value, - state=objects.audit.State.PENDING, - goal_id=goal2.id, strategy_id=strategy4.id, auto_trigger=False) - - audit1.create() - audit2.create() - audit3.create() - audit4.create() - - # Should stay unmodified after sync() - action_plan1 = objects.ActionPlan( - self.ctx, id=1, uuid=utils.generate_uuid(), - audit_id=audit1.id, strategy_id=strategy1.id, - state='DOESNOTMATTER', global_efficacy={}) - # Stale after syncing because the goal of the audit has been modified - # (compared to the defined fake goals) - action_plan2 = objects.ActionPlan( - self.ctx, id=2, uuid=utils.generate_uuid(), - audit_id=audit2.id, strategy_id=strategy2.id, - state='DOESNOTMATTER', global_efficacy={}) - # Stale after syncing because the strategy has been modified - # (compared to the defined fake strategies) - action_plan3 = objects.ActionPlan( - self.ctx, id=3, uuid=utils.generate_uuid(), - audit_id=audit3.id, strategy_id=strategy3.id, - state='DOESNOTMATTER', global_efficacy={}) - # Stale after syncing because both the strategy and the related audit - # have been modified (compared to the defined fake goals/strategies) - action_plan4 = objects.ActionPlan( - self.ctx, id=4, uuid=utils.generate_uuid(), - 
audit_id=audit4.id, strategy_id=strategy4.id, - state='DOESNOTMATTER', global_efficacy={}) - - action_plan1.create() - action_plan2.create() - action_plan3.create() - action_plan4.create() - - before_goals = objects.Goal.list(self.ctx) - before_strategies = objects.Strategy.list(self.ctx) - before_audit_templates = objects.AuditTemplate.list(self.ctx) - before_audits = objects.Audit.list(self.ctx) - before_action_plans = objects.ActionPlan.list(self.ctx) - - # ### Action under test ### # - - try: - self.syncer.sync() - except Exception as exc: - self.fail(exc) - - # ### Assertions ### # - - after_goals = objects.Goal.list(self.ctx) - after_strategies = objects.Strategy.list(self.ctx) - after_audit_templates = objects.AuditTemplate.list(self.ctx) - after_audits = objects.Audit.list(self.ctx) - after_action_plans = objects.ActionPlan.list(self.ctx) - - self.assertEqual(2, len(before_goals)) - self.assertEqual(4, len(before_strategies)) - self.assertEqual(4, len(before_audit_templates)) - self.assertEqual(4, len(before_audits)) - self.assertEqual(4, len(before_action_plans)) - self.assertEqual(2, len(after_goals)) - self.assertEqual(4, len(after_strategies)) - self.assertEqual(4, len(after_audit_templates)) - self.assertEqual(4, len(after_audits)) - self.assertEqual(4, len(after_action_plans)) - - self.assertEqual( - {"dummy_1", "dummy_2"}, - set([g.name for g in after_goals])) - self.assertEqual( - {"strategy_1", "strategy_2", "strategy_3", "strategy_4"}, - set([s.name for s in after_strategies])) - - created_goals, modified_goals, unmodified_goals = ( - self._find_created_modified_unmodified_ids( - before_goals, after_goals)) - - created_strategies, modified_strategies, unmodified_strategies = ( - self._find_created_modified_unmodified_ids( - before_strategies, after_strategies)) - - (created_audit_templates, modified_audit_templates, - unmodified_audit_templates) = ( - self._find_created_modified_unmodified_ids( - before_audit_templates, after_audit_templates)) - - 
created_audits, modified_audits, unmodified_audits = ( - self._find_created_modified_unmodified_ids( - before_audits, after_audits)) - - (created_action_plans, modified_action_plans, - unmodified_action_plans) = ( - self._find_created_modified_unmodified_ids( - before_action_plans, after_action_plans)) - - dummy_1_spec = [ - {'description': 'Dummy indicator', 'name': 'dummy', - 'schema': 'Range(min=0, max=100, min_included=True, ' - 'max_included=True, msg=None)', - 'unit': '%'}] - dummy_2_spec = [] - self.assertEqual( - [dummy_1_spec, dummy_2_spec], - [g.efficacy_specification for g in after_goals]) - - self.assertEqual(1, len(created_goals)) - self.assertEqual(3, len(created_strategies)) - self.assertEqual(0, len(created_audits)) - self.assertEqual(0, len(created_action_plans)) - - self.assertEqual(2, strategy2.goal_id) - - self.assertNotEqual( - set([strategy2.id, strategy3.id, strategy4.id]), - set(modified_strategies)) - self.assertEqual(set([strategy1.id]), set(unmodified_strategies)) - - self.assertEqual( - set([audit_template2.id, audit_template3.id, audit_template4.id]), - set(modified_audit_templates)) - self.assertEqual(set([audit_template1.id]), - set(unmodified_audit_templates)) - - self.assertEqual( - set([audit2.id, audit3.id, audit4.id]), - set(modified_audits)) - self.assertEqual(set([audit1.id]), set(unmodified_audits)) - - self.assertEqual( - set([action_plan2.id, action_plan3.id, action_plan4.id]), - set(modified_action_plans)) - self.assertTrue( - all(ap.state == objects.action_plan.State.CANCELLED - for ap in modified_action_plans.values())) - self.assertEqual(set([action_plan1.id]), set(unmodified_action_plans)) - - def test_end2end_sync_goals_with_removed_goal_and_strategy(self): - # ### Setup ### # - - # We simulate the fact that we removed 2 strategies - self.m_available_strategies.return_value = { - fake_strategies.FakeDummy1Strategy1.get_name(): - fake_strategies.FakeDummy1Strategy1 - } - # We simulate the fact that we removed the 
dummy_2 goal - self.m_available_goals.return_value = { - fake_goals.FakeDummy1.get_name(): fake_goals.FakeDummy1, - } - # Should stay unmodified after sync() - goal1 = objects.Goal( - self.ctx, id=1, uuid=utils.generate_uuid(), - name="dummy_1", display_name="Dummy 1", - efficacy_specification=self.goal1_spec.serialize_indicators_specs() - ) - # To be removed by the sync() - goal2 = objects.Goal( - self.ctx, id=2, uuid=utils.generate_uuid(), - name="dummy_2", display_name="Dummy 2", - efficacy_specification=self.goal2_spec.serialize_indicators_specs() - ) - goal1.create() - goal2.create() - - # Should stay unmodified after sync() - strategy1 = objects.Strategy( - self.ctx, id=1, name="strategy_1", uuid=utils.generate_uuid(), - display_name="Strategy 1", goal_id=goal1.id) - # To be removed by the sync() because strategy entry point does not - # exist anymore - strategy2 = objects.Strategy( - self.ctx, id=2, name="strategy_2", uuid=utils.generate_uuid(), - display_name="Strategy 2", goal_id=goal1.id) - # To be removed by the sync() because the goal has been soft deleted - # and because the strategy entry point does not exist anymore - strategy3 = objects.Strategy( - self.ctx, id=3, name="strategy_3", uuid=utils.generate_uuid(), - display_name="Original", goal_id=goal2.id) - strategy1.create() - strategy2.create() - strategy3.create() - - # Here we simulate audit_templates that were already created in the - # past and hence saved within the Watcher DB - - # The strategy of this audit template will be dereferenced - # as it does not exist anymore - audit_template1 = objects.AuditTemplate( - self.ctx, id=1, name="Synced AT1", uuid=utils.generate_uuid(), - goal_id=goal1.id, strategy_id=strategy1.id) - # Stale after syncing because the goal has been soft deleted - audit_template2 = objects.AuditTemplate( - self.ctx, id=2, name="Synced AT2", uuid=utils.generate_uuid(), - goal_id=goal2.id, strategy_id=strategy2.id) - - audit_template1.create() - audit_template2.create() - - 
# Should stay unmodified after sync() - audit1 = objects.Audit( - self.ctx, id=1, uuid=utils.generate_uuid(), - audit_type=objects.audit.AuditType.ONESHOT.value, - state=objects.audit.State.PENDING, - goal_id=goal1.id, strategy_id=strategy1.id, auto_trigger=False) - # Stale after syncing because the goal has been soft deleted - audit2 = objects.Audit( - self.ctx, id=2, uuid=utils.generate_uuid(), - audit_type=objects.audit.AuditType.ONESHOT.value, - state=objects.audit.State.PENDING, - goal_id=goal2.id, strategy_id=strategy2.id, auto_trigger=False) - audit1.create() - audit2.create() - - # Stale after syncing because its related strategy has been be - # soft deleted - action_plan1 = objects.ActionPlan( - self.ctx, id=1, uuid=utils.generate_uuid(), - audit_id=audit1.id, strategy_id=strategy1.id, - state='DOESNOTMATTER', global_efficacy={}) - # Stale after syncing because its related goal has been soft deleted - action_plan2 = objects.ActionPlan( - self.ctx, id=2, uuid=utils.generate_uuid(), - audit_id=audit2.id, strategy_id=strategy2.id, - state='DOESNOTMATTER', global_efficacy={}) - - action_plan1.create() - action_plan2.create() - - before_goals = objects.Goal.list(self.ctx) - before_strategies = objects.Strategy.list(self.ctx) - before_audit_templates = objects.AuditTemplate.list(self.ctx) - before_audits = objects.Audit.list(self.ctx) - before_action_plans = objects.ActionPlan.list(self.ctx) - - # ### Action under test ### # - - try: - self.syncer.sync() - except Exception as exc: - self.fail(exc) - - # ### Assertions ### # - - after_goals = objects.Goal.list(self.ctx) - after_strategies = objects.Strategy.list(self.ctx) - after_audit_templates = objects.AuditTemplate.list(self.ctx) - after_audits = objects.Audit.list(self.ctx) - after_action_plans = objects.ActionPlan.list(self.ctx) - - self.assertEqual(2, len(before_goals)) - self.assertEqual(3, len(before_strategies)) - self.assertEqual(2, len(before_audit_templates)) - self.assertEqual(2, len(before_audits)) 
- self.assertEqual(2, len(before_action_plans)) - self.assertEqual(1, len(after_goals)) - self.assertEqual(1, len(after_strategies)) - self.assertEqual(2, len(after_audit_templates)) - self.assertEqual(2, len(after_audits)) - self.assertEqual(2, len(after_action_plans)) - self.assertEqual( - {"dummy_1"}, - set([g.name for g in after_goals])) - self.assertEqual( - {"strategy_1"}, - set([s.name for s in after_strategies])) - - created_goals, modified_goals, unmodified_goals = ( - self._find_created_modified_unmodified_ids( - before_goals, after_goals)) - - created_strategies, modified_strategies, unmodified_strategies = ( - self._find_created_modified_unmodified_ids( - before_strategies, after_strategies)) - - (created_audit_templates, modified_audit_templates, - unmodified_audit_templates) = ( - self._find_created_modified_unmodified_ids( - before_audit_templates, after_audit_templates)) - - created_audits, modified_audits, unmodified_audits = ( - self._find_created_modified_unmodified_ids( - before_audits, after_audits)) - - (created_action_plans, modified_action_plans, - unmodified_action_plans) = ( - self._find_created_modified_unmodified_ids( - before_action_plans, after_action_plans)) - - self.assertEqual(0, len(created_goals)) - self.assertEqual(0, len(created_strategies)) - self.assertEqual(0, len(created_audits)) - self.assertEqual(0, len(created_action_plans)) - - self.assertEqual(set([audit_template2.id]), - set(modified_audit_templates)) - self.assertEqual(set([audit_template1.id]), - set(unmodified_audit_templates)) - - self.assertEqual(set([audit2.id]), set(modified_audits)) - self.assertEqual(set([audit1.id]), set(unmodified_audits)) - - self.assertEqual(set([action_plan2.id]), set(modified_action_plans)) - self.assertTrue( - all(ap.state == objects.action_plan.State.CANCELLED - for ap in modified_action_plans.values())) - self.assertEqual(set([action_plan1.id]), set(unmodified_action_plans)) diff --git a/watcher/tests/fake_policy.py 
b/watcher/tests/fake_policy.py deleted file mode 100644 index bed907c..0000000 --- a/watcher/tests/fake_policy.py +++ /dev/null @@ -1,80 +0,0 @@ -# Copyright (c) 2012 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -policy_data = """ -{ - "admin_api": "role:admin or role:administrator", - "show_password": "!", - "default": "rule:admin_api", - - "action:detail": "", - "action:get": "", - "action:get_all": "", - - "action_plan:delete": "", - "action_plan:detail": "", - "action_plan:get": "", - "action_plan:get_all": "", - "action_plan:update": "", - - "audit:create": "", - "audit:delete": "", - "audit:detail": "", - "audit:get": "", - "audit:get_all": "", - "audit:update": "", - - "audit_template:create": "", - "audit_template:delete": "", - "audit_template:detail": "", - "audit_template:get": "", - "audit_template:get_all": "", - "audit_template:update": "", - - "goal:detail": "", - "goal:get": "", - "goal:get_all": "", - - "scoring_engine:detail": "", - "scoring_engine:get": "", - "scoring_engine:get_all": "", - - "strategy:detail": "", - "strategy:get": "", - "strategy:get_all": "", - - "service:detail": "", - "service:get": "", - "service:get_all": "" -} -""" - - -policy_data_compat_juno = """ -{ - "admin": "role:admin or role:administrator", - "admin_api": "is_admin:True", - "default": "rule:admin_api" -} -""" - - -def get_policy_data(compat): - if not compat: - return policy_data - elif compat == 'juno': - return policy_data_compat_juno - 
else: - raise Exception('Policy data for %s not available' % compat) diff --git a/watcher/tests/fakes.py b/watcher/tests/fakes.py deleted file mode 100644 index d35b10d..0000000 --- a/watcher/tests/fakes.py +++ /dev/null @@ -1,91 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock - -fakeAuthTokenHeaders = {'X-User-Id': u'773a902f022949619b5c2f32cd89d419', - 'X-Roles': u'admin, ResellerAdmin, _member_', - 'X-Project-Id': u'5588aebbcdc24e17a061595f80574376', - 'X-Project-Name': 'test', - 'X-User-Name': 'test', - 'X-Auth-Token': u'5588aebbcdc24e17a061595f80574376', - 'X-Forwarded-For': u'10.10.10.10, 11.11.11.11', - 'X-Service-Catalog': u'{test: 12345}', - 'X-Auth-Url': 'fake_auth_url', - 'X-Identity-Status': 'Confirmed', - 'X-User-Domain-Name': 'domain', - 'X-Project-Domain-Id': 'project_domain_id', - 'X-User-Domain-Id': 'user_domain_id', - } - - -class FakePecanRequest(mock.Mock): - - def __init__(self, **kwargs): - super(FakePecanRequest, self).__init__(**kwargs) - self.host_url = 'http://test_url:8080/test' - self.context = {} - self.body = '' - self.content_type = 'text/unicode' - self.params = {} - self.path = '/v1/services' - self.headers = fakeAuthTokenHeaders - self.environ = {} - - def __setitem__(self, index, value): - setattr(self, index, value) - - -class FakePecanResponse(mock.Mock): - - def __init__(self, **kwargs): - super(FakePecanResponse, self).__init__(**kwargs) - self.status = None - - -class FakeApp(object): - pass - - -class 
FakeService(mock.Mock): - def __init__(self, **kwargs): - super(FakeService, self).__init__(**kwargs) - self.__tablename__ = 'service' - self.__resource__ = 'services' - self.user_id = 'fake user id' - self.project_id = 'fake project id' - self.uuid = 'test_uuid' - self.id = 8 - self.name = 'james' - self.service_type = 'not_this' - self.description = 'amazing' - self.tags = ['this', 'and that'] - self.read_only = True - - def as_dict(self): - return dict(service_type=self.service_type, - user_id=self.user_id, - project_id=self.project_id, - uuid=self.uuid, - id=self.id, - name=self.name, - tags=self.tags, - read_only=self.read_only, - description=self.description) - - -class FakeAuthProtocol(mock.Mock): - - def __init__(self, **kwargs): - super(FakeAuthProtocol, self).__init__(**kwargs) - self.app = FakeApp() - self.config = '' diff --git a/watcher/tests/notifications/__init__.py b/watcher/tests/notifications/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/tests/notifications/test_action_notification.py b/watcher/tests/notifications/test_action_notification.py deleted file mode 100644 index 2a4a5b2..0000000 --- a/watcher/tests/notifications/test_action_notification.py +++ /dev/null @@ -1,355 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import freezegun -import mock -import oslo_messaging as om - -from watcher.common import exception -from watcher.common import rpc -from watcher import notifications -from watcher import objects -from watcher.tests.db import base -from watcher.tests.objects import utils - - -@freezegun.freeze_time('2016-10-18T09:52:05.219414') -class TestActionNotification(base.DbTestCase): - - def setUp(self): - super(TestActionNotification, self).setUp() - p_get_notifier = mock.patch.object(rpc, 'get_notifier') - m_get_notifier = p_get_notifier.start() - self.addCleanup(p_get_notifier.stop) - self.m_notifier = mock.Mock(spec=om.Notifier) - - def fake_get_notifier(publisher_id): - self.m_notifier.publisher_id = publisher_id - return self.m_notifier - - m_get_notifier.side_effect = fake_get_notifier - self.goal = utils.create_test_goal(mock.Mock()) - self.strategy = utils.create_test_strategy(mock.Mock()) - self.audit = utils.create_test_audit(mock.Mock(), - strategy_id=self.strategy.id) - self.action_plan = utils.create_test_action_plan(mock.Mock()) - - def test_send_invalid_action_plan(self): - action_plan = utils.get_test_action_plan( - mock.Mock(), state='DOESNOTMATTER', audit_id=1) - - self.assertRaises( - exception.InvalidActionPlan, - notifications.action_plan.send_update, - mock.MagicMock(), action_plan, host='node0') - - def test_send_action_update(self): - action = utils.create_test_action( - mock.Mock(), state=objects.action.State.ONGOING, - action_type='nop', input_parameters={'param1': 1, 'param2': 2}, - parents=[], action_plan_id=self.action_plan.id) - notifications.action.send_update( - mock.MagicMock(), action, host='node0', - old_state=objects.action.State.PENDING) - - # The 1st notification is because we created the object. - # The 2nd notification is because we created the action plan object. 
- self.assertEqual(4, self.m_notifier.info.call_count) - notification = self.m_notifier.info.call_args[1] - payload = notification['payload'] - - self.assertEqual("infra-optim:node0", self.m_notifier.publisher_id) - self.assertDictEqual( - { - 'watcher_object.namespace': 'watcher', - 'watcher_object.version': '1.0', - 'watcher_object.name': 'ActionUpdatePayload', - 'watcher_object.data': { - 'uuid': '10a47dd1-4874-4298-91cf-eff046dbdb8d', - 'input_parameters': { - 'param2': 2, - 'param1': 1 - }, - 'created_at': '2016-10-18T09:52:05Z', - 'updated_at': None, - 'state_update': { - 'watcher_object.namespace': 'watcher', - 'watcher_object.version': '1.0', - 'watcher_object.name': 'ActionStateUpdatePayload', - 'watcher_object.data': { - 'old_state': 'PENDING', - 'state': 'ONGOING' - } - }, - 'state': 'ONGOING', - 'action_plan': { - 'watcher_object.namespace': 'watcher', - 'watcher_object.version': '1.0', - 'watcher_object.name': 'TerseActionPlanPayload', - 'watcher_object.data': { - 'uuid': '76be87bd-3422-43f9-93a0-e85a577e3061', - 'global_efficacy': {}, - 'created_at': '2016-10-18T09:52:05Z', - 'updated_at': None, - 'state': 'ONGOING', - 'audit_uuid': '10a47dd1-4874-4298' - '-91cf-eff046dbdb8d', - 'strategy_uuid': 'cb3d0b58-4415-4d90' - '-b75b-1e96878730e3', - 'deleted_at': None - } - }, - 'parents': [], - 'action_type': 'nop', - 'deleted_at': None - } - }, - payload - ) - - def test_send_action_plan_create(self): - action = utils.create_test_action( - mock.Mock(), state=objects.action.State.PENDING, - action_type='nop', input_parameters={'param1': 1, 'param2': 2}, - parents=[], action_plan_id=self.action_plan.id) - notifications.action.send_create(mock.MagicMock(), action, - host='node0') - - self.assertEqual(4, self.m_notifier.info.call_count) - notification = self.m_notifier.info.call_args[1] - payload = notification['payload'] - - self.assertEqual("infra-optim:node0", self.m_notifier.publisher_id) - self.assertDictEqual( - { - 'watcher_object.namespace': 'watcher', 
- 'watcher_object.version': '1.0', - 'watcher_object.name': 'ActionCreatePayload', - 'watcher_object.data': { - 'uuid': '10a47dd1-4874-4298-91cf-eff046dbdb8d', - 'input_parameters': { - 'param2': 2, - 'param1': 1 - }, - 'created_at': '2016-10-18T09:52:05Z', - 'updated_at': None, - 'state': 'PENDING', - 'action_plan': { - 'watcher_object.namespace': 'watcher', - 'watcher_object.version': '1.0', - 'watcher_object.name': 'TerseActionPlanPayload', - 'watcher_object.data': { - 'uuid': '76be87bd-3422-43f9-93a0-e85a577e3061', - 'global_efficacy': {}, - 'created_at': '2016-10-18T09:52:05Z', - 'updated_at': None, - 'state': 'ONGOING', - 'audit_uuid': '10a47dd1-4874-4298' - '-91cf-eff046dbdb8d', - 'strategy_uuid': 'cb3d0b58-4415-4d90' - '-b75b-1e96878730e3', - 'deleted_at': None - } - }, - 'parents': [], - 'action_type': 'nop', - 'deleted_at': None - } - }, - payload - ) - - def test_send_action_delete(self): - action = utils.create_test_action( - mock.Mock(), state=objects.action.State.DELETED, - action_type='nop', input_parameters={'param1': 1, 'param2': 2}, - parents=[], action_plan_id=self.action_plan.id) - notifications.action.send_delete(mock.MagicMock(), action, - host='node0') - - # The 1st notification is because we created the audit object. - # The 2nd notification is because we created the action plan object. 
- self.assertEqual(4, self.m_notifier.info.call_count) - notification = self.m_notifier.info.call_args[1] - payload = notification['payload'] - - self.assertEqual("infra-optim:node0", self.m_notifier.publisher_id) - self.assertDictEqual( - { - 'watcher_object.namespace': 'watcher', - 'watcher_object.version': '1.0', - 'watcher_object.name': 'ActionDeletePayload', - 'watcher_object.data': { - 'uuid': '10a47dd1-4874-4298-91cf-eff046dbdb8d', - 'input_parameters': { - 'param2': 2, - 'param1': 1 - }, - 'created_at': '2016-10-18T09:52:05Z', - 'updated_at': None, - 'state': 'DELETED', - 'action_plan': { - 'watcher_object.namespace': 'watcher', - 'watcher_object.version': '1.0', - 'watcher_object.name': 'TerseActionPlanPayload', - 'watcher_object.data': { - 'uuid': '76be87bd-3422-43f9-93a0-e85a577e3061', - 'global_efficacy': {}, - 'created_at': '2016-10-18T09:52:05Z', - 'updated_at': None, - 'state': 'ONGOING', - 'audit_uuid': '10a47dd1-4874-4298' - '-91cf-eff046dbdb8d', - 'strategy_uuid': 'cb3d0b58-4415-4d90' - '-b75b-1e96878730e3', - 'deleted_at': None - } - }, - 'parents': [], - 'action_type': 'nop', - 'deleted_at': None - } - }, - payload - ) - - def test_send_action_execution(self): - action = utils.create_test_action( - mock.Mock(), state=objects.action.State.PENDING, - action_type='nop', input_parameters={'param1': 1, 'param2': 2}, - parents=[], action_plan_id=self.action_plan.id) - notifications.action.send_execution_notification( - mock.MagicMock(), action, 'execution', phase='start', host='node0') - - # The 1st notification is because we created the audit object. - # The 2nd notification is because we created the action plan object. 
- self.assertEqual(4, self.m_notifier.info.call_count) - notification = self.m_notifier.info.call_args[1] - - self.assertEqual("infra-optim:node0", self.m_notifier.publisher_id) - self.assertDictEqual( - { - 'event_type': 'action.execution.start', - 'payload': { - 'watcher_object.namespace': 'watcher', - 'watcher_object.version': '1.0', - 'watcher_object.name': 'ActionExecutionPayload', - 'watcher_object.data': { - 'uuid': '10a47dd1-4874-4298-91cf-eff046dbdb8d', - 'input_parameters': { - 'param2': 2, - 'param1': 1 - }, - 'created_at': '2016-10-18T09:52:05Z', - 'fault': None, - 'updated_at': None, - 'state': 'PENDING', - 'action_plan': { - 'watcher_object.namespace': 'watcher', - 'watcher_object.version': '1.0', - 'watcher_object.name': 'TerseActionPlanPayload', - 'watcher_object.data': { - 'uuid': '76be87bd-3422-43f9-93a0-e85a577e3061', - 'global_efficacy': {}, - 'created_at': '2016-10-18T09:52:05Z', - 'updated_at': None, - 'state': 'ONGOING', - 'audit_uuid': '10a47dd1-4874-4298' - '-91cf-eff046dbdb8d', - 'strategy_uuid': 'cb3d0b58-4415-4d90' - '-b75b-1e96878730e3', - 'deleted_at': None - } - }, - 'parents': [], - 'action_type': 'nop', - 'deleted_at': None - } - } - }, - notification - ) - - def test_send_action_execution_with_error(self): - action = utils.create_test_action( - mock.Mock(), state=objects.action.State.FAILED, - action_type='nop', input_parameters={'param1': 1, 'param2': 2}, - parents=[], action_plan_id=self.action_plan.id) - - try: - # This is to load the exception in sys.exc_info() - raise exception.WatcherException("TEST") - except exception.WatcherException: - notifications.action.send_execution_notification( - mock.MagicMock(), action, 'execution', phase='error', - host='node0', priority='error') - - self.assertEqual(1, self.m_notifier.error.call_count) - notification = self.m_notifier.error.call_args[1] - self.assertEqual("infra-optim:node0", self.m_notifier.publisher_id) - self.assertDictEqual( - { - 'event_type': 'action.execution.error', - 
'payload': { - 'watcher_object.namespace': 'watcher', - 'watcher_object.version': '1.0', - 'watcher_object.name': 'ActionExecutionPayload', - 'watcher_object.data': { - 'uuid': '10a47dd1-4874-4298-91cf-eff046dbdb8d', - 'input_parameters': { - 'param2': 2, - 'param1': 1 - }, - 'created_at': '2016-10-18T09:52:05Z', - 'fault': { - 'watcher_object.data': { - 'exception': u'WatcherException', - 'exception_message': u'TEST', - 'function_name': ( - 'test_send_action_execution_with_error'), - 'module_name': ( - 'watcher.tests.notifications.' - 'test_action_notification') - }, - 'watcher_object.name': 'ExceptionPayload', - 'watcher_object.namespace': 'watcher', - 'watcher_object.version': '1.0' - }, - 'updated_at': None, - 'state': 'FAILED', - 'action_plan': { - 'watcher_object.namespace': 'watcher', - 'watcher_object.version': '1.0', - 'watcher_object.name': 'TerseActionPlanPayload', - 'watcher_object.data': { - 'uuid': '76be87bd-3422-43f9-93a0-e85a577e3061', - 'global_efficacy': {}, - 'created_at': '2016-10-18T09:52:05Z', - 'updated_at': None, - 'state': 'ONGOING', - 'audit_uuid': '10a47dd1-4874-4298' - '-91cf-eff046dbdb8d', - 'strategy_uuid': 'cb3d0b58-4415-4d90' - '-b75b-1e96878730e3', - 'deleted_at': None - } - }, - 'parents': [], - 'action_type': 'nop', - 'deleted_at': None - } - } - }, - notification - ) diff --git a/watcher/tests/notifications/test_action_plan_notification.py b/watcher/tests/notifications/test_action_plan_notification.py deleted file mode 100644 index 47dce1f..0000000 --- a/watcher/tests/notifications/test_action_plan_notification.py +++ /dev/null @@ -1,429 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import freezegun -import mock -import oslo_messaging as om - -from watcher.common import exception -from watcher.common import rpc -from watcher import notifications -from watcher import objects -from watcher.tests.db import base -from watcher.tests.objects import utils - - -@freezegun.freeze_time('2016-10-18T09:52:05.219414') -class TestActionPlanNotification(base.DbTestCase): - - def setUp(self): - super(TestActionPlanNotification, self).setUp() - p_get_notifier = mock.patch.object(rpc, 'get_notifier') - m_get_notifier = p_get_notifier.start() - self.addCleanup(p_get_notifier.stop) - self.m_notifier = mock.Mock(spec=om.Notifier) - - def fake_get_notifier(publisher_id): - self.m_notifier.publisher_id = publisher_id - return self.m_notifier - - m_get_notifier.side_effect = fake_get_notifier - self.goal = utils.create_test_goal(mock.Mock()) - self.audit = utils.create_test_audit(mock.Mock(), interval=None) - self.strategy = utils.create_test_strategy(mock.Mock()) - - def test_send_invalid_action_plan(self): - action_plan = utils.get_test_action_plan( - mock.Mock(), state='DOESNOTMATTER', audit_id=1) - - self.assertRaises( - exception.InvalidActionPlan, - notifications.action_plan.send_update, - mock.MagicMock(), action_plan, host='node0') - - def test_send_action_plan_update(self): - action_plan = utils.create_test_action_plan( - mock.Mock(), state=objects.action_plan.State.ONGOING, - audit_id=self.audit.id, strategy_id=self.strategy.id, - audit=self.audit, strategy=self.strategy) - notifications.action_plan.send_update( - mock.MagicMock(), action_plan, 
host='node0', - old_state=objects.action_plan.State.PENDING) - - # The 1st notification is because we created the object. - # The 2nd notification is because we created the action plan object. - self.assertEqual(3, self.m_notifier.info.call_count) - notification = self.m_notifier.info.call_args[1] - payload = notification['payload'] - - self.assertEqual("infra-optim:node0", self.m_notifier.publisher_id) - self.assertDictEqual( - { - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.0", - "watcher_object.data": { - "global_efficacy": {}, - "strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", - "strategy": { - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.0", - "watcher_object.data": { - "updated_at": None, - "uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", - "name": "TEST", - "parameters_spec": {}, - "created_at": "2016-10-18T09:52:05Z", - "display_name": "test strategy", - "deleted_at": None - }, - "watcher_object.name": "StrategyPayload" - }, - "uuid": "76be87bd-3422-43f9-93a0-e85a577e3061", - "audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", - "audit": { - "watcher_object.data": { - "interval": None, - "next_run_time": None, - "auto_trigger": False, - "parameters": {}, - "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", - "strategy_uuid": None, - "goal_uuid": ( - "f7ad87ae-4298-91cf-93a0-f35a852e3652"), - "deleted_at": None, - "scope": [], - "state": "PENDING", - "updated_at": None, - "created_at": "2016-10-18T09:52:05Z", - "audit_type": "ONESHOT" - }, - "watcher_object.name": "TerseAuditPayload", - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.1" - }, - "deleted_at": None, - "state": "ONGOING", - "updated_at": None, - "created_at": "2016-10-18T09:52:05Z", - "state_update": { - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.0", - "watcher_object.data": { - "old_state": "PENDING", - "state": "ONGOING" - }, - "watcher_object.name": "ActionPlanStateUpdatePayload" - }, - 
}, - "watcher_object.name": "ActionPlanUpdatePayload" - }, - payload - ) - - def test_send_action_plan_create(self): - action_plan = utils.get_test_action_plan( - mock.Mock(), state=objects.action_plan.State.PENDING, - audit_id=self.audit.id, strategy_id=self.strategy.id, - audit=self.audit.as_dict(), strategy=self.strategy.as_dict()) - notifications.action_plan.send_create( - mock.MagicMock(), action_plan, host='node0') - - self.assertEqual(2, self.m_notifier.info.call_count) - notification = self.m_notifier.info.call_args[1] - payload = notification['payload'] - - self.assertEqual("infra-optim:node0", self.m_notifier.publisher_id) - self.assertDictEqual( - { - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.0", - "watcher_object.data": { - "global_efficacy": {}, - "strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", - "strategy": { - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.0", - "watcher_object.data": { - "updated_at": None, - "uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", - "name": "TEST", - "parameters_spec": {}, - "created_at": "2016-10-18T09:52:05Z", - "display_name": "test strategy", - "deleted_at": None - }, - "watcher_object.name": "StrategyPayload" - }, - "uuid": "76be87bd-3422-43f9-93a0-e85a577e3061", - "audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", - "audit": { - "watcher_object.data": { - "interval": None, - "next_run_time": None, - "auto_trigger": False, - "parameters": {}, - "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", - "strategy_uuid": None, - "goal_uuid": ( - "f7ad87ae-4298-91cf-93a0-f35a852e3652"), - "deleted_at": None, - "scope": [], - "state": "PENDING", - "updated_at": None, - "created_at": "2016-10-18T09:52:05Z", - "audit_type": "ONESHOT" - }, - "watcher_object.name": "TerseAuditPayload", - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.1" - }, - "deleted_at": None, - "state": "PENDING", - "updated_at": None, - "created_at": None, - }, - 
"watcher_object.name": "ActionPlanCreatePayload" - }, - payload - ) - - def test_send_action_plan_delete(self): - action_plan = utils.create_test_action_plan( - mock.Mock(), state=objects.action_plan.State.DELETED, - audit_id=self.audit.id, strategy_id=self.strategy.id) - notifications.action_plan.send_delete( - mock.MagicMock(), action_plan, host='node0') - - # The 1st notification is because we created the audit object. - # The 2nd notification is because we created the action plan object. - self.assertEqual(3, self.m_notifier.info.call_count) - notification = self.m_notifier.info.call_args[1] - payload = notification['payload'] - - self.assertEqual("infra-optim:node0", self.m_notifier.publisher_id) - self.assertDictEqual( - { - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.0", - "watcher_object.data": { - "global_efficacy": {}, - "strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", - "strategy": { - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.0", - "watcher_object.data": { - "updated_at": None, - "uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", - "name": "TEST", - "parameters_spec": {}, - "created_at": "2016-10-18T09:52:05Z", - "display_name": "test strategy", - "deleted_at": None - }, - "watcher_object.name": "StrategyPayload" - }, - "uuid": "76be87bd-3422-43f9-93a0-e85a577e3061", - "audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", - "audit": { - "watcher_object.data": { - "interval": None, - "next_run_time": None, - "auto_trigger": False, - "parameters": {}, - "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", - "strategy_uuid": None, - "goal_uuid": ( - "f7ad87ae-4298-91cf-93a0-f35a852e3652"), - "deleted_at": None, - "scope": [], - "state": "PENDING", - "updated_at": None, - "created_at": "2016-10-18T09:52:05Z", - "audit_type": "ONESHOT" - }, - "watcher_object.name": "TerseAuditPayload", - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.1" - }, - "deleted_at": None, - "state": 
"DELETED", - "updated_at": None, - "created_at": "2016-10-18T09:52:05Z", - }, - "watcher_object.name": "ActionPlanDeletePayload" - }, - payload - ) - - def test_send_action_plan_action(self): - action_plan = utils.create_test_action_plan( - mock.Mock(), state=objects.action_plan.State.ONGOING, - audit_id=self.audit.id, strategy_id=self.strategy.id, - audit=self.audit, strategy=self.strategy) - notifications.action_plan.send_action_notification( - mock.MagicMock(), action_plan, host='node0', - action='execution', phase='start') - - # The 1st notification is because we created the audit object. - # The 2nd notification is because we created the action plan object. - self.assertEqual(3, self.m_notifier.info.call_count) - notification = self.m_notifier.info.call_args[1] - - self.assertEqual("infra-optim:node0", self.m_notifier.publisher_id) - self.assertDictEqual( - { - "event_type": "action_plan.execution.start", - "payload": { - "watcher_object.data": { - "created_at": "2016-10-18T09:52:05Z", - "deleted_at": None, - "fault": None, - "audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", - "audit": { - "watcher_object.namespace": "watcher", - "watcher_object.name": "TerseAuditPayload", - "watcher_object.version": "1.1", - "watcher_object.data": { - "interval": None, - "next_run_time": None, - "auto_trigger": False, - "parameters": {}, - "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", - "strategy_uuid": None, - "goal_uuid": ( - "f7ad87ae-4298-91cf-93a0-f35a852e3652"), - "deleted_at": None, - "scope": [], - "state": "PENDING", - "updated_at": None, - "created_at": "2016-10-18T09:52:05Z", - "audit_type": "ONESHOT" - } - }, - "global_efficacy": {}, - "state": "ONGOING", - "strategy_uuid": ( - "cb3d0b58-4415-4d90-b75b-1e96878730e3"), - "strategy": { - "watcher_object.data": { - "created_at": "2016-10-18T09:52:05Z", - "deleted_at": None, - "display_name": "test strategy", - "name": "TEST", - "parameters_spec": {}, - "updated_at": None, - "uuid": 
"cb3d0b58-4415-4d90-b75b-1e96878730e3" - }, - "watcher_object.name": "StrategyPayload", - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.0" - }, - "updated_at": None, - "uuid": "76be87bd-3422-43f9-93a0-e85a577e3061" - }, - "watcher_object.name": "ActionPlanActionPayload", - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.0" - } - }, - notification - ) - - def test_send_action_plan_action_with_error(self): - action_plan = utils.create_test_action_plan( - mock.Mock(), state=objects.action_plan.State.ONGOING, - audit_id=self.audit.id, strategy_id=self.strategy.id, - audit=self.audit, strategy=self.strategy) - - try: - # This is to load the exception in sys.exc_info() - raise exception.WatcherException("TEST") - except exception.WatcherException: - notifications.action_plan.send_action_notification( - mock.MagicMock(), action_plan, host='node0', - action='execution', priority='error', phase='error') - - self.assertEqual(1, self.m_notifier.error.call_count) - notification = self.m_notifier.error.call_args[1] - self.assertEqual("infra-optim:node0", self.m_notifier.publisher_id) - self.assertDictEqual( - { - "event_type": "action_plan.execution.error", - "payload": { - "watcher_object.data": { - "created_at": "2016-10-18T09:52:05Z", - "deleted_at": None, - "fault": { - "watcher_object.data": { - "exception": "WatcherException", - "exception_message": "TEST", - "function_name": ( - "test_send_action_plan_action_with_error"), - "module_name": "watcher.tests.notifications." 
- "test_action_plan_notification" - }, - "watcher_object.name": "ExceptionPayload", - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.0" - }, - "audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", - "audit": { - "watcher_object.data": { - "interval": None, - "next_run_time": None, - "auto_trigger": False, - "parameters": {}, - "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", - "strategy_uuid": None, - "goal_uuid": ( - "f7ad87ae-4298-91cf-93a0-f35a852e3652"), - "deleted_at": None, - "scope": [], - "state": "PENDING", - "updated_at": None, - "created_at": "2016-10-18T09:52:05Z", - "audit_type": "ONESHOT" - }, - "watcher_object.name": "TerseAuditPayload", - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.1" - }, - "global_efficacy": {}, - "state": "ONGOING", - "strategy_uuid": ( - "cb3d0b58-4415-4d90-b75b-1e96878730e3"), - "strategy": { - "watcher_object.data": { - "created_at": "2016-10-18T09:52:05Z", - "deleted_at": None, - "display_name": "test strategy", - "name": "TEST", - "parameters_spec": {}, - "updated_at": None, - "uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3" - }, - "watcher_object.name": "StrategyPayload", - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.0" - }, - "updated_at": None, - "uuid": "76be87bd-3422-43f9-93a0-e85a577e3061" - }, - "watcher_object.name": "ActionPlanActionPayload", - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.0" - } - }, - notification - ) diff --git a/watcher/tests/notifications/test_audit_notification.py b/watcher/tests/notifications/test_audit_notification.py deleted file mode 100644 index 0a5178a..0000000 --- a/watcher/tests/notifications/test_audit_notification.py +++ /dev/null @@ -1,477 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import freezegun -import mock -import oslo_messaging as om - -from watcher.common import exception -from watcher.common import rpc -from watcher import notifications -from watcher import objects -from watcher.tests.db import base -from watcher.tests.objects import utils - - -@freezegun.freeze_time('2016-10-18T09:52:05.219414') -class TestAuditNotification(base.DbTestCase): - - def setUp(self): - super(TestAuditNotification, self).setUp() - p_get_notifier = mock.patch.object(rpc, 'get_notifier') - m_get_notifier = p_get_notifier.start() - self.addCleanup(p_get_notifier.stop) - self.m_notifier = mock.Mock(spec=om.Notifier) - - def fake_get_notifier(publisher_id): - self.m_notifier.publisher_id = publisher_id - return self.m_notifier - - m_get_notifier.side_effect = fake_get_notifier - self.goal = utils.create_test_goal(mock.Mock()) - self.strategy = utils.create_test_strategy(mock.Mock()) - - def test_send_invalid_audit(self): - audit = utils.get_test_audit( - mock.Mock(), interval=None, state='DOESNOTMATTER', goal_id=1) - - self.assertRaises( - exception.InvalidAudit, - notifications.audit.send_update, - mock.MagicMock(), audit, host='node0') - - def test_send_audit_update_with_strategy(self): - audit = utils.create_test_audit( - mock.Mock(), interval=None, state=objects.audit.State.ONGOING, - goal_id=self.goal.id, strategy_id=self.strategy.id, - goal=self.goal, strategy=self.strategy) - notifications.audit.send_update( - mock.MagicMock(), audit, host='node0', - old_state=objects.audit.State.PENDING) - - # The 1st notification is because we created the 
object. - self.assertEqual(2, self.m_notifier.info.call_count) - notification = self.m_notifier.info.call_args[1] - payload = notification['payload'] - - self.assertEqual("infra-optim:node0", self.m_notifier.publisher_id) - self.assertDictEqual( - { - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.1", - "watcher_object.data": { - "interval": None, - "next_run_time": None, - "auto_trigger": False, - "strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", - "strategy": { - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.0", - "watcher_object.data": { - "updated_at": None, - "uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", - "name": "TEST", - "parameters_spec": {}, - "created_at": "2016-10-18T09:52:05Z", - "display_name": "test strategy", - "deleted_at": None - }, - "watcher_object.name": "StrategyPayload" - }, - "parameters": {}, - "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", - "goal_uuid": "f7ad87ae-4298-91cf-93a0-f35a852e3652", - "goal": { - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.0", - "watcher_object.data": { - "updated_at": None, - "uuid": "f7ad87ae-4298-91cf-93a0-f35a852e3652", - "name": "TEST", - "efficacy_specification": [], - "created_at": "2016-10-18T09:52:05Z", - "display_name": "test goal", - "deleted_at": None - }, - "watcher_object.name": "GoalPayload" - }, - "deleted_at": None, - "scope": [], - "state": "ONGOING", - "updated_at": None, - "created_at": "2016-10-18T09:52:05Z", - "state_update": { - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.0", - "watcher_object.data": { - "old_state": "PENDING", - "state": "ONGOING" - }, - "watcher_object.name": "AuditStateUpdatePayload" - }, - "audit_type": "ONESHOT" - }, - "watcher_object.name": "AuditUpdatePayload" - }, - payload - ) - - def test_send_audit_update_without_strategy(self): - audit = utils.get_test_audit( - mock.Mock(), interval=None, state=objects.audit.State.ONGOING, - goal_id=self.goal.id, 
goal=self.goal) - notifications.audit.send_update( - mock.MagicMock(), audit, host='node0', - old_state=objects.audit.State.PENDING) - - self.assertEqual(1, self.m_notifier.info.call_count) - notification = self.m_notifier.info.call_args[1] - payload = notification['payload'] - - self.assertEqual("infra-optim:node0", self.m_notifier.publisher_id) - self.assertDictEqual( - { - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.1", - "watcher_object.data": { - "interval": None, - "next_run_time": None, - "auto_trigger": False, - "parameters": {}, - "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", - "goal_uuid": "f7ad87ae-4298-91cf-93a0-f35a852e3652", - "goal": { - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.0", - "watcher_object.data": { - "updated_at": None, - "uuid": "f7ad87ae-4298-91cf-93a0-f35a852e3652", - "name": "TEST", - "efficacy_specification": [], - "created_at": "2016-10-18T09:52:05Z", - "display_name": "test goal", - "deleted_at": None - }, - "watcher_object.name": "GoalPayload" - }, - "strategy_uuid": None, - "strategy": None, - "deleted_at": None, - "scope": [], - "state": "ONGOING", - "updated_at": None, - "created_at": None, - "state_update": { - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.0", - "watcher_object.data": { - "old_state": "PENDING", - "state": "ONGOING" - }, - "watcher_object.name": "AuditStateUpdatePayload" - }, - "audit_type": "ONESHOT" - }, - "watcher_object.name": "AuditUpdatePayload" - }, - payload - ) - - def test_send_audit_create(self): - audit = utils.get_test_audit( - mock.Mock(), interval=None, state=objects.audit.State.PENDING, - goal_id=self.goal.id, strategy_id=self.strategy.id, - goal=self.goal.as_dict(), strategy=self.strategy.as_dict()) - notifications.audit.send_create( - mock.MagicMock(), audit, host='node0') - - self.assertEqual(1, self.m_notifier.info.call_count) - notification = self.m_notifier.info.call_args[1] - payload = notification['payload'] 
- - self.assertEqual("infra-optim:node0", self.m_notifier.publisher_id) - self.assertDictEqual( - { - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.1", - "watcher_object.data": { - "interval": None, - "next_run_time": None, - "auto_trigger": False, - "strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", - "strategy": { - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.0", - "watcher_object.data": { - "updated_at": None, - "uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", - "name": "TEST", - "parameters_spec": {}, - "created_at": "2016-10-18T09:52:05Z", - "display_name": "test strategy", - "deleted_at": None - }, - "watcher_object.name": "StrategyPayload" - }, - "parameters": {}, - "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", - "goal_uuid": "f7ad87ae-4298-91cf-93a0-f35a852e3652", - "goal": { - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.0", - "watcher_object.data": { - "updated_at": None, - "uuid": "f7ad87ae-4298-91cf-93a0-f35a852e3652", - "name": "TEST", - "efficacy_specification": [], - "created_at": "2016-10-18T09:52:05Z", - "display_name": "test goal", - "deleted_at": None - }, - "watcher_object.name": "GoalPayload" - }, - "deleted_at": None, - "scope": [], - "state": "PENDING", - "updated_at": None, - "created_at": None, - "audit_type": "ONESHOT" - }, - "watcher_object.name": "AuditCreatePayload" - }, - payload - ) - - def test_send_audit_delete(self): - audit = utils.create_test_audit( - mock.Mock(), interval=None, state=objects.audit.State.DELETED, - goal_id=self.goal.id, strategy_id=self.strategy.id) - notifications.audit.send_delete( - mock.MagicMock(), audit, host='node0') - - # The 1st notification is because we created the object. 
- self.assertEqual(2, self.m_notifier.info.call_count) - notification = self.m_notifier.info.call_args[1] - payload = notification['payload'] - - self.assertEqual("infra-optim:node0", self.m_notifier.publisher_id) - self.assertDictEqual( - { - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.1", - "watcher_object.data": { - "interval": None, - "next_run_time": None, - "auto_trigger": False, - "strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", - "strategy": { - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.0", - "watcher_object.data": { - "updated_at": None, - "uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", - "name": "TEST", - "parameters_spec": {}, - "created_at": "2016-10-18T09:52:05Z", - "display_name": "test strategy", - "deleted_at": None - }, - "watcher_object.name": "StrategyPayload" - }, - "parameters": {}, - "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", - "goal_uuid": "f7ad87ae-4298-91cf-93a0-f35a852e3652", - "goal": { - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.0", - "watcher_object.data": { - "updated_at": None, - "uuid": "f7ad87ae-4298-91cf-93a0-f35a852e3652", - "name": "TEST", - "efficacy_specification": [], - "created_at": "2016-10-18T09:52:05Z", - "display_name": "test goal", - "deleted_at": None - }, - "watcher_object.name": "GoalPayload" - }, - "deleted_at": None, - "scope": [], - "state": "DELETED", - "updated_at": None, - "created_at": "2016-10-18T09:52:05Z", - "audit_type": "ONESHOT" - }, - "watcher_object.name": "AuditDeletePayload" - }, - payload - ) - - def test_send_audit_action(self): - audit = utils.create_test_audit( - mock.Mock(), interval=None, state=objects.audit.State.ONGOING, - goal_id=self.goal.id, strategy_id=self.strategy.id, - goal=self.goal, strategy=self.strategy) - notifications.audit.send_action_notification( - mock.MagicMock(), audit, host='node0', - action='strategy', phase='start') - - # The 1st notification is because we created the object. 
- self.assertEqual(2, self.m_notifier.info.call_count) - notification = self.m_notifier.info.call_args[1] - notification = self.m_notifier.info.call_args[1] - - self.assertEqual("infra-optim:node0", self.m_notifier.publisher_id) - self.assertDictEqual( - { - "event_type": "audit.strategy.start", - "payload": { - "watcher_object.data": { - "audit_type": "ONESHOT", - "created_at": "2016-10-18T09:52:05Z", - "deleted_at": None, - "fault": None, - "goal_uuid": "f7ad87ae-4298-91cf-93a0-f35a852e3652", - "goal": { - "watcher_object.data": { - "created_at": "2016-10-18T09:52:05Z", - "deleted_at": None, - "display_name": "test goal", - "efficacy_specification": [], - "name": "TEST", - "updated_at": None, - "uuid": "f7ad87ae-4298-91cf-93a0-f35a852e3652" - }, - "watcher_object.name": "GoalPayload", - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.0" - }, - "interval": None, - "next_run_time": None, - "auto_trigger": False, - "parameters": {}, - "scope": [], - "state": "ONGOING", - "strategy_uuid": ( - "cb3d0b58-4415-4d90-b75b-1e96878730e3"), - "strategy": { - "watcher_object.data": { - "created_at": "2016-10-18T09:52:05Z", - "deleted_at": None, - "display_name": "test strategy", - "name": "TEST", - "parameters_spec": {}, - "updated_at": None, - "uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3" - }, - "watcher_object.name": "StrategyPayload", - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.0" - }, - "updated_at": None, - "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d" - }, - "watcher_object.name": "AuditActionPayload", - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.1" - } - }, - notification - ) - - def test_send_audit_action_with_error(self): - audit = utils.create_test_audit( - mock.Mock(), interval=None, state=objects.audit.State.ONGOING, - goal_id=self.goal.id, strategy_id=self.strategy.id, - goal=self.goal, strategy=self.strategy) - - try: - # This is to load the exception in sys.exc_info() - raise 
exception.WatcherException("TEST") - except exception.WatcherException: - notifications.audit.send_action_notification( - mock.MagicMock(), audit, host='node0', - action='strategy', priority='error', phase='error') - - self.assertEqual(1, self.m_notifier.error.call_count) - notification = self.m_notifier.error.call_args[1] - self.assertEqual("infra-optim:node0", self.m_notifier.publisher_id) - self.assertDictEqual( - { - "event_type": "audit.strategy.error", - "payload": { - "watcher_object.data": { - "audit_type": "ONESHOT", - "created_at": "2016-10-18T09:52:05Z", - "deleted_at": None, - "fault": { - "watcher_object.data": { - "exception": "WatcherException", - "exception_message": "TEST", - "function_name": ( - "test_send_audit_action_with_error"), - "module_name": "watcher.tests.notifications." - "test_audit_notification" - }, - "watcher_object.name": "ExceptionPayload", - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.0" - }, - "goal_uuid": "f7ad87ae-4298-91cf-93a0-f35a852e3652", - "goal": { - "watcher_object.data": { - "created_at": "2016-10-18T09:52:05Z", - "deleted_at": None, - "display_name": "test goal", - "efficacy_specification": [], - "name": "TEST", - "updated_at": None, - "uuid": "f7ad87ae-4298-91cf-93a0-f35a852e3652" - }, - "watcher_object.name": "GoalPayload", - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.0" - }, - "interval": None, - "next_run_time": None, - "auto_trigger": False, - "parameters": {}, - "scope": [], - "state": "ONGOING", - "strategy_uuid": ( - "cb3d0b58-4415-4d90-b75b-1e96878730e3"), - "strategy": { - "watcher_object.data": { - "created_at": "2016-10-18T09:52:05Z", - "deleted_at": None, - "display_name": "test strategy", - "name": "TEST", - "parameters_spec": {}, - "updated_at": None, - "uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3" - }, - "watcher_object.name": "StrategyPayload", - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.0" - }, - "updated_at": None, - 
"uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d" - }, - "watcher_object.name": "AuditActionPayload", - "watcher_object.namespace": "watcher", - "watcher_object.version": "1.1" - } - }, - notification - ) diff --git a/watcher/tests/notifications/test_notification.py b/watcher/tests/notifications/test_notification.py deleted file mode 100644 index d60a148..0000000 --- a/watcher/tests/notifications/test_notification.py +++ /dev/null @@ -1,355 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import collections - -import mock -from oslo_versionedobjects import fixture - -from watcher.common import exception -from watcher.common import rpc -from watcher.notifications import base as notificationbase -from watcher.objects import base -from watcher.objects import fields as wfields -from watcher.tests import base as testbase -from watcher.tests.objects import test_objects - - -class TestNotificationBase(testbase.TestCase): - - @base.WatcherObjectRegistry.register_if(False) - class TestObject(base.WatcherObject): - VERSION = '1.0' - fields = { - 'field_1': wfields.StringField(), - 'field_2': wfields.IntegerField(), - 'not_important_field': wfields.IntegerField(), - } - - @base.WatcherObjectRegistry.register_if(False) - class TestNotificationPayload(notificationbase.NotificationPayloadBase): - VERSION = '1.0' - - SCHEMA = { - 'field_1': ('source_field', 'field_1'), - 'field_2': ('source_field', 'field_2'), - } - - fields = { - 'extra_field': wfields.StringField(), # filled by ctor - 'field_1': wfields.StringField(), # filled by the schema - 'field_2': wfields.IntegerField(), # filled by the schema - } - - def populate_schema(self, source_field): - super(TestNotificationBase.TestNotificationPayload, - self).populate_schema(source_field=source_field) - - @base.WatcherObjectRegistry.register_if(False) - class TestNotificationPayloadEmptySchema( - notificationbase.NotificationPayloadBase): - VERSION = '1.0' - - fields = { - 'extra_field': wfields.StringField(), # filled by ctor - } - - @notificationbase.notification_sample('test-update-1.json') - @notificationbase.notification_sample('test-update-2.json') - @base.WatcherObjectRegistry.register_if(False) - class TestNotification(notificationbase.NotificationBase): - VERSION = '1.0' - fields = { - 'payload': wfields.ObjectField('TestNotificationPayload') - } - - @base.WatcherObjectRegistry.register_if(False) - class TestNotificationEmptySchema(notificationbase.NotificationBase): - VERSION = '1.0' - fields = { - 
'payload': wfields.ObjectField( - 'TestNotificationPayloadEmptySchema') - } - - expected_payload = { - 'watcher_object.name': 'TestNotificationPayload', - 'watcher_object.data': { - 'extra_field': 'test string', - 'field_1': 'test1', - 'field_2': 42}, - 'watcher_object.version': '1.0', - 'watcher_object.namespace': 'watcher'} - - def setUp(self): - super(TestNotificationBase, self).setUp() - - self.my_obj = self.TestObject(field_1='test1', - field_2=42, - not_important_field=13) - - self.payload = self.TestNotificationPayload( - extra_field='test string') - self.payload.populate_schema(source_field=self.my_obj) - - self.notification = self.TestNotification( - event_type=notificationbase.EventType( - object='test_object', - action=wfields.NotificationAction.UPDATE, - phase=wfields.NotificationPhase.START), - publisher=notificationbase.NotificationPublisher( - host='fake-host', binary='watcher-fake'), - priority=wfields.NotificationPriority.INFO, - payload=self.payload) - - def _verify_notification(self, mock_notifier, mock_context, - expected_event_type, - expected_payload): - mock_notifier.prepare.assert_called_once_with( - publisher_id='watcher-fake:fake-host') - mock_notify = mock_notifier.prepare.return_value.info - self.assertTrue(mock_notify.called) - self.assertEqual(mock_notify.call_args[0][0], mock_context) - self.assertEqual(mock_notify.call_args[1]['event_type'], - expected_event_type) - actual_payload = mock_notify.call_args[1]['payload'] - self.assertEqual(expected_payload, actual_payload) - - @mock.patch.object(rpc, 'NOTIFIER') - def test_emit_notification(self, mock_notifier): - mock_context = mock.Mock() - mock_context.to_dict.return_value = {} - self.notification.emit(mock_context) - - self._verify_notification( - mock_notifier, - mock_context, - expected_event_type='test_object.update.start', - expected_payload=self.expected_payload) - - @mock.patch.object(rpc, 'NOTIFIER') - def test_no_emit_notifs_disabled(self, mock_notifier): - # Make sure 
notifications aren't emitted when notification_level - # isn't defined, indicating notifications should be disabled - self.config(notification_level=None) - notif = self.TestNotification( - event_type=notificationbase.EventType( - object='test_object', - action=wfields.NotificationAction.UPDATE, - phase=wfields.NotificationPhase.START), - publisher=notificationbase.NotificationPublisher( - host='fake-host', binary='watcher-fake'), - priority=wfields.NotificationPriority.INFO, - payload=self.payload) - - mock_context = mock.Mock() - notif.emit(mock_context) - - self.assertFalse(mock_notifier.called) - - @mock.patch.object(rpc, 'NOTIFIER') - def test_no_emit_level_too_low(self, mock_notifier): - # Make sure notification doesn't emit when set notification - # level < config level - self.config(notification_level='warning') - notif = self.TestNotification( - event_type=notificationbase.EventType( - object='test_object', - action=wfields.NotificationAction.UPDATE, - phase=wfields.NotificationPhase.START), - publisher=notificationbase.NotificationPublisher( - host='fake-host', binary='watcher-fake'), - priority=wfields.NotificationPriority.INFO, - payload=self.payload) - - mock_context = mock.Mock() - notif.emit(mock_context) - - self.assertFalse(mock_notifier.called) - - @mock.patch.object(rpc, 'NOTIFIER') - def test_emit_event_type_without_phase(self, mock_notifier): - noti = self.TestNotification( - event_type=notificationbase.EventType( - object='test_object', - action=wfields.NotificationAction.UPDATE), - publisher=notificationbase.NotificationPublisher( - host='fake-host', binary='watcher-fake'), - priority=wfields.NotificationPriority.INFO, - payload=self.payload) - - mock_context = mock.Mock() - mock_context.to_dict.return_value = {} - noti.emit(mock_context) - - self._verify_notification( - mock_notifier, - mock_context, - expected_event_type='test_object.update', - expected_payload=self.expected_payload) - - @mock.patch.object(rpc, 'NOTIFIER') - def 
test_not_possible_to_emit_if_not_populated(self, mock_notifier): - non_populated_payload = self.TestNotificationPayload( - extra_field='test string') - noti = self.TestNotification( - event_type=notificationbase.EventType( - object='test_object', - action=wfields.NotificationAction.UPDATE), - publisher=notificationbase.NotificationPublisher( - host='fake-host', binary='watcher-fake'), - priority=wfields.NotificationPriority.INFO, - payload=non_populated_payload) - - mock_context = mock.Mock() - self.assertRaises(exception.NotificationPayloadError, - noti.emit, mock_context) - self.assertFalse(mock_notifier.called) - - @mock.patch.object(rpc, 'NOTIFIER') - def test_empty_schema(self, mock_notifier): - non_populated_payload = self.TestNotificationPayloadEmptySchema( - extra_field='test string') - noti = self.TestNotificationEmptySchema( - event_type=notificationbase.EventType( - object='test_object', - action=wfields.NotificationAction.UPDATE), - publisher=notificationbase.NotificationPublisher( - host='fake-host', binary='watcher-fake'), - priority=wfields.NotificationPriority.INFO, - payload=non_populated_payload) - - mock_context = mock.Mock() - mock_context.to_dict.return_value = {} - noti.emit(mock_context) - - self._verify_notification( - mock_notifier, - mock_context, - expected_event_type='test_object.update', - expected_payload={ - 'watcher_object.name': 'TestNotificationPayloadEmptySchema', - 'watcher_object.data': {'extra_field': 'test string'}, - 'watcher_object.version': '1.0', - 'watcher_object.namespace': 'watcher'}) - - def test_sample_decorator(self): - self.assertEqual(2, len(self.TestNotification.samples)) - self.assertIn('test-update-1.json', self.TestNotification.samples) - self.assertIn('test-update-2.json', self.TestNotification.samples) - - -expected_notification_fingerprints = { - 'EventType': '1.3-4258a2c86eca79fd34a7dffe1278eab9', - 'ExceptionNotification': '1.0-9b69de0724fda8310d05e18418178866', - 'ExceptionPayload': 
'1.0-4516ae282a55fe2fd5c754967ee6248b', - 'NotificationPublisher': '1.0-bbbc1402fb0e443a3eb227cc52b61545', - 'TerseAuditPayload': '1.1-19b0e9224c0953366418a30ed785f267', - 'AuditPayload': '1.1-4c59e0cc5d30c42d3b842ce0332709d5', - 'AuditStateUpdatePayload': '1.0-1a1b606bf14a2c468800c2b010801ce5', - 'AuditUpdateNotification': '1.0-9b69de0724fda8310d05e18418178866', - 'AuditUpdatePayload': '1.1-9b1f725e736051b976571701e5cc1e55', - 'AuditCreateNotification': '1.0-9b69de0724fda8310d05e18418178866', - 'AuditCreatePayload': '1.1-4c59e0cc5d30c42d3b842ce0332709d5', - 'AuditDeleteNotification': '1.0-9b69de0724fda8310d05e18418178866', - 'AuditDeletePayload': '1.1-4c59e0cc5d30c42d3b842ce0332709d5', - 'AuditActionNotification': '1.0-9b69de0724fda8310d05e18418178866', - 'AuditActionPayload': '1.1-5a43e7321495c19f98ef5663efa0a821', - 'GoalPayload': '1.0-fa1fecb8b01dd047eef808ded4d50d1a', - 'StrategyPayload': '1.0-94f01c137b083ac236ae82573c1fcfc1', - 'ActionPlanActionPayload': '1.0-d9f134708e06cf2ff2d3b8d522ac2aa8', - 'ActionPlanCreateNotification': '1.0-9b69de0724fda8310d05e18418178866', - 'ActionPlanCreatePayload': '1.0-23d0abbfa43acfd49b2b3097770efdce', - 'ActionPlanDeleteNotification': '1.0-9b69de0724fda8310d05e18418178866', - 'ActionPlanDeletePayload': '1.0-23d0abbfa43acfd49b2b3097770efdce', - 'ActionPlanPayload': '1.0-23d0abbfa43acfd49b2b3097770efdce', - 'ActionPlanStateUpdatePayload': '1.0-1a1b606bf14a2c468800c2b010801ce5', - 'ActionPlanUpdateNotification': '1.0-9b69de0724fda8310d05e18418178866', - 'ActionPlanUpdatePayload': '1.0-3e1a348a0579c6c43c1c3d7257e3f26b', - 'ActionPlanActionNotification': '1.0-9b69de0724fda8310d05e18418178866', - 'ActionCreateNotification': '1.0-9b69de0724fda8310d05e18418178866', - 'ActionCreatePayload': '1.0-519b93b7450319d8928b4b6e6362df31', - 'ActionDeleteNotification': '1.0-9b69de0724fda8310d05e18418178866', - 'ActionDeletePayload': '1.0-519b93b7450319d8928b4b6e6362df31', - 'ActionExecutionNotification': '1.0-9b69de0724fda8310d05e18418178866', 
- 'ActionExecutionPayload': '1.0-bff9f820a2abf7bb6d7027b7450157df', - 'ActionPayload': '1.0-519b93b7450319d8928b4b6e6362df31', - 'ActionStateUpdatePayload': '1.0-1a1b606bf14a2c468800c2b010801ce5', - 'ActionUpdateNotification': '1.0-9b69de0724fda8310d05e18418178866', - 'ActionUpdatePayload': '1.0-03306c7e7f4d49ac328c261eff6b30b8', - 'TerseActionPlanPayload': '1.0-42bf7a5585cc111a9a4dbc008a04c67e', - 'ServiceUpdateNotification': '1.0-9b69de0724fda8310d05e18418178866', - 'ServicePayload': '1.0-9c5a9bc51e6606e0ec3cf95baf698f4f', - 'ServiceStatusUpdatePayload': '1.0-1a1b606bf14a2c468800c2b010801ce5', - 'ServiceUpdatePayload': '1.0-e0e9812a45958974693a723a2c820c3f' - -} - - -class TestNotificationObjectVersions(testbase.TestCase): - def setUp(self): - super(TestNotificationObjectVersions, self).setUp() - base.WatcherObjectRegistry.register_notification_objects() - - def test_versions(self): - checker = fixture.ObjectVersionChecker( - test_objects.get_watcher_objects()) - expected_notification_fingerprints.update( - test_objects.expected_object_fingerprints) - expected, actual = checker.test_hashes( - expected_notification_fingerprints) - self.assertEqual(expected, actual, - 'Some notification objects have changed; please make ' - 'sure the versions have been bumped, and then update ' - 'their hashes here.') - - def test_notification_payload_version_depends_on_the_schema(self): - @base.WatcherObjectRegistry.register_if(False) - class TestNotificationPayload( - notificationbase.NotificationPayloadBase): - VERSION = '1.0' - - SCHEMA = { - 'field_1': ('source_field', 'field_1'), - 'field_2': ('source_field', 'field_2'), - } - - fields = { - 'extra_field': wfields.StringField(), # filled by ctor - 'field_1': wfields.StringField(), # filled by the schema - 'field_2': wfields.IntegerField(), # filled by the schema - } - - checker = fixture.ObjectVersionChecker( - {'TestNotificationPayload': (TestNotificationPayload,)}) - - old_hash = 
checker.get_hashes(extra_data_func=get_extra_data) - TestNotificationPayload.SCHEMA['field_3'] = ('source_field', - 'field_3') - new_hash = checker.get_hashes(extra_data_func=get_extra_data) - - self.assertNotEqual(old_hash, new_hash) - - -def get_extra_data(obj_class): - extra_data = tuple() - - # Get the SCHEMA items to add to the fingerprint - # if we are looking at a notification - if issubclass(obj_class, notificationbase.NotificationPayloadBase): - schema_data = collections.OrderedDict( - sorted(obj_class.SCHEMA.items())) - - extra_data += (schema_data,) - - return extra_data diff --git a/watcher/tests/notifications/test_service_notifications.py b/watcher/tests/notifications/test_service_notifications.py deleted file mode 100644 index 538fed0..0000000 --- a/watcher/tests/notifications/test_service_notifications.py +++ /dev/null @@ -1,77 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2017 Servionica -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import datetime - -import freezegun -import mock -import oslo_messaging as om - -from watcher.common import rpc -from watcher import notifications -from watcher.objects import service as w_service -from watcher.tests.db import base -from watcher.tests.objects import utils - - -@freezegun.freeze_time('2016-10-18T09:52:05.219414') -class TestActionPlanNotification(base.DbTestCase): - - def setUp(self): - super(TestActionPlanNotification, self).setUp() - p_get_notifier = mock.patch.object(rpc, 'get_notifier') - m_get_notifier = p_get_notifier.start() - self.addCleanup(p_get_notifier.stop) - self.m_notifier = mock.Mock(spec=om.Notifier) - - def fake_get_notifier(publisher_id): - self.m_notifier.publisher_id = publisher_id - return self.m_notifier - - m_get_notifier.side_effect = fake_get_notifier - - def test_service_failed(self): - service = utils.get_test_service(mock.Mock(), - created_at=datetime.datetime.utcnow()) - state = w_service.ServiceStatus.FAILED - notifications.service.send_service_update(mock.MagicMock(), - service, - state, - host='node0') - notification = self.m_notifier.warning.call_args[1] - payload = notification['payload'] - self.assertEqual("infra-optim:node0", self.m_notifier.publisher_id) - self.assertDictEqual({ - 'watcher_object.data': { - 'last_seen_up': '2016-09-22T08:32:06Z', - 'name': 'watcher-service', - 'sevice_host': 'controller', - 'status_update': { - 'watcher_object.data': { - 'old_state': 'ACTIVE', - 'state': 'FAILED' - }, - 'watcher_object.name': 'ServiceStatusUpdatePayload', - 'watcher_object.namespace': 'watcher', - 'watcher_object.version': '1.0' - } - }, - 'watcher_object.name': 'ServiceUpdatePayload', - 'watcher_object.namespace': 'watcher', - 'watcher_object.version': '1.0' - }, - payload - ) diff --git a/watcher/tests/objects/__init__.py b/watcher/tests/objects/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher/tests/objects/test_action.py b/watcher/tests/objects/test_action.py deleted file 
mode 100644 index 82c6706..0000000 --- a/watcher/tests/objects/test_action.py +++ /dev/null @@ -1,231 +0,0 @@ -# Copyright 2015 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import datetime - -import iso8601 -import mock - -from watcher.common import exception -from watcher.common import utils as c_utils -from watcher.db.sqlalchemy import api as db_api -from watcher import notifications -from watcher import objects -from watcher.tests.db import base -from watcher.tests.db import utils - - -class TestActionObject(base.DbTestCase): - - action_plan_id = 2 - - scenarios = [ - ('non_eager', dict( - eager=False, - fake_action=utils.get_test_action( - action_plan_id=action_plan_id))), - ('eager_with_non_eager_load', dict( - eager=True, - fake_action=utils.get_test_action( - action_plan_id=action_plan_id))), - ('eager_with_eager_load', dict( - eager=True, - fake_action=utils.get_test_action( - action_plan_id=action_plan_id, - action_plan=utils.get_test_action_plan(id=action_plan_id)))), - ] - - def setUp(self): - super(TestActionObject, self).setUp() - - p_action_notifications = mock.patch.object( - notifications, 'action_plan', autospec=True) - self.m_action_notifications = p_action_notifications.start() - self.addCleanup(p_action_notifications.stop) - self.m_send_update = self.m_action_notifications.send_update - - self.fake_action_plan = utils.create_test_action_plan( - id=self.action_plan_id) - - def 
eager_action_assert(self, action): - if self.eager: - self.assertIsNotNone(action.action_plan) - fields_to_check = set( - super(objects.ActionPlan, objects.ActionPlan).fields - ).symmetric_difference(objects.ActionPlan.fields) - db_data = { - k: v for k, v in self.fake_action_plan.as_dict().items() - if k in fields_to_check} - object_data = { - k: v for k, v in action.action_plan.as_dict().items() - if k in fields_to_check} - self.assertEqual(db_data, object_data) - - @mock.patch.object(db_api.Connection, 'get_action_by_id') - def test_get_by_id(self, mock_get_action): - mock_get_action.return_value = self.fake_action - action_id = self.fake_action['id'] - action = objects.Action.get(self.context, action_id, eager=self.eager) - mock_get_action.assert_called_once_with( - self.context, action_id, eager=self.eager) - self.assertEqual(self.context, action._context) - self.eager_action_assert(action) - self.assertEqual(0, self.m_send_update.call_count) - - @mock.patch.object(db_api.Connection, 'get_action_by_uuid') - def test_get_by_uuid(self, mock_get_action): - mock_get_action.return_value = self.fake_action - uuid = self.fake_action['uuid'] - action = objects.Action.get(self.context, uuid, eager=self.eager) - mock_get_action.assert_called_once_with( - self.context, uuid, eager=self.eager) - self.assertEqual(self.context, action._context) - self.assertEqual(0, self.m_send_update.call_count) - - def test_get_bad_id_and_uuid(self): - self.assertRaises(exception.InvalidIdentity, - objects.Action.get, self.context, 'not-a-uuid', - eager=self.eager) - - @mock.patch.object(db_api.Connection, 'get_action_list') - def test_list(self, mock_get_list): - mock_get_list.return_value = [self.fake_action] - actions = objects.Action.list(self.context, eager=self.eager) - self.assertEqual(1, mock_get_list.call_count) - self.assertEqual(1, len(actions)) - self.assertIsInstance(actions[0], objects.Action) - self.assertEqual(self.context, actions[0]._context) - for action in actions: - 
self.eager_action_assert(action) - self.assertEqual(0, self.m_send_update.call_count) - - @mock.patch.object(objects.Strategy, 'get') - @mock.patch.object(objects.Audit, 'get') - @mock.patch.object(db_api.Connection, 'update_action') - @mock.patch.object(db_api.Connection, 'get_action_by_uuid') - def test_save(self, mock_get_action, mock_update_action, mock_get_audit, - mock_get_strategy): - mock_get_action.return_value = self.fake_action - fake_saved_action = self.fake_action.copy() - mock_get_audit.return_value = mock.PropertyMock( - uuid=c_utils.generate_uuid()) - mock_get_strategy.return_value = mock.PropertyMock( - uuid=c_utils.generate_uuid()) - fake_saved_action['updated_at'] = datetime.datetime.utcnow() - mock_update_action.return_value = fake_saved_action - uuid = self.fake_action['uuid'] - action = objects.Action.get_by_uuid( - self.context, uuid, eager=self.eager) - action.state = objects.action.State.SUCCEEDED - if not self.eager: - self.assertRaises(exception.EagerlyLoadedActionRequired, - action.save) - else: - action.save() - - expected_update_at = fake_saved_action['updated_at'].replace( - tzinfo=iso8601.iso8601.Utc()) - - mock_get_action.assert_called_once_with( - self.context, uuid, eager=self.eager) - mock_update_action.assert_called_once_with( - uuid, {'state': objects.action.State.SUCCEEDED}) - self.assertEqual(self.context, action._context) - self.assertEqual(expected_update_at, action.updated_at) - self.assertEqual(0, self.m_send_update.call_count) - - @mock.patch.object(db_api.Connection, 'get_action_by_uuid') - def test_refresh(self, mock_get_action): - returns = [dict(self.fake_action, state="first state"), - dict(self.fake_action, state="second state")] - mock_get_action.side_effect = returns - uuid = self.fake_action['uuid'] - expected = [mock.call(self.context, uuid, eager=self.eager), - mock.call(self.context, uuid, eager=self.eager)] - action = objects.Action.get(self.context, uuid, eager=self.eager) - self.assertEqual("first state", 
action.state) - action.refresh(eager=self.eager) - self.assertEqual("second state", action.state) - self.assertEqual(expected, mock_get_action.call_args_list) - self.assertEqual(self.context, action._context) - self.eager_action_assert(action) - self.assertEqual(0, self.m_send_update.call_count) - - -class TestCreateDeleteActionObject(base.DbTestCase): - - def setUp(self): - super(TestCreateDeleteActionObject, self).setUp() - self.fake_strategy = utils.create_test_strategy(name="DUMMY") - self.fake_audit = utils.create_test_audit() - self.fake_action_plan = utils.create_test_action_plan() - self.fake_action = utils.get_test_action( - created_at=datetime.datetime.utcnow()) - - @mock.patch.object(db_api.Connection, 'create_action') - def test_create(self, mock_create_action): - mock_create_action.return_value = self.fake_action - action = objects.Action(self.context, **self.fake_action) - action.create() - expected_action = self.fake_action.copy() - expected_action['created_at'] = expected_action['created_at'].replace( - tzinfo=iso8601.iso8601.Utc()) - mock_create_action.assert_called_once_with(expected_action) - self.assertEqual(self.context, action._context) - - @mock.patch.object(notifications.action, 'send_delete') - @mock.patch.object(notifications.action, 'send_update') - @mock.patch.object(db_api.Connection, 'update_action') - @mock.patch.object(db_api.Connection, 'soft_delete_action') - @mock.patch.object(db_api.Connection, 'get_action_by_uuid') - def test_soft_delete(self, mock_get_action, - mock_soft_delete_action, mock_update_action, - mock_send_update, mock_send_delete): - mock_get_action.return_value = self.fake_action - fake_deleted_action = self.fake_action.copy() - fake_deleted_action['deleted_at'] = datetime.datetime.utcnow() - mock_soft_delete_action.return_value = fake_deleted_action - mock_update_action.return_value = fake_deleted_action - - expected_action = fake_deleted_action.copy() - expected_action['created_at'] = 
expected_action['created_at'].replace( - tzinfo=iso8601.iso8601.Utc()) - expected_action['deleted_at'] = expected_action['deleted_at'].replace( - tzinfo=iso8601.iso8601.Utc()) - del expected_action['action_plan'] - - uuid = self.fake_action['uuid'] - action = objects.Action.get_by_uuid(self.context, uuid) - action.soft_delete() - mock_get_action.assert_called_once_with( - self.context, uuid, eager=False) - mock_soft_delete_action.assert_called_once_with(uuid) - mock_update_action.assert_called_once_with( - uuid, {'state': objects.action.State.DELETED}) - self.assertEqual(self.context, action._context) - self.assertEqual(expected_action, action.as_dict()) - - @mock.patch.object(db_api.Connection, 'destroy_action') - @mock.patch.object(db_api.Connection, 'get_action_by_uuid') - def test_destroy(self, mock_get_action, mock_destroy_action): - mock_get_action.return_value = self.fake_action - uuid = self.fake_action['uuid'] - action = objects.Action.get_by_uuid(self.context, uuid) - action.destroy() - - mock_get_action.assert_called_once_with( - self.context, uuid, eager=False) - mock_destroy_action.assert_called_once_with(uuid) - self.assertEqual(self.context, action._context) diff --git a/watcher/tests/objects/test_action_plan.py b/watcher/tests/objects/test_action_plan.py deleted file mode 100644 index a06c948..0000000 --- a/watcher/tests/objects/test_action_plan.py +++ /dev/null @@ -1,324 +0,0 @@ -# Copyright 2015 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -import datetime - -import iso8601 -import mock - -from watcher.common import exception -from watcher.common import utils as common_utils -from watcher import conf -from watcher.db.sqlalchemy import api as db_api -from watcher import notifications -from watcher import objects -from watcher.tests.db import base -from watcher.tests.db import utils - -CONF = conf.CONF - - -class TestActionPlanObject(base.DbTestCase): - - audit_id = 2 - strategy_id = 2 - - scenarios = [ - ('non_eager', dict( - eager=False, - fake_action_plan=utils.get_test_action_plan( - created_at=datetime.datetime.utcnow(), - audit_id=audit_id, - strategy_id=strategy_id))), - ('eager_with_non_eager_load', dict( - eager=True, - fake_action_plan=utils.get_test_action_plan( - created_at=datetime.datetime.utcnow(), - audit_id=audit_id, - strategy_id=strategy_id))), - ('eager_with_eager_load', dict( - eager=True, - fake_action_plan=utils.get_test_action_plan( - created_at=datetime.datetime.utcnow(), - strategy_id=strategy_id, - strategy=utils.get_test_strategy(id=strategy_id), - audit_id=audit_id, - audit=utils.get_test_audit(id=audit_id)))), - ] - - def setUp(self): - super(TestActionPlanObject, self).setUp() - - p_action_plan_notifications = mock.patch.object( - notifications, 'action_plan', autospec=True) - self.m_action_plan_notifications = p_action_plan_notifications.start() - self.addCleanup(p_action_plan_notifications.stop) - self.m_send_update = self.m_action_plan_notifications.send_update - - self.fake_audit = utils.create_test_audit(id=self.audit_id) - self.fake_strategy = utils.create_test_strategy( - id=self.strategy_id, name="DUMMY") - - def eager_load_action_plan_assert(self, action_plan): - if self.eager: - self.assertIsNotNone(action_plan.audit) - fields_to_check = set( - super(objects.Audit, objects.Audit).fields - ).symmetric_difference(objects.Audit.fields) - db_data = { - k: v for 
k, v in self.fake_audit.as_dict().items() - if k in fields_to_check} - object_data = { - k: v for k, v in action_plan.audit.as_dict().items() - if k in fields_to_check} - self.assertEqual(db_data, object_data) - - @mock.patch.object(db_api.Connection, 'get_action_plan_by_id') - def test_get_by_id(self, mock_get_action_plan): - mock_get_action_plan.return_value = self.fake_action_plan - action_plan_id = self.fake_action_plan['id'] - action_plan = objects.ActionPlan.get( - self.context, action_plan_id, eager=self.eager) - mock_get_action_plan.assert_called_once_with( - self.context, action_plan_id, eager=self.eager) - self.assertEqual(self.context, action_plan._context) - self.eager_load_action_plan_assert(action_plan) - self.assertEqual(0, self.m_send_update.call_count) - - @mock.patch.object(db_api.Connection, 'get_action_plan_by_uuid') - def test_get_by_uuid(self, mock_get_action_plan): - mock_get_action_plan.return_value = self.fake_action_plan - uuid = self.fake_action_plan['uuid'] - action_plan = objects.ActionPlan.get( - self.context, uuid, eager=self.eager) - mock_get_action_plan.assert_called_once_with( - self.context, uuid, eager=self.eager) - self.assertEqual(self.context, action_plan._context) - self.eager_load_action_plan_assert(action_plan) - self.assertEqual(0, self.m_send_update.call_count) - - def test_get_bad_id_and_uuid(self): - self.assertRaises(exception.InvalidIdentity, - objects.ActionPlan.get, self.context, - 'not-a-uuid', eager=self.eager) - - @mock.patch.object(db_api.Connection, 'get_action_plan_list') - def test_list(self, mock_get_list): - mock_get_list.return_value = [self.fake_action_plan] - action_plans = objects.ActionPlan.list(self.context, eager=self.eager) - self.assertEqual(1, mock_get_list.call_count) - self.assertEqual(1, len(action_plans)) - self.assertIsInstance(action_plans[0], objects.ActionPlan) - self.assertEqual(self.context, action_plans[0]._context) - for action_plan in action_plans: - 
self.eager_load_action_plan_assert(action_plan) - self.assertEqual(0, self.m_send_update.call_count) - - @mock.patch.object(db_api.Connection, 'update_action_plan') - @mock.patch.object(db_api.Connection, 'get_action_plan_by_uuid') - def test_save(self, mock_get_action_plan, mock_update_action_plan): - mock_get_action_plan.return_value = self.fake_action_plan - fake_saved_action_plan = self.fake_action_plan.copy() - fake_saved_action_plan['state'] = objects.action_plan.State.SUCCEEDED - fake_saved_action_plan['updated_at'] = datetime.datetime.utcnow() - - mock_update_action_plan.return_value = fake_saved_action_plan - - expected_action_plan = fake_saved_action_plan.copy() - expected_action_plan[ - 'created_at'] = expected_action_plan['created_at'].replace( - tzinfo=iso8601.iso8601.Utc()) - expected_action_plan[ - 'updated_at'] = expected_action_plan['updated_at'].replace( - tzinfo=iso8601.iso8601.Utc()) - - uuid = self.fake_action_plan['uuid'] - action_plan = objects.ActionPlan.get_by_uuid( - self.context, uuid, eager=self.eager) - action_plan.state = objects.action_plan.State.SUCCEEDED - action_plan.save() - - mock_get_action_plan.assert_called_once_with( - self.context, uuid, eager=self.eager) - mock_update_action_plan.assert_called_once_with( - uuid, {'state': objects.action_plan.State.SUCCEEDED}) - self.assertEqual(self.context, action_plan._context) - self.eager_load_action_plan_assert(action_plan) - self.m_send_update.assert_called_once_with( - self.context, action_plan, - old_state=self.fake_action_plan['state']) - self.assertEqual( - {k: v for k, v in expected_action_plan.items() - if k not in action_plan.object_fields}, - {k: v for k, v in action_plan.as_dict().items() - if k not in action_plan.object_fields}) - - @mock.patch.object(db_api.Connection, 'get_action_plan_by_uuid') - def test_refresh(self, mock_get_action_plan): - returns = [dict(self.fake_action_plan, state="first state"), - dict(self.fake_action_plan, state="second state")] - 
mock_get_action_plan.side_effect = returns - uuid = self.fake_action_plan['uuid'] - expected = [mock.call(self.context, uuid, eager=self.eager), - mock.call(self.context, uuid, eager=self.eager)] - action_plan = objects.ActionPlan.get( - self.context, uuid, eager=self.eager) - self.assertEqual("first state", action_plan.state) - action_plan.refresh(eager=self.eager) - self.assertEqual("second state", action_plan.state) - self.assertEqual(expected, mock_get_action_plan.call_args_list) - self.assertEqual(self.context, action_plan._context) - self.eager_load_action_plan_assert(action_plan) - - -class TestCreateDeleteActionPlanObject(base.DbTestCase): - - def setUp(self): - super(TestCreateDeleteActionPlanObject, self).setUp() - - p_action_plan_notifications = mock.patch.object( - notifications, 'action_plan', autospec=True) - self.m_action_plan_notifications = p_action_plan_notifications.start() - self.addCleanup(p_action_plan_notifications.stop) - self.m_send_update = self.m_action_plan_notifications.send_update - - self.fake_strategy = utils.create_test_strategy(name="DUMMY") - self.fake_audit = utils.create_test_audit() - self.fake_action_plan = utils.get_test_action_plan( - created_at=datetime.datetime.utcnow()) - - @mock.patch.object(db_api.Connection, 'create_action_plan') - def test_create(self, mock_create_action_plan): - mock_create_action_plan.return_value = self.fake_action_plan - action_plan = objects.ActionPlan( - self.context, **self.fake_action_plan) - action_plan.create() - expected_action_plan = self.fake_action_plan.copy() - expected_action_plan['created_at'] = expected_action_plan[ - 'created_at'].replace(tzinfo=iso8601.iso8601.Utc()) - mock_create_action_plan.assert_called_once_with(expected_action_plan) - self.assertEqual(self.context, action_plan._context) - - @mock.patch.multiple( - db_api.Connection, - get_action_plan_by_uuid=mock.DEFAULT, - soft_delete_action_plan=mock.DEFAULT, - update_action_plan=mock.DEFAULT, - 
get_efficacy_indicator_list=mock.DEFAULT, - soft_delete_efficacy_indicator=mock.DEFAULT, - ) - def test_soft_delete(self, get_action_plan_by_uuid, - soft_delete_action_plan, update_action_plan, - get_efficacy_indicator_list, - soft_delete_efficacy_indicator): - efficacy_indicator = utils.get_test_efficacy_indicator( - action_plan_id=self.fake_action_plan['id']) - uuid = self.fake_action_plan['uuid'] - m_get_action_plan = get_action_plan_by_uuid - m_soft_delete_action_plan = soft_delete_action_plan - m_get_efficacy_indicator_list = get_efficacy_indicator_list - m_soft_delete_efficacy_indicator = soft_delete_efficacy_indicator - m_update_action_plan = update_action_plan - - m_get_action_plan.return_value = self.fake_action_plan - fake_deleted_action_plan = self.fake_action_plan.copy() - fake_deleted_action_plan['deleted_at'] = datetime.datetime.utcnow() - m_update_action_plan.return_value = fake_deleted_action_plan - m_soft_delete_action_plan.return_value = fake_deleted_action_plan - expected_action_plan = fake_deleted_action_plan.copy() - expected_action_plan['created_at'] = expected_action_plan[ - 'created_at'].replace(tzinfo=iso8601.iso8601.Utc()) - expected_action_plan['deleted_at'] = expected_action_plan[ - 'deleted_at'].replace(tzinfo=iso8601.iso8601.Utc()) - del expected_action_plan['audit'] - del expected_action_plan['strategy'] - - m_get_efficacy_indicator_list.return_value = [efficacy_indicator] - action_plan = objects.ActionPlan.get_by_uuid( - self.context, uuid, eager=False) - action_plan.soft_delete() - - m_get_action_plan.assert_called_once_with( - self.context, uuid, eager=False) - m_get_efficacy_indicator_list.assert_called_once_with( - self.context, filters={"action_plan_uuid": uuid}, - limit=None, marker=None, sort_dir=None, sort_key=None) - m_soft_delete_action_plan.assert_called_once_with(uuid) - m_soft_delete_efficacy_indicator.assert_called_once_with( - efficacy_indicator['uuid']) - m_update_action_plan.assert_called_once_with( - uuid, {'state': 
objects.action_plan.State.DELETED}) - - self.assertEqual(self.context, action_plan._context) - self.assertEqual(expected_action_plan, action_plan.as_dict()) - - @mock.patch.multiple( - db_api.Connection, - get_action_plan_by_uuid=mock.DEFAULT, - destroy_action_plan=mock.DEFAULT, - get_efficacy_indicator_list=mock.DEFAULT, - destroy_efficacy_indicator=mock.DEFAULT, - ) - def test_destroy(self, get_action_plan_by_uuid, destroy_action_plan, - get_efficacy_indicator_list, destroy_efficacy_indicator): - m_get_action_plan = get_action_plan_by_uuid - m_destroy_action_plan = destroy_action_plan - m_get_efficacy_indicator_list = get_efficacy_indicator_list - m_destroy_efficacy_indicator = destroy_efficacy_indicator - efficacy_indicator = utils.get_test_efficacy_indicator( - action_plan_id=self.fake_action_plan['id']) - uuid = self.fake_action_plan['uuid'] - m_get_action_plan.return_value = self.fake_action_plan - m_get_efficacy_indicator_list.return_value = [efficacy_indicator] - action_plan = objects.ActionPlan.get_by_uuid(self.context, uuid) - action_plan.destroy() - - m_get_action_plan.assert_called_once_with( - self.context, uuid, eager=False) - m_get_efficacy_indicator_list.assert_called_once_with( - self.context, filters={"action_plan_uuid": uuid}, - limit=None, marker=None, sort_dir=None, sort_key=None) - m_destroy_action_plan.assert_called_once_with(uuid) - m_destroy_efficacy_indicator.assert_called_once_with( - efficacy_indicator['uuid']) - self.assertEqual(self.context, action_plan._context) - - -@mock.patch.object(notifications.action_plan, 'send_update', mock.Mock()) -class TestStateManager(base.DbTestCase): - - def setUp(self): - super(TestStateManager, self).setUp() - self.state_manager = objects.action_plan.StateManager() - - def test_check_expired(self): - CONF.set_default('action_plan_expiry', 0, - group='watcher_decision_engine') - strategy_1 = utils.create_test_strategy( - uuid=common_utils.generate_uuid()) - audit_1 = utils.create_test_audit( - 
uuid=common_utils.generate_uuid()) - action_plan_1 = utils.create_test_action_plan( - state=objects.action_plan.State.RECOMMENDED, - uuid=common_utils.generate_uuid(), - audit_id=audit_1.id, - strategy_id=strategy_1.id) - - self.state_manager.check_expired(self.context) - - action_plan = objects.action_plan.ActionPlan.get_by_uuid( - self.context, action_plan_1.uuid) - self.assertEqual(objects.action_plan.State.SUPERSEDED, - action_plan.state) diff --git a/watcher/tests/objects/test_audit.py b/watcher/tests/objects/test_audit.py deleted file mode 100644 index 0b01480..0000000 --- a/watcher/tests/objects/test_audit.py +++ /dev/null @@ -1,325 +0,0 @@ -# Copyright 2015 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import datetime - -import iso8601 -import mock - -from watcher.common import exception -from watcher.common import rpc -from watcher.common import utils as w_utils -from watcher.db.sqlalchemy import api as db_api -from watcher import notifications -from watcher import objects -from watcher.tests.db import base -from watcher.tests.db import utils -from watcher.tests.objects import utils as objutils - - -class TestAuditObject(base.DbTestCase): - - goal_id = 2 - - goal_data = utils.get_test_goal( - id=goal_id, uuid=w_utils.generate_uuid(), name="DUMMY") - - scenarios = [ - ('non_eager', dict( - eager=False, - fake_audit=utils.get_test_audit( - created_at=datetime.datetime.utcnow(), - goal_id=goal_id))), - ('eager_with_non_eager_load', dict( - eager=True, - fake_audit=utils.get_test_audit( - created_at=datetime.datetime.utcnow(), - goal_id=goal_id))), - ('eager_with_eager_load', dict( - eager=True, - fake_audit=utils.get_test_audit( - created_at=datetime.datetime.utcnow(), - goal_id=goal_id, goal=goal_data))), - ] - - def setUp(self): - super(TestAuditObject, self).setUp() - - p_audit_notifications = mock.patch.object( - notifications, 'audit', autospec=True) - self.m_audit_notifications = p_audit_notifications.start() - self.addCleanup(p_audit_notifications.stop) - self.m_send_update = self.m_audit_notifications.send_update - self.fake_goal = utils.create_test_goal(**self.goal_data) - - def eager_load_audit_assert(self, audit, goal): - if self.eager: - self.assertIsNotNone(audit.goal) - fields_to_check = set( - super(objects.Goal, objects.Goal).fields - ).symmetric_difference(objects.Goal.fields) - db_data = { - k: v for k, v in goal.as_dict().items() - if k in fields_to_check} - object_data = { - k: v for k, v in audit.goal.as_dict().items() - if k in fields_to_check} - self.assertEqual(db_data, object_data) - - @mock.patch.object(db_api.Connection, 'get_audit_by_id') - def test_get_by_id(self, mock_get_audit): - mock_get_audit.return_value = self.fake_audit - 
audit_id = self.fake_audit['id'] - audit = objects.Audit.get(self.context, audit_id, eager=self.eager) - mock_get_audit.assert_called_once_with( - self.context, audit_id, eager=self.eager) - self.assertEqual(self.context, audit._context) - self.eager_load_audit_assert(audit, self.fake_goal) - self.assertEqual(0, self.m_send_update.call_count) - - @mock.patch.object(db_api.Connection, 'get_audit_by_uuid') - def test_get_by_uuid(self, mock_get_audit): - mock_get_audit.return_value = self.fake_audit - uuid = self.fake_audit['uuid'] - audit = objects.Audit.get(self.context, uuid, eager=self.eager) - mock_get_audit.assert_called_once_with( - self.context, uuid, eager=self.eager) - self.assertEqual(self.context, audit._context) - self.eager_load_audit_assert(audit, self.fake_goal) - self.assertEqual(0, self.m_send_update.call_count) - - def test_get_bad_id_and_uuid(self): - self.assertRaises(exception.InvalidIdentity, - objects.Audit.get, self.context, - 'not-a-uuid', eager=self.eager) - - @mock.patch.object(db_api.Connection, 'get_audit_list') - def test_list(self, mock_get_list): - mock_get_list.return_value = [self.fake_audit] - audits = objects.Audit.list(self.context, eager=self.eager) - mock_get_list.assert_called_once_with( - self.context, eager=self.eager, filters=None, limit=None, - marker=None, sort_dir=None, sort_key=None) - self.assertEqual(1, len(audits)) - self.assertIsInstance(audits[0], objects.Audit) - self.assertEqual(self.context, audits[0]._context) - for audit in audits: - self.eager_load_audit_assert(audit, self.fake_goal) - self.assertEqual(0, self.m_send_update.call_count) - - @mock.patch.object(db_api.Connection, 'update_audit') - @mock.patch.object(db_api.Connection, 'get_audit_by_uuid') - def test_save(self, mock_get_audit, mock_update_audit): - mock_get_audit.return_value = self.fake_audit - fake_saved_audit = self.fake_audit.copy() - fake_saved_audit['state'] = objects.audit.State.SUCCEEDED - fake_saved_audit['updated_at'] = 
datetime.datetime.utcnow() - mock_update_audit.return_value = fake_saved_audit - - expected_audit = fake_saved_audit.copy() - expected_audit['created_at'] = expected_audit['created_at'].replace( - tzinfo=iso8601.iso8601.Utc()) - expected_audit['updated_at'] = expected_audit['updated_at'].replace( - tzinfo=iso8601.iso8601.Utc()) - - uuid = self.fake_audit['uuid'] - audit = objects.Audit.get_by_uuid(self.context, uuid, eager=self.eager) - audit.state = objects.audit.State.SUCCEEDED - audit.save() - - mock_get_audit.assert_called_once_with( - self.context, uuid, eager=self.eager) - mock_update_audit.assert_called_once_with( - uuid, {'state': objects.audit.State.SUCCEEDED}) - self.assertEqual(self.context, audit._context) - self.eager_load_audit_assert(audit, self.fake_goal) - self.m_send_update.assert_called_once_with( - self.context, audit, old_state=self.fake_audit['state']) - self.assertEqual( - {k: v for k, v in expected_audit.items() - if k not in audit.object_fields}, - {k: v for k, v in audit.as_dict().items() - if k not in audit.object_fields}) - - @mock.patch.object(db_api.Connection, 'get_audit_by_uuid') - def test_refresh(self, mock_get_audit): - returns = [dict(self.fake_audit, state="first state"), - dict(self.fake_audit, state="second state")] - mock_get_audit.side_effect = returns - uuid = self.fake_audit['uuid'] - expected = [ - mock.call(self.context, uuid, eager=self.eager), - mock.call(self.context, uuid, eager=self.eager)] - audit = objects.Audit.get(self.context, uuid, eager=self.eager) - self.assertEqual("first state", audit.state) - audit.refresh(eager=self.eager) - self.assertEqual("second state", audit.state) - self.assertEqual(expected, mock_get_audit.call_args_list) - self.assertEqual(self.context, audit._context) - self.eager_load_audit_assert(audit, self.fake_goal) - - -class TestCreateDeleteAuditObject(base.DbTestCase): - - def setUp(self): - super(TestCreateDeleteAuditObject, self).setUp() - p_audit_notifications = mock.patch.object( - 
notifications, 'audit', autospec=True) - self.m_audit_notifications = p_audit_notifications.start() - self.addCleanup(p_audit_notifications.stop) - self.m_send_update = self.m_audit_notifications.send_update - - self.goal_id = 1 - self.goal = utils.create_test_goal(id=self.goal_id, name="DUMMY") - self.fake_audit = utils.get_test_audit( - goal_id=self.goal_id, created_at=datetime.datetime.utcnow()) - - @mock.patch.object(db_api.Connection, 'create_audit') - def test_create(self, mock_create_audit): - mock_create_audit.return_value = self.fake_audit - audit = objects.Audit(self.context, **self.fake_audit) - audit.create() - expected_audit = self.fake_audit.copy() - expected_audit['created_at'] = expected_audit['created_at'].replace( - tzinfo=iso8601.iso8601.Utc()) - mock_create_audit.assert_called_once_with(expected_audit) - self.assertEqual(self.context, audit._context) - - @mock.patch.object(db_api.Connection, 'update_audit') - @mock.patch.object(db_api.Connection, 'soft_delete_audit') - @mock.patch.object(db_api.Connection, 'get_audit_by_uuid') - def test_soft_delete(self, mock_get_audit, - mock_soft_delete_audit, mock_update_audit): - mock_get_audit.return_value = self.fake_audit - fake_deleted_audit = self.fake_audit.copy() - fake_deleted_audit['deleted_at'] = datetime.datetime.utcnow() - mock_soft_delete_audit.return_value = fake_deleted_audit - mock_update_audit.return_value = fake_deleted_audit - - expected_audit = fake_deleted_audit.copy() - expected_audit['created_at'] = expected_audit['created_at'].replace( - tzinfo=iso8601.iso8601.Utc()) - expected_audit['deleted_at'] = expected_audit['deleted_at'].replace( - tzinfo=iso8601.iso8601.Utc()) - del expected_audit['goal'] - del expected_audit['strategy'] - - uuid = self.fake_audit['uuid'] - audit = objects.Audit.get_by_uuid(self.context, uuid, eager=False) - audit.soft_delete() - mock_get_audit.assert_called_once_with(self.context, uuid, eager=False) - mock_soft_delete_audit.assert_called_once_with(uuid) - 
mock_update_audit.assert_called_once_with(uuid, {'state': 'DELETED'}) - self.assertEqual(self.context, audit._context) - self.assertEqual(expected_audit, audit.as_dict()) - - @mock.patch.object(db_api.Connection, 'destroy_audit') - @mock.patch.object(db_api.Connection, 'get_audit_by_uuid') - def test_destroy(self, mock_get_audit, - mock_destroy_audit): - mock_get_audit.return_value = self.fake_audit - uuid = self.fake_audit['uuid'] - audit = objects.Audit.get_by_uuid(self.context, uuid) - audit.destroy() - mock_get_audit.assert_called_once_with( - self.context, uuid, eager=False) - mock_destroy_audit.assert_called_once_with(uuid) - self.assertEqual(self.context, audit._context) - - -class TestAuditObjectSendNotifications(base.DbTestCase): - - def setUp(self): - super(TestAuditObjectSendNotifications, self).setUp() - goal_id = 1 - self.fake_goal = utils.create_test_goal(id=goal_id, name="DUMMY") - self.fake_strategy = utils.create_test_strategy( - id=goal_id, name="DUMMY") - self.fake_audit = utils.get_test_audit( - goal_id=goal_id, goal=utils.get_test_goal(id=goal_id), - strategy_id=self.fake_strategy.id, strategy=self.fake_strategy) - - p_get_notifier = mock.patch.object(rpc, 'get_notifier') - self.m_get_notifier = p_get_notifier.start() - self.m_get_notifier.return_value = mock.Mock(name='m_notifier') - self.m_notifier = self.m_get_notifier.return_value - self.addCleanup(p_get_notifier.stop) - - @mock.patch.object(db_api.Connection, 'update_audit') - @mock.patch.object(db_api.Connection, 'get_audit_by_uuid') - def test_send_update_notification(self, m_get_audit, m_update_audit): - fake_audit = utils.get_test_audit( - goal=self.fake_goal.as_dict(), - strategy_id=self.fake_strategy.id, - strategy=self.fake_strategy.as_dict()) - m_get_audit.return_value = fake_audit - fake_saved_audit = self.fake_audit.copy() - fake_saved_audit['state'] = objects.audit.State.SUCCEEDED - m_update_audit.return_value = fake_saved_audit - uuid = fake_audit['uuid'] - - audit = 
objects.Audit.get_by_uuid(self.context, uuid, eager=True) - audit.state = objects.audit.State.ONGOING - audit.save() - - self.assertEqual(1, self.m_notifier.info.call_count) - self.assertEqual('audit.update', - self.m_notifier.info.call_args[1]['event_type']) - - @mock.patch.object(db_api.Connection, 'create_audit') - def test_send_create_notification(self, m_create_audit): - audit = objutils.get_test_audit( - self.context, - id=1, - goal_id=self.fake_goal.id, - strategy_id=self.fake_strategy.id, - goal=self.fake_goal.as_dict(), - strategy=self.fake_strategy.as_dict()) - m_create_audit.return_value = audit - audit.create() - - self.assertEqual(1, self.m_notifier.info.call_count) - self.assertEqual('audit.create', - self.m_notifier.info.call_args[1]['event_type']) - - @mock.patch.object(db_api.Connection, 'update_audit') - @mock.patch.object(db_api.Connection, 'soft_delete_audit') - @mock.patch.object(db_api.Connection, 'get_audit_by_uuid') - def test_send_delete_notification( - self, m_get_audit, m_soft_delete_audit, m_update_audit): - fake_audit = utils.get_test_audit( - goal=self.fake_goal.as_dict(), - strategy_id=self.fake_strategy.id, - strategy=self.fake_strategy.as_dict()) - m_get_audit.return_value = fake_audit - fake_deleted_audit = self.fake_audit.copy() - fake_deleted_audit['deleted_at'] = datetime.datetime.utcnow() - expected_audit = fake_deleted_audit.copy() - expected_audit['deleted_at'] = expected_audit['deleted_at'].replace( - tzinfo=iso8601.iso8601.Utc()) - - m_soft_delete_audit.return_value = fake_deleted_audit - m_update_audit.return_value = fake_deleted_audit - uuid = fake_audit['uuid'] - audit = objects.Audit.get_by_uuid(self.context, uuid, eager=True) - audit.soft_delete() - - self.assertEqual(2, self.m_notifier.info.call_count) - self.assertEqual( - 'audit.update', - self.m_notifier.info.call_args_list[0][1]['event_type']) - self.assertEqual( - 'audit.delete', - self.m_notifier.info.call_args_list[1][1]['event_type']) diff --git 
a/watcher/tests/objects/test_audit_template.py b/watcher/tests/objects/test_audit_template.py deleted file mode 100644 index 14da9f0..0000000 --- a/watcher/tests/objects/test_audit_template.py +++ /dev/null @@ -1,222 +0,0 @@ -# Copyright 2015 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import datetime - -import iso8601 -import mock - -from watcher.common import exception -from watcher.common import utils as w_utils -from watcher.db.sqlalchemy import api as db_api -from watcher import objects -from watcher.tests.db import base -from watcher.tests.db import utils - - -class TestAuditTemplateObject(base.DbTestCase): - - goal_id = 1 - - goal_data = utils.get_test_goal( - id=goal_id, uuid=w_utils.generate_uuid(), name="DUMMY") - - scenarios = [ - ('non_eager', dict( - eager=False, - fake_audit_template=utils.get_test_audit_template( - created_at=datetime.datetime.utcnow(), - goal_id=goal_id))), - ('eager_with_non_eager_load', dict( - eager=True, - fake_audit_template=utils.get_test_audit_template( - created_at=datetime.datetime.utcnow(), - goal_id=goal_id))), - ('eager_with_eager_load', dict( - eager=True, - fake_audit_template=utils.get_test_audit_template( - created_at=datetime.datetime.utcnow(), - goal_id=goal_id, goal=goal_data))), - ] - - def setUp(self): - super(TestAuditTemplateObject, self).setUp() - self.fake_goal = utils.create_test_goal(**self.goal_data) - - def eager_load_audit_template_assert(self, 
audit_template, goal): - if self.eager: - self.assertIsNotNone(audit_template.goal) - fields_to_check = set( - super(objects.Goal, objects.Goal).fields - ).symmetric_difference(objects.Goal.fields) - db_data = { - k: v for k, v in goal.as_dict().items() - if k in fields_to_check} - object_data = { - k: v for k, v in audit_template.goal.as_dict().items() - if k in fields_to_check} - self.assertEqual(db_data, object_data) - - @mock.patch.object(db_api.Connection, 'get_audit_template_by_id') - def test_get_by_id(self, mock_get_audit_template): - mock_get_audit_template.return_value = self.fake_audit_template - audit_template_id = self.fake_audit_template['id'] - audit_template = objects.AuditTemplate.get( - self.context, audit_template_id, eager=self.eager) - mock_get_audit_template.assert_called_once_with( - self.context, audit_template_id, eager=self.eager) - self.assertEqual(self.context, audit_template._context) - self.eager_load_audit_template_assert(audit_template, self.fake_goal) - - @mock.patch.object(db_api.Connection, 'get_audit_template_by_uuid') - def test_get_by_uuid(self, mock_get_audit_template): - mock_get_audit_template.return_value = self.fake_audit_template - uuid = self.fake_audit_template['uuid'] - audit_template = objects.AuditTemplate.get( - self.context, uuid, eager=self.eager) - mock_get_audit_template.assert_called_once_with( - self.context, uuid, eager=self.eager) - self.assertEqual(self.context, audit_template._context) - self.eager_load_audit_template_assert(audit_template, self.fake_goal) - - @mock.patch.object(db_api.Connection, 'get_audit_template_by_name') - def test_get_by_name(self, mock_get_audit_template): - mock_get_audit_template.return_value = self.fake_audit_template - name = self.fake_audit_template['name'] - audit_template = objects.AuditTemplate.get_by_name( - self.context, name, eager=self.eager) - mock_get_audit_template.assert_called_once_with( - self.context, name, eager=self.eager) - self.assertEqual(self.context, 
audit_template._context) - self.eager_load_audit_template_assert(audit_template, self.fake_goal) - - def test_get_bad_id_and_uuid(self): - self.assertRaises(exception.InvalidIdentity, - objects.AuditTemplate.get, - self.context, 'not-a-uuid', eager=self.eager) - - @mock.patch.object(db_api.Connection, 'get_audit_template_list') - def test_list(self, mock_get_list): - mock_get_list.return_value = [self.fake_audit_template] - audit_templates = objects.AuditTemplate.list( - self.context, eager=self.eager) - mock_get_list.assert_called_once_with( - self.context, eager=self.eager, filters=None, limit=None, - marker=None, sort_dir=None, sort_key=None) - self.assertEqual(1, len(audit_templates)) - self.assertIsInstance(audit_templates[0], objects.AuditTemplate) - self.assertEqual(self.context, audit_templates[0]._context) - for audit_template in audit_templates: - self.eager_load_audit_template_assert( - audit_template, self.fake_goal) - - @mock.patch.object(db_api.Connection, 'update_audit_template') - @mock.patch.object(db_api.Connection, 'get_audit_template_by_uuid') - def test_save(self, mock_get_audit_template, mock_update_audit_template): - mock_get_audit_template.return_value = self.fake_audit_template - fake_saved_audit_template = self.fake_audit_template.copy() - fake_saved_audit_template['updated_at'] = datetime.datetime.utcnow() - mock_update_audit_template.return_value = fake_saved_audit_template - uuid = self.fake_audit_template['uuid'] - audit_template = objects.AuditTemplate.get_by_uuid( - self.context, uuid, eager=self.eager) - audit_template.goal_id = self.fake_goal.id - audit_template.save() - - mock_get_audit_template.assert_called_once_with( - self.context, uuid, eager=self.eager) - mock_update_audit_template.assert_called_once_with( - uuid, {'goal_id': self.fake_goal.id}) - self.assertEqual(self.context, audit_template._context) - self.eager_load_audit_template_assert(audit_template, self.fake_goal) - - @mock.patch.object(db_api.Connection, 
'get_audit_template_by_uuid') - def test_refresh(self, mock_get_audit_template): - returns = [dict(self.fake_audit_template, name="first name"), - dict(self.fake_audit_template, name="second name")] - mock_get_audit_template.side_effect = returns - uuid = self.fake_audit_template['uuid'] - expected = [mock.call(self.context, uuid, eager=self.eager), - mock.call(self.context, uuid, eager=self.eager)] - audit_template = objects.AuditTemplate.get( - self.context, uuid, eager=self.eager) - self.assertEqual("first name", audit_template.name) - audit_template.refresh(eager=self.eager) - self.assertEqual("second name", audit_template.name) - self.assertEqual(expected, mock_get_audit_template.call_args_list) - self.assertEqual(self.context, audit_template._context) - self.eager_load_audit_template_assert(audit_template, self.fake_goal) - - -class TestCreateDeleteAuditTemplateObject(base.DbTestCase): - - def setUp(self): - super(TestCreateDeleteAuditTemplateObject, self).setUp() - self.fake_audit_template = utils.get_test_audit_template( - created_at=datetime.datetime.utcnow()) - - @mock.patch.object(db_api.Connection, 'create_audit_template') - def test_create(self, mock_create_audit_template): - goal = utils.create_test_goal() - self.fake_audit_template['goal_id'] = goal.id - mock_create_audit_template.return_value = self.fake_audit_template - audit_template = objects.AuditTemplate( - self.context, **self.fake_audit_template) - audit_template.create() - expected_audit_template = self.fake_audit_template.copy() - expected_audit_template['created_at'] = expected_audit_template[ - 'created_at'].replace(tzinfo=iso8601.iso8601.Utc()) - mock_create_audit_template.assert_called_once_with( - expected_audit_template) - self.assertEqual(self.context, audit_template._context) - - @mock.patch.object(db_api.Connection, 'soft_delete_audit_template') - @mock.patch.object(db_api.Connection, 'get_audit_template_by_uuid') - def test_soft_delete(self, m_get_audit_template, - 
m_soft_delete_audit_template): - m_get_audit_template.return_value = self.fake_audit_template - fake_deleted_audit_template = self.fake_audit_template.copy() - fake_deleted_audit_template['deleted_at'] = datetime.datetime.utcnow() - m_soft_delete_audit_template.return_value = fake_deleted_audit_template - - expected_audit_template = fake_deleted_audit_template.copy() - expected_audit_template['created_at'] = expected_audit_template[ - 'created_at'].replace(tzinfo=iso8601.iso8601.Utc()) - expected_audit_template['deleted_at'] = expected_audit_template[ - 'deleted_at'].replace(tzinfo=iso8601.iso8601.Utc()) - del expected_audit_template['goal'] - del expected_audit_template['strategy'] - - uuid = self.fake_audit_template['uuid'] - audit_template = objects.AuditTemplate.get_by_uuid(self.context, uuid) - audit_template.soft_delete() - m_get_audit_template.assert_called_once_with( - self.context, uuid, eager=False) - m_soft_delete_audit_template.assert_called_once_with(uuid) - self.assertEqual(self.context, audit_template._context) - self.assertEqual(expected_audit_template, audit_template.as_dict()) - - @mock.patch.object(db_api.Connection, 'destroy_audit_template') - @mock.patch.object(db_api.Connection, 'get_audit_template_by_uuid') - def test_destroy(self, mock_get_audit_template, - mock_destroy_audit_template): - mock_get_audit_template.return_value = self.fake_audit_template - uuid = self.fake_audit_template['uuid'] - audit_template = objects.AuditTemplate.get_by_uuid(self.context, uuid) - audit_template.destroy() - mock_get_audit_template.assert_called_once_with( - self.context, uuid, eager=False) - mock_destroy_audit_template.assert_called_once_with(uuid) - self.assertEqual(self.context, audit_template._context) diff --git a/watcher/tests/objects/test_efficacy_indicator.py b/watcher/tests/objects/test_efficacy_indicator.py deleted file mode 100644 index 1e5bfb4..0000000 --- a/watcher/tests/objects/test_efficacy_indicator.py +++ /dev/null @@ -1,148 +0,0 @@ -# 
Copyright 2015 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock -from watcher.common import exception -# from watcher.common import utils as w_utils -from watcher import objects -from watcher.tests.db import base -from watcher.tests.db import utils - - -class TestEfficacyIndicatorObject(base.DbTestCase): - - def setUp(self): - super(TestEfficacyIndicatorObject, self).setUp() - self.fake_efficacy_indicator = utils.get_test_efficacy_indicator() - - def test_get_by_id(self): - efficacy_indicator_id = self.fake_efficacy_indicator['id'] - with mock.patch.object(self.dbapi, 'get_efficacy_indicator_by_id', - autospec=True) as mock_get_efficacy_indicator: - mock_get_efficacy_indicator.return_value = ( - self.fake_efficacy_indicator) - efficacy_indicator = objects.EfficacyIndicator.get( - self.context, efficacy_indicator_id) - mock_get_efficacy_indicator.assert_called_once_with( - self.context, efficacy_indicator_id) - self.assertEqual(self.context, efficacy_indicator._context) - - def test_get_by_uuid(self): - uuid = self.fake_efficacy_indicator['uuid'] - with mock.patch.object(self.dbapi, 'get_efficacy_indicator_by_uuid', - autospec=True) as mock_get_efficacy_indicator: - mock_get_efficacy_indicator.return_value = ( - self.fake_efficacy_indicator) - efficacy_indicator = objects.EfficacyIndicator.get( - self.context, uuid) - mock_get_efficacy_indicator.assert_called_once_with( - self.context, uuid) - 
self.assertEqual(self.context, efficacy_indicator._context) - - def test_get_bad_id_and_uuid(self): - self.assertRaises( - exception.InvalidIdentity, - objects.EfficacyIndicator.get, self.context, 'not-a-uuid') - - def test_list(self): - with mock.patch.object(self.dbapi, 'get_efficacy_indicator_list', - autospec=True) as mock_get_list: - mock_get_list.return_value = [self.fake_efficacy_indicator] - efficacy_indicators = objects.EfficacyIndicator.list(self.context) - self.assertEqual(1, mock_get_list.call_count) - self.assertEqual(1, len(efficacy_indicators)) - self.assertIsInstance( - efficacy_indicators[0], objects.EfficacyIndicator) - self.assertEqual(self.context, efficacy_indicators[0]._context) - - def test_create(self): - with mock.patch.object( - self.dbapi, 'create_efficacy_indicator', - autospec=True - ) as mock_create_efficacy_indicator: - mock_create_efficacy_indicator.return_value = ( - self.fake_efficacy_indicator) - efficacy_indicator = objects.EfficacyIndicator( - self.context, **self.fake_efficacy_indicator) - - efficacy_indicator.create() - mock_create_efficacy_indicator.assert_called_once_with( - self.fake_efficacy_indicator) - self.assertEqual(self.context, efficacy_indicator._context) - - def test_destroy(self): - uuid = self.fake_efficacy_indicator['uuid'] - with mock.patch.object( - self.dbapi, 'get_efficacy_indicator_by_uuid', - autospec=True - ) as mock_get_efficacy_indicator: - mock_get_efficacy_indicator.return_value = ( - self.fake_efficacy_indicator) - with mock.patch.object( - self.dbapi, 'destroy_efficacy_indicator', - autospec=True - ) as mock_destroy_efficacy_indicator: - efficacy_indicator = objects.EfficacyIndicator.get_by_uuid( - self.context, uuid) - efficacy_indicator.destroy() - mock_get_efficacy_indicator.assert_called_once_with( - self.context, uuid) - mock_destroy_efficacy_indicator.assert_called_once_with(uuid) - self.assertEqual(self.context, efficacy_indicator._context) - - def test_save(self): - uuid = 
self.fake_efficacy_indicator['uuid'] - with mock.patch.object( - self.dbapi, 'get_efficacy_indicator_by_uuid', - autospec=True - ) as mock_get_efficacy_indicator: - mock_get_efficacy_indicator.return_value = ( - self.fake_efficacy_indicator) - with mock.patch.object( - self.dbapi, 'update_efficacy_indicator', - autospec=True - ) as mock_update_efficacy_indicator: - efficacy_indicator = objects.EfficacyIndicator.get_by_uuid( - self.context, uuid) - efficacy_indicator.description = 'Indicator Description' - efficacy_indicator.save() - - mock_get_efficacy_indicator.assert_called_once_with( - self.context, uuid) - mock_update_efficacy_indicator.assert_called_once_with( - uuid, {'description': 'Indicator Description'}) - self.assertEqual(self.context, efficacy_indicator._context) - - def test_refresh(self): - uuid = self.fake_efficacy_indicator['uuid'] - returns = [dict(self.fake_efficacy_indicator, - description="first description"), - dict(self.fake_efficacy_indicator, - description="second description")] - expected = [mock.call(self.context, uuid), - mock.call(self.context, uuid)] - with mock.patch.object(self.dbapi, 'get_efficacy_indicator_by_uuid', - side_effect=returns, - autospec=True) as mock_get_efficacy_indicator: - efficacy_indicator = objects.EfficacyIndicator.get( - self.context, uuid) - self.assertEqual( - "first description", efficacy_indicator.description) - efficacy_indicator.refresh() - self.assertEqual( - "second description", efficacy_indicator.description) - self.assertEqual( - expected, mock_get_efficacy_indicator.call_args_list) - self.assertEqual(self.context, efficacy_indicator._context) diff --git a/watcher/tests/objects/test_goal.py b/watcher/tests/objects/test_goal.py deleted file mode 100644 index 26c6951..0000000 --- a/watcher/tests/objects/test_goal.py +++ /dev/null @@ -1,143 +0,0 @@ -# Copyright 2015 OpenStack Foundation -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import datetime - -import iso8601 -import mock - -from watcher.db.sqlalchemy import api as db_api -from watcher import objects -from watcher.tests.db import base -from watcher.tests.db import utils - - -class TestGoalObject(base.DbTestCase): - - def setUp(self): - super(TestGoalObject, self).setUp() - self.fake_goal = utils.get_test_goal( - created_at=datetime.datetime.utcnow()) - - @mock.patch.object(db_api.Connection, 'get_goal_by_id') - def test_get_by_id(self, mock_get_goal): - goal_id = self.fake_goal['id'] - mock_get_goal.return_value = self.fake_goal - goal = objects.Goal.get(self.context, goal_id) - mock_get_goal.assert_called_once_with(self.context, goal_id) - self.assertEqual(self.context, goal._context) - - @mock.patch.object(db_api.Connection, 'get_goal_by_uuid') - def test_get_by_uuid(self, mock_get_goal): - uuid = self.fake_goal['uuid'] - mock_get_goal.return_value = self.fake_goal - goal = objects.Goal.get(self.context, uuid) - mock_get_goal.assert_called_once_with(self.context, uuid) - self.assertEqual(self.context, goal._context) - - @mock.patch.object(db_api.Connection, 'get_goal_by_name') - def test_get_by_name(self, mock_get_goal): - name = self.fake_goal['name'] - mock_get_goal.return_value = self.fake_goal - goal = objects.Goal.get_by_name(self.context, name) - mock_get_goal.assert_called_once_with(self.context, name) - self.assertEqual(self.context, goal._context) - - 
@mock.patch.object(db_api.Connection, 'get_goal_list') - def test_list(self, mock_get_list): - mock_get_list.return_value = [self.fake_goal] - goals = objects.Goal.list(self.context) - self.assertEqual(1, mock_get_list.call_count) - self.assertEqual(1, len(goals)) - self.assertIsInstance(goals[0], objects.Goal) - self.assertEqual(self.context, goals[0]._context) - - @mock.patch.object(db_api.Connection, 'create_goal') - def test_create(self, mock_create_goal): - mock_create_goal.return_value = self.fake_goal - goal = objects.Goal(self.context, **self.fake_goal) - goal.create() - expected_goal = self.fake_goal.copy() - expected_goal['created_at'] = expected_goal['created_at'].replace( - tzinfo=iso8601.iso8601.Utc()) - mock_create_goal.assert_called_once_with(expected_goal) - self.assertEqual(self.context, goal._context) - - @mock.patch.object(db_api.Connection, 'destroy_goal') - @mock.patch.object(db_api.Connection, 'get_goal_by_id') - def test_destroy(self, mock_get_goal, mock_destroy_goal): - goal_id = self.fake_goal['id'] - mock_get_goal.return_value = self.fake_goal - goal = objects.Goal.get_by_id(self.context, goal_id) - goal.destroy() - mock_get_goal.assert_called_once_with( - self.context, goal_id) - mock_destroy_goal.assert_called_once_with(goal_id) - self.assertEqual(self.context, goal._context) - - @mock.patch.object(db_api.Connection, 'update_goal') - @mock.patch.object(db_api.Connection, 'get_goal_by_uuid') - def test_save(self, mock_get_goal, mock_update_goal): - mock_get_goal.return_value = self.fake_goal - goal_uuid = self.fake_goal['uuid'] - fake_saved_goal = self.fake_goal.copy() - fake_saved_goal['updated_at'] = datetime.datetime.utcnow() - mock_update_goal.return_value = fake_saved_goal - - goal = objects.Goal.get_by_uuid(self.context, goal_uuid) - goal.display_name = 'DUMMY' - goal.save() - - mock_get_goal.assert_called_once_with(self.context, goal_uuid) - mock_update_goal.assert_called_once_with( - goal_uuid, {'display_name': 'DUMMY'}) - 
self.assertEqual(self.context, goal._context) - - @mock.patch.object(db_api.Connection, 'get_goal_by_uuid') - def test_refresh(self, mock_get_goal): - fake_goal2 = utils.get_test_goal(name="BALANCE_LOAD") - returns = [self.fake_goal, fake_goal2] - mock_get_goal.side_effect = returns - uuid = self.fake_goal['uuid'] - expected = [mock.call(self.context, uuid), - mock.call(self.context, uuid)] - goal = objects.Goal.get(self.context, uuid) - self.assertEqual("TEST", goal.name) - goal.refresh() - self.assertEqual("BALANCE_LOAD", goal.name) - self.assertEqual(expected, mock_get_goal.call_args_list) - self.assertEqual(self.context, goal._context) - - @mock.patch.object(db_api.Connection, 'soft_delete_goal') - @mock.patch.object(db_api.Connection, 'get_goal_by_uuid') - def test_soft_delete(self, mock_get_goal, mock_soft_delete_goal): - mock_get_goal.return_value = self.fake_goal - fake_deleted_goal = self.fake_goal.copy() - fake_deleted_goal['deleted_at'] = datetime.datetime.utcnow() - mock_soft_delete_goal.return_value = fake_deleted_goal - - expected_goal = fake_deleted_goal.copy() - expected_goal['created_at'] = expected_goal['created_at'].replace( - tzinfo=iso8601.iso8601.Utc()) - expected_goal['deleted_at'] = expected_goal['deleted_at'].replace( - tzinfo=iso8601.iso8601.Utc()) - - uuid = self.fake_goal['uuid'] - goal = objects.Goal.get_by_uuid(self.context, uuid) - goal.soft_delete() - mock_get_goal.assert_called_once_with(self.context, uuid) - mock_soft_delete_goal.assert_called_once_with(uuid) - self.assertEqual(self.context, goal._context) - self.assertEqual(expected_goal, goal.as_dict()) diff --git a/watcher/tests/objects/test_objects.py b/watcher/tests/objects/test_objects.py deleted file mode 100644 index cc61f46..0000000 --- a/watcher/tests/objects/test_objects.py +++ /dev/null @@ -1,562 +0,0 @@ -# Copyright 2013 IBM Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import contextlib -import datetime -import gettext -import iso8601 - -import mock -from oslo_versionedobjects import base as object_base -from oslo_versionedobjects import exception as object_exception -from oslo_versionedobjects import fixture as object_fixture -import six - -from watcher.common import context -from watcher.objects import base -from watcher.objects import fields -from watcher.tests import base as test_base - -gettext.install('watcher') - - -@base.WatcherObjectRegistry.register -class MyObj(base.WatcherPersistentObject, base.WatcherObject, - base.WatcherObjectDictCompat): - VERSION = '1.5' - - fields = {'foo': fields.IntegerField(), - 'bar': fields.StringField(), - 'missing': fields.StringField()} - - def obj_load_attr(self, attrname): - setattr(self, attrname, 'loaded!') - - @object_base.remotable_classmethod - def query(cls, context): - obj = cls(context) - obj.foo = 1 - obj.bar = 'bar' - obj.obj_reset_changes() - return obj - - @object_base.remotable - def marco(self, context=None): - return 'polo' - - @object_base.remotable - def update_test(self, context=None): - if context and context.user == 'alternate': - self.bar = 'alternate-context' - else: - self.bar = 'updated' - - @object_base.remotable - def save(self, context=None): - self.obj_reset_changes() - - @object_base.remotable - def refresh(self, context=None): - self.foo = 321 - self.bar = 'refreshed' - self.obj_reset_changes() - - @object_base.remotable - def modify_save_modify(self, context=None): - self.bar = 'meow' - self.save() - self.foo = 42 - - -class MyObj2(object): - 
@classmethod - def obj_name(cls): - return 'MyObj' - - @object_base.remotable_classmethod - def get(cls, *args, **kwargs): - pass - - -@base.WatcherObjectRegistry.register_if(False) -class WatcherTestSubclassedObject(MyObj): - fields = {'new_field': fields.StringField()} - - -class _LocalTest(test_base.TestCase): - def setUp(self): - super(_LocalTest, self).setUp() - # Just in case - base.WatcherObject.indirection_api = None - - -@contextlib.contextmanager -def things_temporarily_local(): - # Temporarily go non-remote so the conductor handles - # this request directly - _api = base.WatcherObject.indirection_api - base.WatcherObject.indirection_api = None - yield - base.WatcherObject.indirection_api = _api - - -class _TestObject(object): - def test_hydration_type_error(self): - primitive = {'watcher_object.name': 'MyObj', - 'watcher_object.namespace': 'watcher', - 'watcher_object.version': '1.5', - 'watcher_object.data': {'foo': 'a'}} - self.assertRaises(ValueError, MyObj.obj_from_primitive, primitive) - - def test_hydration(self): - primitive = {'watcher_object.name': 'MyObj', - 'watcher_object.namespace': 'watcher', - 'watcher_object.version': '1.5', - 'watcher_object.data': {'foo': 1}} - obj = MyObj.obj_from_primitive(primitive) - self.assertEqual(1, obj.foo) - - def test_hydration_bad_ns(self): - primitive = {'watcher_object.name': 'MyObj', - 'watcher_object.namespace': 'foo', - 'watcher_object.version': '1.5', - 'watcher_object.data': {'foo': 1}} - self.assertRaises(object_exception.UnsupportedObjectError, - MyObj.obj_from_primitive, primitive) - - def test_dehydration(self): - expected = {'watcher_object.name': 'MyObj', - 'watcher_object.namespace': 'watcher', - 'watcher_object.version': '1.5', - 'watcher_object.data': {'foo': 1}} - obj = MyObj(self.context) - obj.foo = 1 - obj.obj_reset_changes() - self.assertEqual(expected, obj.obj_to_primitive()) - - def test_get_updates(self): - obj = MyObj(self.context) - self.assertEqual({}, obj.obj_get_changes()) - 
obj.foo = 123 - self.assertEqual({'foo': 123}, obj.obj_get_changes()) - obj.bar = 'test' - self.assertEqual({'foo': 123, 'bar': 'test'}, obj.obj_get_changes()) - obj.obj_reset_changes() - self.assertEqual({}, obj.obj_get_changes()) - - def test_object_property(self): - obj = MyObj(self.context, foo=1) - self.assertEqual(1, obj.foo) - - def test_object_property_type_error(self): - obj = MyObj(self.context) - - def fail(): - obj.foo = 'a' - self.assertRaises(ValueError, fail) - - def test_load(self): - obj = MyObj(self.context) - self.assertEqual('loaded!', obj.bar) - - def test_load_in_base(self): - @base.WatcherObjectRegistry.register_if(False) - class Foo(base.WatcherPersistentObject, base.WatcherObject, - base.WatcherObjectDictCompat): - fields = {'foobar': fields.IntegerField()} - obj = Foo(self.context) - - self.assertRaisesRegex( - NotImplementedError, "Cannot load 'foobar' in the base class", - getattr, obj, 'foobar') - - def test_loaded_in_primitive(self): - obj = MyObj(self.context) - obj.foo = 1 - obj.obj_reset_changes() - self.assertEqual('loaded!', obj.bar) - expected = {'watcher_object.name': 'MyObj', - 'watcher_object.namespace': 'watcher', - 'watcher_object.version': '1.5', - 'watcher_object.changes': ['bar'], - 'watcher_object.data': {'foo': 1, - 'bar': 'loaded!'}} - self.assertEqual(expected, obj.obj_to_primitive()) - - def test_changes_in_primitive(self): - obj = MyObj(self.context) - obj.foo = 123 - self.assertEqual(set(['foo']), obj.obj_what_changed()) - primitive = obj.obj_to_primitive() - self.assertIn('watcher_object.changes', primitive) - obj2 = MyObj.obj_from_primitive(primitive) - self.assertEqual(set(['foo']), obj2.obj_what_changed()) - obj2.obj_reset_changes() - self.assertEqual(set(), obj2.obj_what_changed()) - - def test_unknown_objtype(self): - self.assertRaises(object_exception.UnsupportedObjectError, - base.WatcherObject.obj_class_from_name, 'foo', '1.0') - - def test_with_alternate_context(self): - ctxt1 = 
context.RequestContext('foo', 'foo') - ctxt2 = context.RequestContext(user='alternate') - obj = MyObj.query(ctxt1) - obj.update_test(ctxt2) - self.assertEqual('alternate-context', obj.bar) - - def test_orphaned_object(self): - obj = MyObj.query(self.context) - obj._context = None - self.assertRaises(object_exception.OrphanedObjectError, - obj.update_test) - - def test_changed_1(self): - obj = MyObj.query(self.context) - obj.foo = 123 - self.assertEqual(set(['foo']), obj.obj_what_changed()) - obj.update_test(self.context) - self.assertEqual(set(['foo', 'bar']), obj.obj_what_changed()) - self.assertEqual(123, obj.foo) - - def test_changed_2(self): - obj = MyObj.query(self.context) - obj.foo = 123 - self.assertEqual(set(['foo']), obj.obj_what_changed()) - obj.save() - self.assertEqual(set([]), obj.obj_what_changed()) - self.assertEqual(123, obj.foo) - - def test_changed_3(self): - obj = MyObj.query(self.context) - obj.foo = 123 - self.assertEqual(set(['foo']), obj.obj_what_changed()) - obj.refresh() - self.assertEqual(set([]), obj.obj_what_changed()) - self.assertEqual(321, obj.foo) - self.assertEqual('refreshed', obj.bar) - - def test_changed_4(self): - obj = MyObj.query(self.context) - obj.bar = 'something' - self.assertEqual(set(['bar']), obj.obj_what_changed()) - obj.modify_save_modify(self.context) - self.assertEqual(set(['foo']), obj.obj_what_changed()) - self.assertEqual(42, obj.foo) - self.assertEqual('meow', obj.bar) - - def test_static_result(self): - obj = MyObj.query(self.context) - self.assertEqual('bar', obj.bar) - result = obj.marco() - self.assertEqual('polo', result) - - def test_updates(self): - obj = MyObj.query(self.context) - self.assertEqual(1, obj.foo) - obj.update_test() - self.assertEqual('updated', obj.bar) - - def test_base_attributes(self): - dt = datetime.datetime(1955, 11, 5, 0, 0, tzinfo=iso8601.iso8601.Utc()) - datatime = fields.DateTimeField() - obj = MyObj(self.context) - obj.created_at = dt - obj.updated_at = dt - expected = 
{'watcher_object.name': 'MyObj', - 'watcher_object.namespace': 'watcher', - 'watcher_object.version': '1.5', - 'watcher_object.changes': - ['created_at', 'updated_at'], - 'watcher_object.data': - {'created_at': datatime.stringify(dt), - 'updated_at': datatime.stringify(dt), - } - } - actual = obj.obj_to_primitive() - # watcher_object.changes is built from a set and order is undefined - self.assertEqual(sorted(expected['watcher_object.changes']), - sorted(actual['watcher_object.changes'])) - del expected[ - 'watcher_object.changes'], actual['watcher_object.changes'] - self.assertEqual(expected, actual) - - def test_contains(self): - obj = MyObj(self.context) - self.assertNotIn('foo', obj) - obj.foo = 1 - self.assertIn('foo', obj) - self.assertNotIn('does_not_exist', obj) - - def test_obj_attr_is_set(self): - obj = MyObj(self.context, foo=1) - self.assertTrue(obj.obj_attr_is_set('foo')) - self.assertFalse(obj.obj_attr_is_set('bar')) - self.assertRaises(AttributeError, obj.obj_attr_is_set, 'bang') - - def test_get(self): - obj = MyObj(self.context, foo=1) - # Foo has value, should not get the default - self.assertEqual(obj.get('foo', 2), 1) - # Foo has value, should return the value without error - self.assertEqual(obj.get('foo'), 1) - # Bar is not loaded, so we should get the default - self.assertEqual(obj.get('bar', 'not-loaded'), 'not-loaded') - # Bar without a default should lazy-load - self.assertEqual(obj.get('bar'), 'loaded!') - # Bar now has a default, but loaded value should be returned - self.assertEqual(obj.get('bar', 'not-loaded'), 'loaded!') - # Invalid attribute should raise AttributeError - self.assertRaises(AttributeError, obj.get, 'nothing') - # ...even with a default - self.assertRaises(AttributeError, obj.get, 'nothing', 3) - - def test_object_inheritance(self): - base_fields = ( - list(base.WatcherObject.fields) + - list(base.WatcherPersistentObject.fields)) - myobj_fields = ['foo', 'bar', 'missing'] + base_fields - myobj3_fields = ['new_field'] - 
self.assertTrue(issubclass(WatcherTestSubclassedObject, MyObj)) - self.assertEqual(len(myobj_fields), len(MyObj.fields)) - self.assertEqual(set(myobj_fields), set(MyObj.fields.keys())) - self.assertEqual(len(myobj_fields) + len(myobj3_fields), - len(WatcherTestSubclassedObject.fields)) - self.assertEqual(set(myobj_fields) | set(myobj3_fields), - set(WatcherTestSubclassedObject.fields.keys())) - - def test_get_changes(self): - obj = MyObj(self.context) - self.assertEqual({}, obj.obj_get_changes()) - obj.foo = 123 - self.assertEqual({'foo': 123}, obj.obj_get_changes()) - obj.bar = 'test' - self.assertEqual({'foo': 123, 'bar': 'test'}, obj.obj_get_changes()) - obj.obj_reset_changes() - self.assertEqual({}, obj.obj_get_changes()) - - def test_obj_fields(self): - @base.WatcherObjectRegistry.register_if(False) - class TestObj(base.WatcherPersistentObject, base.WatcherObject, - base.WatcherObjectDictCompat): - fields = {'foo': fields.IntegerField()} - obj_extra_fields = ['bar'] - - @property - def bar(self): - return 'this is bar' - - obj = TestObj(self.context) - self.assertEqual(set(['created_at', 'updated_at', 'deleted_at', - 'foo', 'bar']), - set(obj.obj_fields)) - - def test_refresh_object(self): - @base.WatcherObjectRegistry.register_if(False) - class TestObj(base.WatcherPersistentObject, base.WatcherObject, - base.WatcherObjectDictCompat): - fields = {'foo': fields.IntegerField(), - 'bar': fields.StringField()} - - obj = TestObj(self.context) - current_obj = TestObj(self.context) - obj.foo = 10 - obj.bar = 'obj.bar' - current_obj.foo = 2 - current_obj.bar = 'current.bar' - obj.obj_refresh(current_obj) - self.assertEqual(obj.foo, 2) - self.assertEqual(obj.bar, 'current.bar') - - def test_obj_constructor(self): - obj = MyObj(self.context, foo=123, bar='abc') - self.assertEqual(123, obj.foo) - self.assertEqual('abc', obj.bar) - self.assertEqual(set(['foo', 'bar']), obj.obj_what_changed()) - - def test_assign_value_without_DictCompat(self): - class 
TestObj(base.WatcherObject): - fields = {'foo': fields.IntegerField(), - 'bar': fields.StringField()} - obj = TestObj(self.context) - obj.foo = 10 - err_message = '' - try: - obj['bar'] = 'value' - except TypeError as e: - err_message = six.text_type(e) - finally: - self.assertIn("'TestObj' object does not support item assignment", - err_message) - - -class TestObject(_LocalTest, _TestObject): - pass - - -# The hashes are help developers to check if the change of objects need a -# version bump. It is md5 hash of object fields and remotable methods. -# The fingerprint values should only be changed if there is a version bump. -expected_object_fingerprints = { - 'Goal': '1.0-93881622db05e7b67a65ca885b4a022e', - 'Strategy': '1.1-73f164491bdd4c034f48083a51bdeb7b', - 'AuditTemplate': '1.1-b291973ffc5efa2c61b24fe34fdccc0b', - 'Audit': '1.3-f47ffb1ee79d8248eb991674bda565ce', - 'ActionPlan': '2.0-394f1abbf5d73d7b6675a118fe1a0284', - 'Action': '2.0-1dd4959a7e7ac30c62ef170fe08dd935', - 'EfficacyIndicator': '1.0-655b71234a82bc7478aff964639c4bb0', - 'ScoringEngine': '1.0-4abbe833544000728e17bd9e83f97576', - 'Service': '1.0-4b35b99ada9677a882c9de2b30212f35', - 'MyObj': '1.5-23c516d1e842f365f694e688d34e47c3', -} - - -def get_watcher_objects(): - """Get Watcher versioned objects - - This returns a dict of versioned objects which are - in the Watcher project namespace only. 
ie excludes - objects from os-vif and other 3rd party modules - :return: a dict mapping class names to lists of versioned objects - """ - all_classes = base.WatcherObjectRegistry.obj_classes() - watcher_classes = {} - for name in all_classes: - objclasses = all_classes[name] - if (objclasses[0].OBJ_PROJECT_NAMESPACE != - base.WatcherObject.OBJ_PROJECT_NAMESPACE): - continue - watcher_classes[name] = objclasses - return watcher_classes - - -class TestObjectVersions(test_base.TestCase): - - def test_object_version_check(self): - classes = base.WatcherObjectRegistry.obj_classes() - checker = object_fixture.ObjectVersionChecker(obj_classes=classes) - # Compute the difference between actual fingerprints and - # expect fingerprints. expect = actual = {} if there is no change. - expect, actual = checker.test_hashes(expected_object_fingerprints) - self.assertEqual(expect, actual, - "Some objects fields or remotable methods have been " - "modified. Please make sure the version of those " - "objects have been bumped and then update " - "expected_object_fingerprints with the new hashes. 
") - - -class TestObjectSerializer(test_base.TestCase): - - def test_object_serialization(self): - ser = base.WatcherObjectSerializer() - obj = MyObj(self.context) - primitive = ser.serialize_entity(self.context, obj) - self.assertIn('watcher_object.name', primitive) - obj2 = ser.deserialize_entity(self.context, primitive) - self.assertIsInstance(obj2, MyObj) - self.assertEqual(self.context, obj2._context) - - def test_object_serialization_iterables(self): - ser = base.WatcherObjectSerializer() - obj = MyObj(self.context) - for iterable in (list, tuple, set): - thing = iterable([obj]) - primitive = ser.serialize_entity(self.context, thing) - self.assertEqual(1, len(primitive)) - for item in primitive: - self.assertFalse(isinstance(item, base.WatcherObject)) - thing2 = ser.deserialize_entity(self.context, primitive) - self.assertEqual(1, len(thing2)) - for item in thing2: - self.assertIsInstance(item, MyObj) - - @mock.patch('watcher.objects.base.WatcherObject.indirection_api') - def _test_deserialize_entity_newer(self, obj_version, backported_to, - mock_indirection_api, - my_version='1.6'): - ser = base.WatcherObjectSerializer() - mock_indirection_api.object_backport_versions.return_value \ - = 'backported' - - @base.WatcherObjectRegistry.register - class MyTestObj(MyObj): - VERSION = my_version - - obj = MyTestObj(self.context) - obj.VERSION = obj_version - primitive = obj.obj_to_primitive() - result = ser.deserialize_entity(self.context, primitive) - if backported_to is None: - self.assertFalse( - mock_indirection_api.object_backport_versions.called) - else: - self.assertEqual('backported', result) - versions = object_base.obj_tree_get_versions('MyTestObj') - mock_indirection_api.object_backport_versions.assert_called_with( - self.context, primitive, versions) - - def test_deserialize_entity_newer_version_backports(self): - "Test object with unsupported (newer) version" - self._test_deserialize_entity_newer('1.25', '1.6') - - def 
test_deserialize_entity_same_revision_does_not_backport(self): - "Test object with supported revision" - self._test_deserialize_entity_newer('1.6', None) - - def test_deserialize_entity_newer_revision_does_not_backport_zero(self): - "Test object with supported revision" - self._test_deserialize_entity_newer('1.6.0', None) - - def test_deserialize_entity_newer_revision_does_not_backport(self): - "Test object with supported (newer) revision" - self._test_deserialize_entity_newer('1.6.1', None) - - def test_deserialize_entity_newer_version_passes_revision(self): - "Test object with unsupported (newer) version and revision" - self._test_deserialize_entity_newer('1.7', '1.6.1', my_version='1.6.1') - - -class TestRegistry(test_base.TestCase): - - @mock.patch('watcher.objects.base.objects') - def test_hook_chooses_newer_properly(self, mock_objects): - reg = base.WatcherObjectRegistry() - reg.registration_hook(MyObj, 0) - - class MyNewerObj(object): - VERSION = '1.123' - - @classmethod - def obj_name(cls): - return 'MyObj' - - self.assertEqual(MyObj, mock_objects.MyObj) - reg.registration_hook(MyNewerObj, 0) - self.assertEqual(MyNewerObj, mock_objects.MyObj) - - @mock.patch('watcher.objects.base.objects') - def test_hook_keeps_newer_properly(self, mock_objects): - reg = base.WatcherObjectRegistry() - reg.registration_hook(MyObj, 0) - - class MyOlderObj(object): - VERSION = '1.1' - - @classmethod - def obj_name(cls): - return 'MyObj' - - self.assertEqual(MyObj, mock_objects.MyObj) - reg.registration_hook(MyOlderObj, 0) - self.assertEqual(MyObj, mock_objects.MyObj) diff --git a/watcher/tests/objects/test_scoring_engine.py b/watcher/tests/objects/test_scoring_engine.py deleted file mode 100644 index e27fbcf..0000000 --- a/watcher/tests/objects/test_scoring_engine.py +++ /dev/null @@ -1,152 +0,0 @@ -# Copyright 2016 Intel -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import datetime - -import iso8601 -import mock - -from watcher.db.sqlalchemy import api as db_api -from watcher import objects -from watcher.tests.db import base -from watcher.tests.db import utils - - -class TestScoringEngineObject(base.DbTestCase): - - def setUp(self): - super(TestScoringEngineObject, self).setUp() - self.fake_scoring_engine = utils.get_test_scoring_engine( - created_at=datetime.datetime.utcnow()) - - @mock.patch.object(db_api.Connection, 'get_scoring_engine_by_id') - def test_get_by_id(self, mock_get_scoring_engine): - scoring_engine_id = self.fake_scoring_engine['id'] - mock_get_scoring_engine.return_value = self.fake_scoring_engine - scoring_engine = objects.ScoringEngine.get_by_id( - self.context, scoring_engine_id) - mock_get_scoring_engine.assert_called_once_with( - self.context, scoring_engine_id) - self.assertEqual(self.context, scoring_engine._context) - - @mock.patch.object(db_api.Connection, 'get_scoring_engine_by_uuid') - def test_get_by_uuid(self, mock_get_scoring_engine): - se_uuid = self.fake_scoring_engine['uuid'] - mock_get_scoring_engine.return_value = self.fake_scoring_engine - scoring_engine = objects.ScoringEngine.get_by_uuid( - self.context, se_uuid) - mock_get_scoring_engine.assert_called_once_with( - self.context, se_uuid) - self.assertEqual(self.context, scoring_engine._context) - - @mock.patch.object(db_api.Connection, 'get_scoring_engine_by_uuid') - def 
test_get_by_name(self, mock_get_scoring_engine): - scoring_engine_uuid = self.fake_scoring_engine['uuid'] - mock_get_scoring_engine.return_value = self.fake_scoring_engine - scoring_engine = objects.ScoringEngine.get( - self.context, scoring_engine_uuid) - mock_get_scoring_engine.assert_called_once_with( - self.context, scoring_engine_uuid) - self.assertEqual(self.context, scoring_engine._context) - - @mock.patch.object(db_api.Connection, 'get_scoring_engine_list') - def test_list(self, mock_get_list): - mock_get_list.return_value = [self.fake_scoring_engine] - scoring_engines = objects.ScoringEngine.list(self.context) - self.assertEqual(1, mock_get_list.call_count, 1) - self.assertEqual(1, len(scoring_engines)) - self.assertIsInstance(scoring_engines[0], objects.ScoringEngine) - self.assertEqual(self.context, scoring_engines[0]._context) - - @mock.patch.object(db_api.Connection, 'create_scoring_engine') - def test_create(self, mock_create_scoring_engine): - mock_create_scoring_engine.return_value = self.fake_scoring_engine - scoring_engine = objects.ScoringEngine( - self.context, **self.fake_scoring_engine) - scoring_engine.create() - expected_scoring_engine = self.fake_scoring_engine.copy() - expected_scoring_engine['created_at'] = expected_scoring_engine[ - 'created_at'].replace(tzinfo=iso8601.iso8601.Utc()) - mock_create_scoring_engine.assert_called_once_with( - expected_scoring_engine) - self.assertEqual(self.context, scoring_engine._context) - - @mock.patch.object(db_api.Connection, 'destroy_scoring_engine') - @mock.patch.object(db_api.Connection, 'get_scoring_engine_by_id') - def test_destroy(self, mock_get_scoring_engine, - mock_destroy_scoring_engine): - mock_get_scoring_engine.return_value = self.fake_scoring_engine - _id = self.fake_scoring_engine['id'] - scoring_engine = objects.ScoringEngine.get_by_id(self.context, _id) - scoring_engine.destroy() - mock_get_scoring_engine.assert_called_once_with(self.context, _id) - 
mock_destroy_scoring_engine.assert_called_once_with(_id) - self.assertEqual(self.context, scoring_engine._context) - - @mock.patch.object(db_api.Connection, 'update_scoring_engine') - @mock.patch.object(db_api.Connection, 'get_scoring_engine_by_uuid') - def test_save(self, mock_get_scoring_engine, mock_update_scoring_engine): - mock_get_scoring_engine.return_value = self.fake_scoring_engine - fake_saved_scoring_engine = self.fake_scoring_engine.copy() - fake_saved_scoring_engine['updated_at'] = datetime.datetime.utcnow() - mock_update_scoring_engine.return_value = fake_saved_scoring_engine - - uuid = self.fake_scoring_engine['uuid'] - scoring_engine = objects.ScoringEngine.get_by_uuid(self.context, uuid) - scoring_engine.description = 'UPDATED DESCRIPTION' - scoring_engine.save() - - mock_get_scoring_engine.assert_called_once_with(self.context, uuid) - mock_update_scoring_engine.assert_called_once_with( - uuid, {'description': 'UPDATED DESCRIPTION'}) - self.assertEqual(self.context, scoring_engine._context) - - @mock.patch.object(db_api.Connection, 'get_scoring_engine_by_id') - def test_refresh(self, mock_get_scoring_engine): - returns = [ - dict(self.fake_scoring_engine, description="first description"), - dict(self.fake_scoring_engine, description="second description")] - mock_get_scoring_engine.side_effect = returns - _id = self.fake_scoring_engine['id'] - expected = [mock.call(self.context, _id), - mock.call(self.context, _id)] - scoring_engine = objects.ScoringEngine.get_by_id(self.context, _id) - self.assertEqual("first description", scoring_engine.description) - scoring_engine.refresh() - self.assertEqual("second description", scoring_engine.description) - self.assertEqual(expected, mock_get_scoring_engine.call_args_list) - self.assertEqual(self.context, scoring_engine._context) - - @mock.patch.object(db_api.Connection, 'soft_delete_scoring_engine') - @mock.patch.object(db_api.Connection, 'get_scoring_engine_by_id') - def test_soft_delete(self, 
mock_get_scoring_engine, mock_soft_delete): - mock_get_scoring_engine.return_value = self.fake_scoring_engine - fake_deleted_scoring_engine = self.fake_scoring_engine.copy() - fake_deleted_scoring_engine['deleted_at'] = datetime.datetime.utcnow() - mock_soft_delete.return_value = fake_deleted_scoring_engine - - expected_scoring_engine = fake_deleted_scoring_engine.copy() - expected_scoring_engine['created_at'] = expected_scoring_engine[ - 'created_at'].replace(tzinfo=iso8601.iso8601.Utc()) - expected_scoring_engine['deleted_at'] = expected_scoring_engine[ - 'deleted_at'].replace(tzinfo=iso8601.iso8601.Utc()) - - _id = self.fake_scoring_engine['id'] - scoring_engine = objects.ScoringEngine.get_by_id(self.context, _id) - scoring_engine.soft_delete() - mock_get_scoring_engine.assert_called_once_with(self.context, _id) - mock_soft_delete.assert_called_once_with(_id) - self.assertEqual(self.context, scoring_engine._context) - self.assertEqual(expected_scoring_engine, scoring_engine.as_dict()) diff --git a/watcher/tests/objects/test_service.py b/watcher/tests/objects/test_service.py deleted file mode 100644 index 0dcef21..0000000 --- a/watcher/tests/objects/test_service.py +++ /dev/null @@ -1,116 +0,0 @@ -# Copyright 2015 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import datetime - -import iso8601 -import mock - -from watcher.db.sqlalchemy import api as db_api -from watcher import objects -from watcher.tests.db import base -from watcher.tests.db import utils - - -class TestServiceObject(base.DbTestCase): - - def setUp(self): - super(TestServiceObject, self).setUp() - self.fake_service = utils.get_test_service( - created_at=datetime.datetime.utcnow()) - - @mock.patch.object(db_api.Connection, 'get_service_by_id') - def test_get_by_id(self, mock_get_service): - service_id = self.fake_service['id'] - mock_get_service.return_value = self.fake_service - service = objects.Service.get(self.context, service_id) - mock_get_service.assert_called_once_with(self.context, service_id) - self.assertEqual(self.context, service._context) - - @mock.patch.object(db_api.Connection, 'get_service_list') - def test_list(self, mock_get_list): - mock_get_list.return_value = [self.fake_service] - services = objects.Service.list(self.context) - self.assertEqual(1, mock_get_list.call_count, 1) - self.assertEqual(1, len(services)) - self.assertIsInstance(services[0], objects.Service) - self.assertEqual(self.context, services[0]._context) - - @mock.patch.object(db_api.Connection, 'create_service') - def test_create(self, mock_create_service): - mock_create_service.return_value = self.fake_service - service = objects.Service(self.context, **self.fake_service) - - service.create() - expected_service = self.fake_service.copy() - expected_service['created_at'] = expected_service[ - 'created_at'].replace(tzinfo=iso8601.iso8601.Utc()) - - mock_create_service.assert_called_once_with(expected_service) - self.assertEqual(self.context, service._context) - - @mock.patch.object(db_api.Connection, 'update_service') - @mock.patch.object(db_api.Connection, 'get_service_by_id') - def test_save(self, mock_get_service, mock_update_service): - mock_get_service.return_value = self.fake_service - fake_saved_service = self.fake_service.copy() - 
fake_saved_service['updated_at'] = datetime.datetime.utcnow() - mock_update_service.return_value = fake_saved_service - _id = self.fake_service['id'] - service = objects.Service.get(self.context, _id) - service.name = 'UPDATED NAME' - service.save() - - mock_get_service.assert_called_once_with(self.context, _id) - mock_update_service.assert_called_once_with( - _id, {'name': 'UPDATED NAME'}) - self.assertEqual(self.context, service._context) - - @mock.patch.object(db_api.Connection, 'get_service_by_id') - def test_refresh(self, mock_get_service): - returns = [dict(self.fake_service, name="first name"), - dict(self.fake_service, name="second name")] - mock_get_service.side_effect = returns - _id = self.fake_service['id'] - expected = [mock.call(self.context, _id), - mock.call(self.context, _id)] - service = objects.Service.get(self.context, _id) - self.assertEqual("first name", service.name) - service.refresh() - self.assertEqual("second name", service.name) - self.assertEqual(expected, mock_get_service.call_args_list) - self.assertEqual(self.context, service._context) - - @mock.patch.object(db_api.Connection, 'soft_delete_service') - @mock.patch.object(db_api.Connection, 'get_service_by_id') - def test_soft_delete(self, mock_get_service, mock_soft_delete): - mock_get_service.return_value = self.fake_service - fake_deleted_service = self.fake_service.copy() - fake_deleted_service['deleted_at'] = datetime.datetime.utcnow() - mock_soft_delete.return_value = fake_deleted_service - - expected_service = fake_deleted_service.copy() - expected_service['created_at'] = expected_service[ - 'created_at'].replace(tzinfo=iso8601.iso8601.Utc()) - expected_service['deleted_at'] = expected_service[ - 'deleted_at'].replace(tzinfo=iso8601.iso8601.Utc()) - - _id = self.fake_service['id'] - service = objects.Service.get(self.context, _id) - service.soft_delete() - mock_get_service.assert_called_once_with(self.context, _id) - mock_soft_delete.assert_called_once_with(_id) - 
self.assertEqual(self.context, service._context) - self.assertEqual(expected_service, service.as_dict()) diff --git a/watcher/tests/objects/test_strategy.py b/watcher/tests/objects/test_strategy.py deleted file mode 100644 index 3d1a47d..0000000 --- a/watcher/tests/objects/test_strategy.py +++ /dev/null @@ -1,167 +0,0 @@ -# Copyright 2015 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -import mock - -from watcher.common import exception -from watcher.db.sqlalchemy import api as db_api -from watcher import objects -from watcher.tests.db import base -from watcher.tests.db import utils - - -class TestStrategyObject(base.DbTestCase): - - goal_id = 2 - - scenarios = [ - ('non_eager', dict( - eager=False, fake_strategy=utils.get_test_strategy( - goal_id=goal_id))), - ('eager_with_non_eager_load', dict( - eager=True, fake_strategy=utils.get_test_strategy( - goal_id=goal_id))), - ('eager_with_eager_load', dict( - eager=True, fake_strategy=utils.get_test_strategy( - goal_id=goal_id, goal=utils.get_test_goal(id=goal_id)))), - ] - - def setUp(self): - super(TestStrategyObject, self).setUp() - self.fake_goal = utils.create_test_goal(id=self.goal_id) - - def eager_load_strategy_assert(self, strategy): - if self.eager: - self.assertIsNotNone(strategy.goal) - fields_to_check = set( - super(objects.Goal, objects.Goal).fields - ).symmetric_difference(objects.Goal.fields) - db_data = { - k: v for k, v in self.fake_goal.as_dict().items() 
- if k in fields_to_check} - object_data = { - k: v for k, v in strategy.goal.as_dict().items() - if k in fields_to_check} - self.assertEqual(db_data, object_data) - - @mock.patch.object(db_api.Connection, 'get_strategy_by_id') - def test_get_by_id(self, mock_get_strategy): - strategy_id = self.fake_strategy['id'] - mock_get_strategy.return_value = self.fake_strategy - strategy = objects.Strategy.get( - self.context, strategy_id, eager=self.eager) - mock_get_strategy.assert_called_once_with( - self.context, strategy_id, eager=self.eager) - self.assertEqual(self.context, strategy._context) - self.eager_load_strategy_assert(strategy) - - @mock.patch.object(db_api.Connection, 'get_strategy_by_uuid') - def test_get_by_uuid(self, mock_get_strategy): - uuid = self.fake_strategy['uuid'] - mock_get_strategy.return_value = self.fake_strategy - strategy = objects.Strategy.get(self.context, uuid, eager=self.eager) - mock_get_strategy.assert_called_once_with( - self.context, uuid, eager=self.eager) - self.assertEqual(self.context, strategy._context) - self.eager_load_strategy_assert(strategy) - - def test_get_bad_uuid(self): - self.assertRaises(exception.InvalidIdentity, - objects.Strategy.get, self.context, 'not-a-uuid') - - @mock.patch.object(db_api.Connection, 'get_strategy_list') - def test_list(self, mock_get_list): - mock_get_list.return_value = [self.fake_strategy] - strategies = objects.Strategy.list(self.context, eager=self.eager) - self.assertEqual(1, mock_get_list.call_count, 1) - self.assertEqual(1, len(strategies)) - self.assertIsInstance(strategies[0], objects.Strategy) - self.assertEqual(self.context, strategies[0]._context) - for strategy in strategies: - self.eager_load_strategy_assert(strategy) - - @mock.patch.object(db_api.Connection, 'update_strategy') - @mock.patch.object(db_api.Connection, 'get_strategy_by_id') - def test_save(self, mock_get_strategy, mock_update_strategy): - _id = self.fake_strategy['id'] - mock_get_strategy.return_value = 
self.fake_strategy - strategy = objects.Strategy.get_by_id( - self.context, _id, eager=self.eager) - strategy.name = 'UPDATED NAME' - strategy.save() - - mock_get_strategy.assert_called_once_with( - self.context, _id, eager=self.eager) - mock_update_strategy.assert_called_once_with( - _id, {'name': 'UPDATED NAME'}) - self.assertEqual(self.context, strategy._context) - self.eager_load_strategy_assert(strategy) - - @mock.patch.object(db_api.Connection, 'get_strategy_by_id') - def test_refresh(self, mock_get_strategy): - _id = self.fake_strategy['id'] - returns = [dict(self.fake_strategy, name="first name"), - dict(self.fake_strategy, name="second name")] - mock_get_strategy.side_effect = returns - expected = [mock.call(self.context, _id, eager=self.eager), - mock.call(self.context, _id, eager=self.eager)] - strategy = objects.Strategy.get(self.context, _id, eager=self.eager) - self.assertEqual("first name", strategy.name) - strategy.refresh(eager=self.eager) - self.assertEqual("second name", strategy.name) - self.assertEqual(expected, mock_get_strategy.call_args_list) - self.assertEqual(self.context, strategy._context) - self.eager_load_strategy_assert(strategy) - - -class TestCreateDeleteStrategyObject(base.DbTestCase): - - def setUp(self): - super(TestCreateDeleteStrategyObject, self).setUp() - self.fake_goal = utils.create_test_goal() - self.fake_strategy = utils.get_test_strategy(goal_id=self.fake_goal.id) - - @mock.patch.object(db_api.Connection, 'create_strategy') - def test_create(self, mock_create_strategy): - mock_create_strategy.return_value = self.fake_strategy - strategy = objects.Strategy(self.context, **self.fake_strategy) - strategy.create() - mock_create_strategy.assert_called_once_with(self.fake_strategy) - self.assertEqual(self.context, strategy._context) - - @mock.patch.object(db_api.Connection, 'soft_delete_strategy') - @mock.patch.object(db_api.Connection, 'get_strategy_by_id') - def test_soft_delete(self, mock_get_strategy, mock_soft_delete): - 
_id = self.fake_strategy['id'] - mock_get_strategy.return_value = self.fake_strategy - strategy = objects.Strategy.get_by_id(self.context, _id) - strategy.soft_delete() - mock_get_strategy.assert_called_once_with( - self.context, _id, eager=False) - mock_soft_delete.assert_called_once_with(_id) - self.assertEqual(self.context, strategy._context) - - @mock.patch.object(db_api.Connection, 'destroy_strategy') - @mock.patch.object(db_api.Connection, 'get_strategy_by_id') - def test_destroy(self, mock_get_strategy, mock_destroy_strategy): - _id = self.fake_strategy['id'] - mock_get_strategy.return_value = self.fake_strategy - strategy = objects.Strategy.get_by_id(self.context, _id) - strategy.destroy() - mock_get_strategy.assert_called_once_with( - self.context, _id, eager=False) - mock_destroy_strategy.assert_called_once_with(_id) - self.assertEqual(self.context, strategy._context) diff --git a/watcher/tests/objects/utils.py b/watcher/tests/objects/utils.py deleted file mode 100644 index 18ec4aa..0000000 --- a/watcher/tests/objects/utils.py +++ /dev/null @@ -1,256 +0,0 @@ -# Copyright 2014 Rackspace Hosting -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-"""Watcher object test utilities.""" - -from watcher import objects -from watcher.tests.db import utils as db_utils - - -def _load_related_objects(context, cls, db_data): - """Replace the DB data with its object counterpart""" - obj_data = db_data.copy() - for name, (obj_cls, _) in cls.object_fields.items(): - if obj_data.get(name): - obj_data[name] = obj_cls(context, **obj_data.get(name).as_dict()) - else: - del obj_data[name] - - return obj_data - - -def _load_test_obj(context, cls, obj_data, **kw): - # Let DB generate ID if it isn't specified explicitly - if 'id' not in kw: - del obj_data['id'] - obj = cls(context) - for key in obj_data: - setattr(obj, key, obj_data[key]) - return obj - - -def get_test_audit_template(context, **kw): - """Return a AuditTemplate object with appropriate attributes. - - NOTE: The object leaves the attributes marked as changed, such - that a create() could be used to commit it to the DB. - """ - obj_cls = objects.AuditTemplate - db_data = db_utils.get_test_audit_template(**kw) - obj_data = _load_related_objects(context, obj_cls, db_data) - - return _load_test_obj(context, obj_cls, obj_data, **kw) - - -def create_test_audit_template(context, **kw): - """Create and return a test audit_template object. - - Create a audit template in the DB and return an AuditTemplate object - with appropriate attributes. - """ - audit_template = get_test_audit_template(context, **kw) - audit_template.create() - return audit_template - - -def get_test_audit(context, **kw): - """Return a Audit object with appropriate attributes. - - NOTE: The object leaves the attributes marked as changed, such - that a create() could be used to commit it to the DB. - """ - obj_cls = objects.Audit - db_data = db_utils.get_test_audit(**kw) - obj_data = _load_related_objects(context, obj_cls, db_data) - - return _load_test_obj(context, obj_cls, obj_data, **kw) - - -def create_test_audit(context, **kw): - """Create and return a test audit object. 
- - Create a audit in the DB and return an Audit object with appropriate - attributes. - """ - audit = get_test_audit(context, **kw) - audit.create() - return audit - - -def get_test_action_plan(context, **kw): - """Return a ActionPlan object with appropriate attributes. - - NOTE: The object leaves the attributes marked as changed, such - that a create() could be used to commit it to the DB. - """ - obj_cls = objects.ActionPlan - db_data = db_utils.get_test_action_plan(**kw) - obj_data = _load_related_objects(context, obj_cls, db_data) - - return _load_test_obj(context, obj_cls, obj_data, **kw) - - -def create_test_action_plan(context, **kw): - """Create and return a test action_plan object. - - Create a action plan in the DB and return a ActionPlan object with - appropriate attributes. - """ - action_plan = get_test_action_plan(context, **kw) - action_plan.create() - return action_plan - - -def get_test_action(context, **kw): - """Return a Action object with appropriate attributes. - - NOTE: The object leaves the attributes marked as changed, such - that a create() could be used to commit it to the DB. - """ - obj_cls = objects.Action - db_data = db_utils.get_test_action(**kw) - obj_data = _load_related_objects(context, obj_cls, db_data) - - return _load_test_obj(context, obj_cls, obj_data, **kw) - - -def create_test_action(context, **kw): - """Create and return a test action object. - - Create a action in the DB and return a Action object with appropriate - attributes. - """ - action = get_test_action(context, **kw) - action.create() - return action - - -def get_test_goal(context, **kw): - """Return a Goal object with appropriate attributes. - - NOTE: The object leaves the attributes marked as changed, such - that a create() could be used to commit it to the DB. 
- """ - obj_cls = objects.Goal - db_data = db_utils.get_test_goal(**kw) - obj_data = _load_related_objects(context, obj_cls, db_data) - - return _load_test_obj(context, obj_cls, obj_data, **kw) - - -def create_test_goal(context, **kw): - """Create and return a test goal object. - - Create a goal in the DB and return a Goal object with appropriate - attributes. - """ - goal = get_test_goal(context, **kw) - goal.create() - return goal - - -def get_test_scoring_engine(context, **kw): - """Return a ScoringEngine object with appropriate attributes. - - NOTE: The object leaves the attributes marked as changed, such - that a create() could be used to commit it to the DB. - """ - obj_cls = objects.ScoringEngine - db_data = db_utils.get_test_scoring_engine(**kw) - obj_data = _load_related_objects(context, obj_cls, db_data) - - return _load_test_obj(context, obj_cls, obj_data, **kw) - - -def create_test_scoring_engine(context, **kw): - """Create and return a test scoring engine object. - - Create a scoring engine in the DB and return a ScoringEngine object with - appropriate attributes. - """ - scoring_engine = get_test_scoring_engine(context, **kw) - scoring_engine.create() - return scoring_engine - - -def get_test_service(context, **kw): - """Return a Service object with appropriate attributes. - - NOTE: The object leaves the attributes marked as changed, such - that a create() could be used to commit it to the DB. - """ - obj_cls = objects.Service - db_data = db_utils.get_test_service(**kw) - obj_data = _load_related_objects(context, obj_cls, db_data) - - return _load_test_obj(context, obj_cls, obj_data, **kw) - - -def create_test_service(context, **kw): - """Create and return a test service object. - - Create a service in the DB and return a Service object with - appropriate attributes. - """ - service = get_test_service(context, **kw) - service.create() - return service - - -def get_test_strategy(context, **kw): - """Return a Strategy object with appropriate attributes. 
- - NOTE: The object leaves the attributes marked as changed, such - that a create() could be used to commit it to the DB. - """ - obj_cls = objects.Strategy - db_data = db_utils.get_test_strategy(**kw) - obj_data = _load_related_objects(context, obj_cls, db_data) - - return _load_test_obj(context, obj_cls, obj_data, **kw) - - -def create_test_strategy(context, **kw): - """Create and return a test strategy object. - - Create a strategy in the DB and return a Strategy object with appropriate - attributes. - """ - strategy = get_test_strategy(context, **kw) - strategy.create() - return strategy - - -def get_test_efficacy_indicator(context, **kw): - """Return a EfficacyIndicator object with appropriate attributes. - - NOTE: The object leaves the attributes marked as changed, such - that a create() could be used to commit it to the DB. - """ - obj_cls = objects.EfficacyIndicator - db_data = db_utils.get_test_efficacy_indicator(**kw) - obj_data = _load_related_objects(context, obj_cls, db_data) - - return _load_test_obj(context, obj_cls, obj_data, **kw) - - -def create_test_efficacy_indicator(context, **kw): - """Create and return a test efficacy indicator object. - - Create a efficacy indicator in the DB and return a EfficacyIndicator object - with appropriate attributes. - """ - efficacy_indicator = get_test_efficacy_indicator(context, **kw) - efficacy_indicator.create() - return efficacy_indicator diff --git a/watcher/tests/policy_fixture.py b/watcher/tests/policy_fixture.py deleted file mode 100644 index 8a5b4e8..0000000 --- a/watcher/tests/policy_fixture.py +++ /dev/null @@ -1,44 +0,0 @@ -# Copyright 2012 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os - -import fixtures -from oslo_config import cfg -from oslo_policy import _parser -from oslo_policy import opts as policy_opts - -from watcher.common import policy as watcher_policy -from watcher.tests import fake_policy - -CONF = cfg.CONF - - -class PolicyFixture(fixtures.Fixture): - - def _setUp(self): - self.policy_dir = self.useFixture(fixtures.TempDir()) - self.policy_file_name = os.path.join(self.policy_dir.path, - 'policy.json') - with open(self.policy_file_name, 'w') as policy_file: - policy_file.write(fake_policy.policy_data) - policy_opts.set_defaults(CONF) - CONF.set_override('policy_file', self.policy_file_name, 'oslo_policy') - watcher_policy._ENFORCER = None - self.addCleanup(watcher_policy.init().clear) - - def set_rules(self, rules): - policy = watcher_policy._ENFORCER - policy.set_rules({k: _parser.parse_rule(v) - for k, v in rules.items()}) diff --git a/watcher/version.py b/watcher/version.py deleted file mode 100644 index 42c81b0..0000000 --- a/watcher/version.py +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright 2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -import pbr.version - -version_info = pbr.version.VersionInfo('python-watcher') -version_string = version_info.version_string diff --git a/watcher_tempest_plugin/README.rst b/watcher_tempest_plugin/README.rst deleted file mode 100644 index 1fd805f..0000000 --- a/watcher_tempest_plugin/README.rst +++ /dev/null @@ -1,158 +0,0 @@ -.. - Except where otherwise noted, this document is licensed under Creative - Commons Attribution 3.0 License. You can view the license at: - - https://creativecommons.org/licenses/by/3.0/ - -.. _tempest_tests: - -Tempest tests -============= - -The following procedure gets you started with Tempest testing but you can also -refer to the `Tempest documentation`_ for more details. - -.. _Tempest documentation: https://docs.openstack.org/tempest/latest - - -Tempest installation --------------------- - -To install Tempest you can issue the following commands:: - - $ git clone https://github.com/openstack/tempest/ - $ cd tempest/ - $ pip install . - -The folder you are into now will be called ```` from now onwards. - -Please note that although it is fully working outside a virtual environment, it -is recommended to install within a `venv`. - - -Watcher Tempest testing setup ------------------------------ - -You can now install Watcher alongside it in development mode by issuing the -following command:: - - $ pip install -e - -Then setup a local working environment (here ``watcher-cloud``) for running -Tempest for Watcher which shall contain the configuration for your OpenStack -integration platform. 
- -In a virtual environment, you can do so by issuing the following command:: - - $ cd - $ tempest init watcher-cloud - -Otherwise, if you are not using a virtualenv:: - - $ cd - $ tempest init --config-dir ./etc watcher-cloud - -By default the configuration file is empty so before starting, you need to -issue the following commands:: - - $ cd /watcher-cloud/etc - $ cp tempest.conf.sample tempest.conf - -At this point you need to edit the ``watcher-cloud/etc/tempest.conf`` -file as described in the `Tempest configuration guide`_. -Shown below is a minimal configuration you need to set within your -``tempest.conf`` configuration file which can get you started. - -For Keystone V3:: - - [identity] - uri_v3 = http://:/v3 - auth_version = v3 - - [auth] - admin_username = - admin_password = - admin_tenant_name = - admin_domain_name = - - [identity-feature-enabled] - api_v2 = false - api_v3 = true - -For Keystone V2:: - - [identity] - uri = http://:/v2.0 - auth_version = v2 - - [auth] - admin_tenant_name = - admin_username = - admin_password = - -In both cases:: - - [network] - public_network_id = - -You now have the minimum configuration for running Watcher Tempest tests on a -single node. - -Since deploying Watcher with only a single compute node is not very useful, a -few more configuration have to be set in your ``tempest.conf`` file in order to -enable the execution of multi-node scenarios:: - - [compute] - # To indicate Tempest test that you have provided enough compute nodes - min_compute_nodes = 2 - - # Image UUID you can get using the "glance image-list" command - image_ref = - - -For more information, please refer to: - -- Keystone connection: https://docs.openstack.org/tempest/latest/configuration.html#keystone-connection-info -- Dynamic Keystone Credentials: https://docs.openstack.org/tempest/latest/configuration.html#dynamic-credentials - -.. _virtual environment: http://docs.python-guide.org/en/latest/dev/virtualenvs/ -.. 
_Tempest configuration guide: http://docs.openstack.org/tempest/latest/configuration.html - - -Watcher Tempest tests execution -------------------------------- - -To list all Watcher Tempest cases, you can issue the following commands:: - - $ cd - $ testr list-tests watcher - -To run only these tests in Tempest, you can then issue these commands:: - - $ ./run_tempest.sh --config watcher-cloud/etc/tempest.conf -N -- watcher - -Or alternatively the following commands if you are:: - - $ cd /watcher-cloud - $ ../run_tempest.sh -N -- watcher - -To run a single test case, go to Tempest directory, then run with test case -name, e.g.:: - - $ cd - $ ./run_tempest.sh --config watcher-cloud/etc/tempest.conf -N \ - -- watcher_tempest_plugin.tests.api.admin.test_audit_template.TestCreateDeleteAuditTemplate.test_create_audit_template - -Alternatively, you can also run the Watcher Tempest plugin tests using tox. But -before you can do so, you need to follow the Tempest explanation on running -`tox with plugins`_. Then, run:: - - $ export TEMPEST_CONFIG_DIR=/watcher-cloud/etc/ - $ tox -eall-plugin watcher - -.. _tox with plugins: https://docs.openstack.org/tempest/latest/plugin.html#notes-for-using-plugins-with-virtualenvs - -And, to run a specific test:: - - $ export TEMPEST_CONFIG_DIR=/watcher-cloud/etc/ - $ tox -eall-plugin watcher_tempest_plugin.tests.api.admin.test_audit_template.TestCreateDeleteAuditTemplate.test_create_audit_template diff --git a/watcher_tempest_plugin/__init__.py b/watcher_tempest_plugin/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher_tempest_plugin/config.py b/watcher_tempest_plugin/config.py deleted file mode 100644 index 426399d..0000000 --- a/watcher_tempest_plugin/config.py +++ /dev/null @@ -1,23 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from oslo_config import cfg - - -service_option = cfg.BoolOpt("watcher", - default=True, - help="Whether or not watcher is expected to be " - "available") diff --git a/watcher_tempest_plugin/infra_optim_clients.py b/watcher_tempest_plugin/infra_optim_clients.py deleted file mode 100644 index edf2091..0000000 --- a/watcher_tempest_plugin/infra_optim_clients.py +++ /dev/null @@ -1,42 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import abc - -import six -from tempest import clients -from tempest.common import credentials_factory as creds_factory -from tempest import config - -from watcher_tempest_plugin.services.infra_optim.v1.json import client as ioc - -CONF = config.CONF - - -@six.add_metaclass(abc.ABCMeta) -class BaseManager(clients.Manager): - - def __init__(self, credentials): - super(BaseManager, self).__init__(credentials) - self.io_client = ioc.InfraOptimClientJSON( - self.auth_provider, 'infra-optim', CONF.identity.region) - - -class AdminManager(BaseManager): - def __init__(self): - super(AdminManager, self).__init__( - creds_factory.get_configured_admin_credentials(), - ) diff --git a/watcher_tempest_plugin/plugin.py b/watcher_tempest_plugin/plugin.py deleted file mode 100644 index 560c544..0000000 --- a/watcher_tempest_plugin/plugin.py +++ /dev/null @@ -1,34 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- - -import os - -from tempest.test_discover import plugins - -from watcher_tempest_plugin import config as watcher_config - - -class WatcherTempestPlugin(plugins.TempestPlugin): - def load_tests(self): - base_path = os.path.split(os.path.dirname( - os.path.abspath(__file__)))[0] - test_dir = "watcher_tempest_plugin/tests" - full_test_dir = os.path.join(base_path, test_dir) - return full_test_dir, base_path - - def register_opts(self, conf): - conf.register_opt(watcher_config.service_option, - group='service_available') - - def get_opt_lists(self): - return [('service_available', [watcher_config.service_option])] diff --git a/watcher_tempest_plugin/services/__init__.py b/watcher_tempest_plugin/services/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher_tempest_plugin/services/infra_optim/__init__.py b/watcher_tempest_plugin/services/infra_optim/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher_tempest_plugin/services/infra_optim/base.py b/watcher_tempest_plugin/services/infra_optim/base.py deleted file mode 100644 index d248774..0000000 --- a/watcher_tempest_plugin/services/infra_optim/base.py +++ /dev/null @@ -1,211 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import abc -import functools - -import six -import six.moves.urllib.parse as urlparse - -from tempest.lib.common import rest_client - - -def handle_errors(f): - """A decorator that allows to ignore certain types of errors.""" - - @functools.wraps(f) - def wrapper(*args, **kwargs): - param_name = 'ignore_errors' - ignored_errors = kwargs.get(param_name, tuple()) - - if param_name in kwargs: - del kwargs[param_name] - - try: - return f(*args, **kwargs) - except ignored_errors: - # Silently ignore errors - pass - - return wrapper - - -@six.add_metaclass(abc.ABCMeta) -class BaseInfraOptimClient(rest_client.RestClient): - """Base Tempest REST client for Watcher API.""" - - URI_PREFIX = '' - - @abc.abstractmethod - def serialize(self, object_dict): - """Serialize an Watcher object.""" - raise NotImplementedError() - - @abc.abstractmethod - def deserialize(self, object_str): - """Deserialize an Watcher object.""" - raise NotImplementedError() - - def _get_uri(self, resource_name, uuid=None, permanent=False): - """Get URI for a specific resource or object. - - :param resource_name: The name of the REST resource, e.g., 'audits'. - :param uuid: The unique identifier of an object in UUID format. - :return: Relative URI for the resource or object. - """ - - prefix = self.URI_PREFIX if not permanent else '' - - return '{pref}/{res}{uuid}'.format(pref=prefix, - res=resource_name, - uuid='/%s' % uuid if uuid else '') - - def _make_patch(self, allowed_attributes, **kw): - """Create a JSON patch according to RFC 6902. - - :param allowed_attributes: An iterable object that contains a set of - allowed attributes for an object. - :param **kw: Attributes and new values for them. - :return: A JSON path that sets values of the specified attributes to - the new ones. 
- """ - - def get_change(kw, path='/'): - for name, value in kw.items(): - if isinstance(value, dict): - for ch in get_change(value, path + '%s/' % name): - yield ch - else: - if value is None: - yield {'path': path + name, - 'op': 'remove'} - else: - yield {'path': path + name, - 'value': value, - 'op': 'replace'} - - patch = [ch for ch in get_change(kw) - if ch['path'].lstrip('/') in allowed_attributes] - - return patch - - def _list_request(self, resource, permanent=False, **kwargs): - """Get the list of objects of the specified type. - - :param resource: The name of the REST resource, e.g., 'audits'. - "param **kw: Parameters for the request. - :return: A tuple with the server response and deserialized JSON list - of objects - """ - - uri = self._get_uri(resource, permanent=permanent) - if kwargs: - uri += "?%s" % urlparse.urlencode(kwargs) - - resp, body = self.get(uri) - self.expected_success(200, int(resp['status'])) - - return resp, self.deserialize(body) - - def _show_request(self, resource, uuid, permanent=False, **kwargs): - """Gets a specific object of the specified type. - - :param uuid: Unique identifier of the object in UUID format. - :return: Serialized object as a dictionary. - """ - - if 'uri' in kwargs: - uri = kwargs['uri'] - else: - uri = self._get_uri(resource, uuid=uuid, permanent=permanent) - resp, body = self.get(uri) - self.expected_success(200, int(resp['status'])) - - return resp, self.deserialize(body) - - def _create_request(self, resource, object_dict): - """Create an object of the specified type. - - :param resource: The name of the REST resource, e.g., 'audits'. - :param object_dict: A Python dict that represents an object of the - specified type. - :return: A tuple with the server response and the deserialized created - object. 
- """ - - body = self.serialize(object_dict) - uri = self._get_uri(resource) - - resp, body = self.post(uri, body=body) - self.expected_success(201, int(resp['status'])) - - return resp, self.deserialize(body) - - def _delete_request(self, resource, uuid): - """Delete specified object. - - :param resource: The name of the REST resource, e.g., 'audits'. - :param uuid: The unique identifier of an object in UUID format. - :return: A tuple with the server response and the response body. - """ - - uri = self._get_uri(resource, uuid) - - resp, body = self.delete(uri) - self.expected_success(204, int(resp['status'])) - return resp, body - - def _patch_request(self, resource, uuid, patch_object): - """Update specified object with JSON-patch. - - :param resource: The name of the REST resource, e.g., 'audits'. - :param uuid: The unique identifier of an object in UUID format. - :return: A tuple with the server response and the serialized patched - object. - """ - - uri = self._get_uri(resource, uuid) - patch_body = self.serialize(patch_object) - - resp, body = self.patch(uri, body=patch_body) - self.expected_success(200, int(resp['status'])) - return resp, self.deserialize(body) - - @handle_errors - def get_api_description(self): - """Retrieves all versions of the Watcher API.""" - - return self._list_request('', permanent=True) - - @handle_errors - def get_version_description(self, version='v1'): - """Retrieves the description of the API. - - :param version: The version of the API. Default: 'v1'. - :return: Serialized description of API resources. 
- """ - - return self._list_request(version, permanent=True) - - def _put_request(self, resource, put_object): - """Update specified object with JSON-patch.""" - - uri = self._get_uri(resource) - put_body = self.serialize(put_object) - - resp, body = self.put(uri, body=put_body) - self.expected_success(202, int(resp['status'])) - return resp, body diff --git a/watcher_tempest_plugin/services/infra_optim/v1/__init__.py b/watcher_tempest_plugin/services/infra_optim/v1/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher_tempest_plugin/services/infra_optim/v1/json/__init__.py b/watcher_tempest_plugin/services/infra_optim/v1/json/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher_tempest_plugin/services/infra_optim/v1/json/client.py b/watcher_tempest_plugin/services/infra_optim/v1/json/client.py deleted file mode 100644 index 2ee27f5..0000000 --- a/watcher_tempest_plugin/services/infra_optim/v1/json/client.py +++ /dev/null @@ -1,331 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from oslo_serialization import jsonutils -from watcher.common import utils -from watcher_tempest_plugin.services.infra_optim import base - - -class InfraOptimClientJSON(base.BaseInfraOptimClient): - """Base Tempest REST client for Watcher API v1.""" - - URI_PREFIX = 'v1' - - def serialize(self, object_dict): - """Serialize an Watcher object.""" - return jsonutils.dumps(object_dict) - - def deserialize(self, object_str): - """Deserialize an Watcher object.""" - return jsonutils.loads(object_str.decode('utf-8')) - - # ### AUDIT TEMPLATES ### # - - @base.handle_errors - def list_audit_templates(self, **kwargs): - """List all existing audit templates.""" - return self._list_request('audit_templates', **kwargs) - - @base.handle_errors - def list_audit_templates_detail(self, **kwargs): - """Lists details of all existing audit templates.""" - return self._list_request('/audit_templates/detail', **kwargs) - - @base.handle_errors - def show_audit_template(self, audit_template_uuid): - """Gets a specific audit template. - - :param audit_template_uuid: Unique identifier of the audit template - :return: Serialized audit template as a dictionary. - """ - return self._show_request('audit_templates', audit_template_uuid) - - @base.handle_errors - def create_audit_template(self, **kwargs): - """Creates an audit template with the specified parameters. - - :param name: The name of the audit template. - :param description: The description of the audit template. - :param goal_uuid: The related Goal UUID associated. - :param strategy_uuid: The related Strategy UUID associated. - :param audit_scope: Scope the audit should apply to. - :return: A tuple with the server response and the created audit - template. 
- """ - - parameters = {k: v for k, v in kwargs.items() if v is not None} - # This name is unique to avoid the DB unique constraint on names - unique_name = 'Tempest Audit Template %s' % utils.generate_uuid() - - audit_template = { - 'name': parameters.get('name', unique_name), - 'description': parameters.get('description'), - 'goal': parameters.get('goal'), - 'strategy': parameters.get('strategy'), - 'scope': parameters.get('scope', []), - } - - return self._create_request('audit_templates', audit_template) - - @base.handle_errors - def delete_audit_template(self, audit_template_uuid): - """Deletes an audit template having the specified UUID. - - :param audit_template_uuid: The unique identifier of the audit template - :return: A tuple with the server response and the response body. - """ - - return self._delete_request('audit_templates', audit_template_uuid) - - @base.handle_errors - def update_audit_template(self, audit_template_uuid, patch): - """Update the specified audit template. - - :param audit_template_uuid: The unique identifier of the audit template - :param patch: List of dicts representing json patches. - :return: A tuple with the server response and the updated audit - template. - """ - - return self._patch_request('audit_templates', - audit_template_uuid, patch) - - # ### AUDITS ### # - - @base.handle_errors - def list_audits(self, **kwargs): - """List all existing audit templates.""" - return self._list_request('audits', **kwargs) - - @base.handle_errors - def list_audits_detail(self, **kwargs): - """Lists details of all existing audit templates.""" - return self._list_request('/audits/detail', **kwargs) - - @base.handle_errors - def show_audit(self, audit_uuid): - """Gets a specific audit template. 
- - :param audit_uuid: Unique identifier of the audit template - :return: Serialized audit template as a dictionary - """ - return self._show_request('audits', audit_uuid) - - @base.handle_errors - def create_audit(self, audit_template_uuid, **kwargs): - """Create an audit with the specified parameters - - :param audit_template_uuid: Audit template ID used by the audit - :return: A tuple with the server response and the created audit - """ - audit = {'audit_template_uuid': audit_template_uuid} - audit.update(kwargs) - if not audit['state']: - del audit['state'] - - return self._create_request('audits', audit) - - @base.handle_errors - def delete_audit(self, audit_uuid): - """Deletes an audit having the specified UUID - - :param audit_uuid: The unique identifier of the audit - :return: A tuple with the server response and the response body - """ - - return self._delete_request('audits', audit_uuid) - - @base.handle_errors - def update_audit(self, audit_uuid, patch): - """Update the specified audit. - - :param audit_uuid: The unique identifier of the audit - :param patch: List of dicts representing json patches. 
- :return: Tuple with the server response and the updated audit - """ - - return self._patch_request('audits', audit_uuid, patch) - - # ### ACTION PLANS ### # - - @base.handle_errors - def list_action_plans(self, **kwargs): - """List all existing action plan""" - return self._list_request('action_plans', **kwargs) - - @base.handle_errors - def list_action_plans_detail(self, **kwargs): - """Lists details of all existing action plan""" - return self._list_request('/action_plans/detail', **kwargs) - - @base.handle_errors - def show_action_plan(self, action_plan_uuid): - """Gets a specific action plan - - :param action_plan_uuid: Unique identifier of the action plan - :return: Serialized action plan as a dictionary - """ - return self._show_request('/action_plans', action_plan_uuid) - - @base.handle_errors - def delete_action_plan(self, action_plan_uuid): - """Deletes an action plan having the specified UUID - - :param action_plan_uuid: The unique identifier of the action_plan - :return: A tuple with the server response and the response body - """ - - return self._delete_request('/action_plans', action_plan_uuid) - - @base.handle_errors - def delete_action_plans_by_audit(self, audit_uuid): - """Deletes an action plan having the specified UUID - - :param audit_uuid: The unique identifier of the related Audit - """ - - action_plans = self.list_action_plans(audit_uuid=audit_uuid)[1] - - for action_plan in action_plans: - self.delete_action_plan(action_plan['uuid']) - - @base.handle_errors - def update_action_plan(self, action_plan_uuid, patch): - """Update the specified action plan - - :param action_plan_uuid: The unique identifier of the action_plan - :param patch: List of dicts representing json patches. 
- :return: Tuple with the server response and the updated action_plan - """ - - return self._patch_request('/action_plans', action_plan_uuid, patch) - - @base.handle_errors - def start_action_plan(self, action_plan_uuid): - """Start the specified action plan - - :param action_plan_uuid: The unique identifier of the action_plan - :return: Tuple with the server response and the updated action_plan - """ - - return self._patch_request( - '/action_plans', action_plan_uuid, - [{'path': '/state', 'op': 'replace', 'value': 'PENDING'}]) - - # ### GOALS ### # - - @base.handle_errors - def list_goals(self, **kwargs): - """List all existing goals""" - return self._list_request('/goals', **kwargs) - - @base.handle_errors - def list_goals_detail(self, **kwargs): - """Lists details of all existing goals""" - return self._list_request('/goals/detail', **kwargs) - - @base.handle_errors - def show_goal(self, goal): - """Gets a specific goal - - :param goal: UUID or Name of the goal - :return: Serialized goal as a dictionary - """ - return self._show_request('/goals', goal) - - # ### ACTIONS ### # - - @base.handle_errors - def list_actions(self, **kwargs): - """List all existing actions""" - return self._list_request('/actions', **kwargs) - - @base.handle_errors - def list_actions_detail(self, **kwargs): - """Lists details of all existing actions""" - return self._list_request('/actions/detail', **kwargs) - - @base.handle_errors - def show_action(self, action_uuid): - """Gets a specific action - - :param action_uuid: Unique identifier of the action - :return: Serialized action as a dictionary - """ - return self._show_request('/actions', action_uuid) - - # ### STRATEGIES ### # - - @base.handle_errors - def list_strategies(self, **kwargs): - """List all existing strategies""" - return self._list_request('/strategies', **kwargs) - - @base.handle_errors - def list_strategies_detail(self, **kwargs): - """Lists details of all existing strategies""" - return 
self._list_request('/strategies/detail', **kwargs) - - @base.handle_errors - def show_strategy(self, strategy): - """Gets a specific strategy - - :param strategy_id: Name of the strategy - :return: Serialized strategy as a dictionary - """ - return self._show_request('/strategies', strategy) - - # ### SCORING ENGINE ### # - - @base.handle_errors - def list_scoring_engines(self, **kwargs): - """List all existing scoring_engines""" - return self._list_request('/scoring_engines', **kwargs) - - @base.handle_errors - def list_scoring_engines_detail(self, **kwargs): - """Lists details of all existing scoring_engines""" - return self._list_request('/scoring_engines/detail', **kwargs) - - @base.handle_errors - def show_scoring_engine(self, scoring_engine): - """Gets a specific scoring_engine - - :param scoring_engine: UUID or Name of the scoring_engine - :return: Serialized scoring_engine as a dictionary - """ - return self._show_request('/scoring_engines', scoring_engine) - - # ### SERVICES ### # - - @base.handle_errors - def list_services(self, **kwargs): - """List all existing services""" - return self._list_request('/services', **kwargs) - - @base.handle_errors - def list_services_detail(self, **kwargs): - """Lists details of all existing services""" - return self._list_request('/services/detail', **kwargs) - - @base.handle_errors - def show_service(self, service): - """Gets a specific service - - :param service: Name of the strategy - :return: Serialized strategy as a dictionary - """ - return self._show_request('/services', service) diff --git a/watcher_tempest_plugin/tests/__init__.py b/watcher_tempest_plugin/tests/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher_tempest_plugin/tests/api/__init__.py b/watcher_tempest_plugin/tests/api/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher_tempest_plugin/tests/api/admin/__init__.py b/watcher_tempest_plugin/tests/api/admin/__init__.py deleted file mode 100644 
index e69de29..0000000 diff --git a/watcher_tempest_plugin/tests/api/admin/base.py b/watcher_tempest_plugin/tests/api/admin/base.py deleted file mode 100644 index 5373623..0000000 --- a/watcher_tempest_plugin/tests/api/admin/base.py +++ /dev/null @@ -1,263 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import functools - -from tempest.lib.common.utils import data_utils -from tempest.lib.common.utils import test_utils -from tempest import test - -from watcher_tempest_plugin import infra_optim_clients as clients - - -class BaseInfraOptimTest(test.BaseTestCase): - """Base class for Infrastructure Optimization API tests.""" - - # States where the object is waiting for some event to perform a transition - IDLE_STATES = ('RECOMMENDED', - 'FAILED', - 'SUCCEEDED', - 'CANCELLED', - 'SUSPENDED') - # States where the object can only be DELETED (end of its life-cycle) - FINISHED_STATES = ('FAILED', - 'SUCCEEDED', - 'CANCELLED', - 'SUPERSEDED') - - @classmethod - def setup_credentials(cls): - super(BaseInfraOptimTest, cls).setup_credentials() - cls.mgr = clients.AdminManager() - - @classmethod - def setup_clients(cls): - super(BaseInfraOptimTest, cls).setup_clients() - cls.client = cls.mgr.io_client - - @classmethod - def resource_setup(cls): - super(BaseInfraOptimTest, cls).resource_setup() - - # Set of all created audit templates UUIDs - cls.created_audit_templates = set() - # Set of all created audit UUIDs - 
cls.created_audits = set() - # Set of all created audit UUIDs. We use it to build the list of - # action plans to delete (including potential orphan one(s)) - cls.created_action_plans_audit_uuids = set() - - @classmethod - def resource_cleanup(cls): - """Ensure that all created objects get destroyed.""" - try: - action_plans_to_be_deleted = set() - # Phase 1: Make sure all objects are in an idle state - for audit_uuid in cls.created_audits: - test_utils.call_until_true( - func=functools.partial( - cls.is_audit_idle, audit_uuid), - duration=30, - sleep_for=.5 - ) - - for audit_uuid in cls.created_action_plans_audit_uuids: - _, action_plans = cls.client.list_action_plans( - audit_uuid=audit_uuid) - action_plans_to_be_deleted.update( - ap['uuid'] for ap in action_plans['action_plans']) - - for action_plan in action_plans['action_plans']: - try: - test_utils.call_until_true( - func=functools.partial( - cls.is_action_plan_idle, action_plan['uuid']), - duration=30, - sleep_for=.5 - ) - except Exception: - action_plans_to_be_deleted.remove( - action_plan['uuid']) - - # Phase 2: Delete them all - for action_plan_uuid in action_plans_to_be_deleted: - cls.delete_action_plan(action_plan_uuid) - - for audit_uuid in cls.created_audits.copy(): - cls.delete_audit(audit_uuid) - - for audit_template_uuid in cls.created_audit_templates.copy(): - cls.delete_audit_template(audit_template_uuid) - - finally: - super(BaseInfraOptimTest, cls).resource_cleanup() - - def validate_self_link(self, resource, uuid, link): - """Check whether the given self link formatted correctly.""" - expected_link = "{base}/{pref}/{res}/{uuid}".format( - base=self.client.base_url, - pref=self.client.URI_PREFIX, - res=resource, - uuid=uuid - ) - self.assertEqual(expected_link, link) - - def assert_expected(self, expected, actual, - keys=('created_at', 'updated_at', 'deleted_at')): - # Check if not expected keys/values exists in actual response body - for key, value in expected.items(): - if key not in keys: - 
self.assertIn(key, actual) - self.assertEqual(value, actual[key]) - - # ### AUDIT TEMPLATES ### # - - @classmethod - def create_audit_template(cls, goal, name=None, description=None, - strategy=None, scope=None): - """Wrapper utility for creating a test audit template - - :param goal: Goal UUID or name related to the audit template. - :param name: The name of the audit template. Default: My Audit Template - :param description: The description of the audit template. - :param strategy: Strategy UUID or name related to the audit template. - :param scope: Scope that will be applied on all derived audits. - :return: A tuple with The HTTP response and its body - """ - description = description or data_utils.rand_name( - 'test-audit_template') - resp, body = cls.client.create_audit_template( - name=name, description=description, - goal=goal, strategy=strategy, scope=scope) - - cls.created_audit_templates.add(body['uuid']) - - return resp, body - - @classmethod - def delete_audit_template(cls, uuid): - """Deletes a audit_template having the specified UUID - - :param uuid: The unique identifier of the audit template - :return: Server response - """ - resp, _ = cls.client.delete_audit_template(uuid) - - if uuid in cls.created_audit_templates: - cls.created_audit_templates.remove(uuid) - - return resp - - # ### AUDITS ### # - - @classmethod - def create_audit(cls, audit_template_uuid, audit_type='ONESHOT', - state=None, interval=None): - """Wrapper utility for creating a test audit - - :param audit_template_uuid: Audit Template UUID this audit will use - :param audit_type: Audit type (either ONESHOT or CONTINUOUS) - :param state: Audit state (str) - :param interval: Audit interval in seconds or cron syntax (str) - :return: A tuple with The HTTP response and its body - """ - resp, body = cls.client.create_audit( - audit_template_uuid=audit_template_uuid, audit_type=audit_type, - state=state, interval=interval) - - cls.created_audits.add(body['uuid']) - 
cls.created_action_plans_audit_uuids.add(body['uuid']) - - return resp, body - - @classmethod - def delete_audit(cls, audit_uuid): - """Deletes an audit having the specified UUID - - :param audit_uuid: The unique identifier of the audit. - :return: the HTTP response - """ - resp, _ = cls.client.delete_audit(audit_uuid) - - if audit_uuid in cls.created_audits: - cls.created_audits.remove(audit_uuid) - - return resp - - @classmethod - def has_audit_succeeded(cls, audit_uuid): - _, audit = cls.client.show_audit(audit_uuid) - return audit.get('state') == 'SUCCEEDED' - - @classmethod - def has_audit_finished(cls, audit_uuid): - _, audit = cls.client.show_audit(audit_uuid) - return audit.get('state') in cls.FINISHED_STATES - - @classmethod - def is_audit_idle(cls, audit_uuid): - _, audit = cls.client.show_audit(audit_uuid) - return audit.get('state') in cls.IDLE_STATES - - # ### ACTION PLANS ### # - - @classmethod - def create_action_plan(cls, audit_template_uuid, **audit_kwargs): - """Wrapper utility for creating a test action plan - - :param audit_template_uuid: Audit template UUID to use - :param audit_kwargs: Dict of audit properties to set - :return: The action plan as dict - """ - _, audit = cls.create_audit(audit_template_uuid, **audit_kwargs) - audit_uuid = audit['uuid'] - - assert test_utils.call_until_true( - func=functools.partial(cls.has_audit_finished, audit_uuid), - duration=30, - sleep_for=.5 - ) - - _, action_plans = cls.client.list_action_plans(audit_uuid=audit_uuid) - if len(action_plans['action_plans']) == 0: - return - - return action_plans['action_plans'][0] - - @classmethod - def delete_action_plan(cls, action_plan_uuid): - """Deletes an action plan having the specified UUID - - :param action_plan_uuid: The unique identifier of the action plan. 
- :return: the HTTP response - """ - resp, _ = cls.client.delete_action_plan(action_plan_uuid) - - if action_plan_uuid in cls.created_action_plans_audit_uuids: - cls.created_action_plans_audit_uuids.remove(action_plan_uuid) - - return resp - - @classmethod - def has_action_plan_finished(cls, action_plan_uuid): - _, action_plan = cls.client.show_action_plan(action_plan_uuid) - return action_plan.get('state') in cls.FINISHED_STATES - - @classmethod - def is_action_plan_idle(cls, action_plan_uuid): - """This guard makes sure your action plan is not running""" - _, action_plan = cls.client.show_action_plan(action_plan_uuid) - return action_plan.get('state') in cls.IDLE_STATES diff --git a/watcher_tempest_plugin/tests/api/admin/test_action.py b/watcher_tempest_plugin/tests/api/admin/test_action.py deleted file mode 100644 index 3fa2d94..0000000 --- a/watcher_tempest_plugin/tests/api/admin/test_action.py +++ /dev/null @@ -1,110 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from __future__ import unicode_literals - -import collections -import functools - -from tempest.lib.common.utils import test_utils -from tempest.lib import decorators - -from watcher_tempest_plugin.tests.api.admin import base - - -class TestShowListAction(base.BaseInfraOptimTest): - """Tests for actions""" - - @classmethod - def resource_setup(cls): - super(TestShowListAction, cls).resource_setup() - _, cls.goal = cls.client.show_goal("DUMMY") - _, cls.audit_template = cls.create_audit_template(cls.goal['uuid']) - _, cls.audit = cls.create_audit(cls.audit_template['uuid']) - - assert test_utils.call_until_true( - func=functools.partial(cls.has_audit_finished, cls.audit['uuid']), - duration=30, - sleep_for=.5 - ) - _, action_plans = cls.client.list_action_plans( - audit_uuid=cls.audit['uuid']) - cls.action_plan = action_plans['action_plans'][0] - - @decorators.attr(type='smoke') - def test_show_one_action(self): - _, body = self.client.list_actions( - action_plan_uuid=self.action_plan["uuid"]) - actions = body['actions'] - - _, action = self.client.show_action(actions[0]["uuid"]) - - self.assertEqual(self.action_plan["uuid"], action['action_plan_uuid']) - self.assertEqual("PENDING", action['state']) - - @decorators.attr(type='smoke') - def test_show_action_with_links(self): - _, body = self.client.list_actions( - action_plan_uuid=self.action_plan["uuid"]) - actions = body['actions'] - - _, action = self.client.show_action(actions[0]["uuid"]) - - self.assertIn('links', action.keys()) - self.assertEqual(2, len(action['links'])) - self.assertIn(action['uuid'], action['links'][0]['href']) - - @decorators.attr(type="smoke") - def test_list_actions(self): - _, body = self.client.list_actions() - - # Verify self links. 
- for action in body['actions']: - self.validate_self_link('actions', action['uuid'], - action['links'][0]['href']) - - @decorators.attr(type="smoke") - def test_list_actions_by_action_plan(self): - _, body = self.client.list_actions( - action_plan_uuid=self.action_plan["uuid"]) - - for item in body['actions']: - self.assertEqual(self.action_plan["uuid"], - item['action_plan_uuid']) - - action_counter = collections.Counter( - act['action_type'] for act in body['actions']) - - # A dummy strategy generates 2 "nop" actions and 1 "sleep" action - self.assertEqual(3, len(body['actions'])) - self.assertEqual(2, action_counter.get("nop")) - self.assertEqual(1, action_counter.get("sleep")) - - @decorators.attr(type="smoke") - def test_list_actions_by_audit(self): - _, body = self.client.list_actions(audit_uuid=self.audit["uuid"]) - - for item in body['actions']: - self.assertEqual(self.action_plan["uuid"], - item['action_plan_uuid']) - - action_counter = collections.Counter( - act['action_type'] for act in body['actions']) - - # A dummy strategy generates 2 "nop" actions and 1 "sleep" action - self.assertEqual(3, len(body['actions'])) - self.assertEqual(2, action_counter.get("nop")) - self.assertEqual(1, action_counter.get("sleep")) diff --git a/watcher_tempest_plugin/tests/api/admin/test_action_plan.py b/watcher_tempest_plugin/tests/api/admin/test_action_plan.py deleted file mode 100644 index b31b5df..0000000 --- a/watcher_tempest_plugin/tests/api/admin/test_action_plan.py +++ /dev/null @@ -1,176 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import unicode_literals - -import functools - -from tempest.lib.common.utils import test_utils -from tempest.lib import decorators -from tempest.lib import exceptions - -from watcher_tempest_plugin.tests.api.admin import base - - -class TestCreateDeleteExecuteActionPlan(base.BaseInfraOptimTest): - """Tests for action plans""" - - @decorators.attr(type='smoke') - def test_create_action_plan(self): - _, goal = self.client.show_goal("dummy") - _, audit_template = self.create_audit_template(goal['uuid']) - _, audit = self.create_audit(audit_template['uuid']) - - self.assertTrue(test_utils.call_until_true( - func=functools.partial(self.has_audit_finished, audit['uuid']), - duration=30, - sleep_for=.5 - )) - _, action_plans = self.client.list_action_plans( - audit_uuid=audit['uuid']) - action_plan = action_plans['action_plans'][0] - - _, action_plan = self.client.show_action_plan(action_plan['uuid']) - - self.assertEqual(audit['uuid'], action_plan['audit_uuid']) - self.assertEqual('RECOMMENDED', action_plan['state']) - - @decorators.attr(type='smoke') - def test_delete_action_plan(self): - _, goal = self.client.show_goal("dummy") - _, audit_template = self.create_audit_template(goal['uuid']) - _, audit = self.create_audit(audit_template['uuid']) - - self.assertTrue(test_utils.call_until_true( - func=functools.partial(self.has_audit_finished, audit['uuid']), - duration=30, - sleep_for=.5 - )) - _, action_plans = self.client.list_action_plans( - audit_uuid=audit['uuid']) - action_plan = action_plans['action_plans'][0] - - _, action_plan = 
self.client.show_action_plan(action_plan['uuid']) - - self.client.delete_action_plan(action_plan['uuid']) - - self.assertRaises(exceptions.NotFound, self.client.show_action_plan, - action_plan['uuid']) - - @decorators.attr(type='smoke') - def test_execute_dummy_action_plan(self): - _, goal = self.client.show_goal("dummy") - _, audit_template = self.create_audit_template(goal['uuid']) - _, audit = self.create_audit(audit_template['uuid']) - - self.assertTrue(test_utils.call_until_true( - func=functools.partial(self.has_audit_finished, audit['uuid']), - duration=30, - sleep_for=.5 - )) - _, action_plans = self.client.list_action_plans( - audit_uuid=audit['uuid']) - action_plan = action_plans['action_plans'][0] - - _, action_plan = self.client.show_action_plan(action_plan['uuid']) - - if action_plan['state'] in ['SUPERSEDED', 'SUCCEEDED']: - # This means the action plan is superseded so we cannot trigger it, - # or it is empty. - return - - # Execute the action by changing its state to PENDING - _, updated_ap = self.client.start_action_plan(action_plan['uuid']) - - self.assertTrue(test_utils.call_until_true( - func=functools.partial( - self.has_action_plan_finished, action_plan['uuid']), - duration=30, - sleep_for=.5 - )) - _, finished_ap = self.client.show_action_plan(action_plan['uuid']) - - self.assertIn(updated_ap['state'], ('PENDING', 'ONGOING')) - self.assertIn(finished_ap['state'], ('SUCCEEDED', 'SUPERSEDED')) - - -class TestShowListActionPlan(base.BaseInfraOptimTest): - """Tests for action_plan.""" - - @classmethod - def resource_setup(cls): - super(TestShowListActionPlan, cls).resource_setup() - _, cls.goal = cls.client.show_goal("dummy") - _, cls.audit_template = cls.create_audit_template(cls.goal['uuid']) - _, cls.audit = cls.create_audit(cls.audit_template['uuid']) - - assert test_utils.call_until_true( - func=functools.partial(cls.has_audit_finished, cls.audit['uuid']), - duration=30, - sleep_for=.5 - ) - _, action_plans = cls.client.list_action_plans( - 
audit_uuid=cls.audit['uuid']) - if len(action_plans['action_plans']) > 0: - cls.action_plan = action_plans['action_plans'][0] - - @decorators.attr(type='smoke') - def test_show_action_plan(self): - _, action_plan = self.client.show_action_plan( - self.action_plan['uuid']) - - self.assert_expected(self.action_plan, action_plan) - - @decorators.attr(type='smoke') - def test_show_action_plan_detail(self): - _, action_plans = self.client.list_action_plans_detail( - audit_uuid=self.audit['uuid']) - - action_plan = action_plans['action_plans'][0] - - self.assert_expected(self.action_plan, action_plan) - - @decorators.attr(type='smoke') - def test_show_action_plan_with_links(self): - _, action_plan = self.client.show_action_plan( - self.action_plan['uuid']) - self.assertIn('links', action_plan.keys()) - self.assertEqual(2, len(action_plan['links'])) - self.assertIn(action_plan['uuid'], - action_plan['links'][0]['href']) - - @decorators.attr(type="smoke") - def test_list_action_plans(self): - _, body = self.client.list_action_plans() - self.assertIn(self.action_plan['uuid'], - [i['uuid'] for i in body['action_plans']]) - # Verify self links. 
- for action_plan in body['action_plans']: - self.validate_self_link('action_plans', action_plan['uuid'], - action_plan['links'][0]['href']) - - @decorators.attr(type='smoke') - def test_list_with_limit(self): - # We create 3 extra audits to exceed the limit we fix - for _ in range(3): - self.create_action_plan(self.audit_template['uuid']) - - _, body = self.client.list_action_plans(limit=3) - - next_marker = body['action_plans'][-1]['uuid'] - - self.assertEqual(3, len(body['action_plans'])) - self.assertIn(next_marker, body['next']) diff --git a/watcher_tempest_plugin/tests/api/admin/test_api_discovery.py b/watcher_tempest_plugin/tests/api/admin/test_api_discovery.py deleted file mode 100644 index f30cb4b..0000000 --- a/watcher_tempest_plugin/tests/api/admin/test_api_discovery.py +++ /dev/null @@ -1,47 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from tempest.lib import decorators - -from watcher_tempest_plugin.tests.api.admin import base - - -class TestApiDiscovery(base.BaseInfraOptimTest): - """Tests for API discovery features.""" - - @decorators.attr(type='smoke') - def test_api_versions(self): - _, descr = self.client.get_api_description() - expected_versions = ('v1',) - versions = [version['id'] for version in descr['versions']] - - for v in expected_versions: - self.assertIn(v, versions) - - @decorators.attr(type='smoke') - def test_default_version(self): - _, descr = self.client.get_api_description() - default_version = descr['default_version'] - self.assertEqual('v1', default_version['id']) - - @decorators.attr(type='smoke') - def test_version_1_resources(self): - _, descr = self.client.get_version_description(version='v1') - expected_resources = ('audit_templates', 'audits', 'action_plans', - 'actions', 'links', 'media_types') - - for res in expected_resources: - self.assertIn(res, descr) diff --git a/watcher_tempest_plugin/tests/api/admin/test_audit.py b/watcher_tempest_plugin/tests/api/admin/test_audit.py deleted file mode 100644 index 13a187e..0000000 --- a/watcher_tempest_plugin/tests/api/admin/test_audit.py +++ /dev/null @@ -1,221 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from __future__ import unicode_literals - -import functools - -from tempest.lib.common.utils import test_utils -from tempest.lib import decorators -from tempest.lib import exceptions - -from watcher_tempest_plugin.tests.api.admin import base - - -class TestCreateUpdateDeleteAudit(base.BaseInfraOptimTest): - """Tests for audit.""" - - audit_states = ['ONGOING', 'SUCCEEDED', 'FAILED', - 'CANCELLED', 'DELETED', 'PENDING', 'SUSPENDED'] - - def assert_expected(self, expected, actual, - keys=('created_at', 'updated_at', - 'deleted_at', 'state')): - super(TestCreateUpdateDeleteAudit, self).assert_expected( - expected, actual, keys) - - @decorators.attr(type='smoke') - def test_create_audit_oneshot(self): - _, goal = self.client.show_goal("dummy") - _, audit_template = self.create_audit_template(goal['uuid']) - - audit_params = dict( - audit_template_uuid=audit_template['uuid'], - audit_type='ONESHOT', - ) - - _, body = self.create_audit(**audit_params) - audit_params.pop('audit_template_uuid') - audit_params['goal_uuid'] = goal['uuid'] - self.assert_expected(audit_params, body) - - _, audit = self.client.show_audit(body['uuid']) - self.assert_expected(audit, body) - - @decorators.attr(type='smoke') - def test_create_audit_continuous(self): - _, goal = self.client.show_goal("dummy") - _, audit_template = self.create_audit_template(goal['uuid']) - - audit_params = dict( - audit_template_uuid=audit_template['uuid'], - audit_type='CONTINUOUS', - interval='7200', - ) - - _, body = self.create_audit(**audit_params) - audit_params.pop('audit_template_uuid') - audit_params['goal_uuid'] = goal['uuid'] - self.assert_expected(audit_params, body) - - _, audit = self.client.show_audit(body['uuid']) - self.assert_expected(audit, body) - - @decorators.attr(type='smoke') - def test_create_audit_with_wrong_audit_template(self): - audit_params = dict( - audit_template_uuid='INVALID', - audit_type='ONESHOT', - ) - - self.assertRaises( - exceptions.BadRequest, self.create_audit, 
**audit_params) - - @decorators.attr(type='smoke') - def test_create_audit_with_invalid_state(self): - _, goal = self.client.show_goal("dummy") - _, audit_template = self.create_audit_template(goal['uuid']) - - audit_params = dict( - audit_template_uuid=audit_template['uuid'], - state='INVALID', - ) - - self.assertRaises( - exceptions.BadRequest, self.create_audit, **audit_params) - - @decorators.attr(type='smoke') - def test_create_audit_with_no_state(self): - _, goal = self.client.show_goal("dummy") - _, audit_template = self.create_audit_template(goal['uuid']) - - audit_params = dict( - audit_template_uuid=audit_template['uuid'], - state='', - ) - - _, body = self.create_audit(**audit_params) - audit_params.pop('audit_template_uuid') - audit_params['goal_uuid'] = goal['uuid'] - self.assert_expected(audit_params, body) - - _, audit = self.client.show_audit(body['uuid']) - - initial_audit_state = audit.pop('state') - self.assertIn(initial_audit_state, self.audit_states) - - self.assert_expected(audit, body) - - @decorators.attr(type='smoke') - def test_delete_audit(self): - _, goal = self.client.show_goal("dummy") - _, audit_template = self.create_audit_template(goal['uuid']) - _, body = self.create_audit(audit_template['uuid']) - audit_uuid = body['uuid'] - - test_utils.call_until_true( - func=functools.partial( - self.is_audit_idle, audit_uuid), - duration=10, - sleep_for=.5 - ) - - def is_audit_deleted(uuid): - try: - return not bool(self.client.show_audit(uuid)) - except exceptions.NotFound: - return True - - self.delete_audit(audit_uuid) - - test_utils.call_until_true( - func=functools.partial(is_audit_deleted, audit_uuid), - duration=5, - sleep_for=1 - ) - - self.assertTrue(is_audit_deleted(audit_uuid)) - - -class TestShowListAudit(base.BaseInfraOptimTest): - """Tests for audit.""" - - audit_states = ['ONGOING', 'SUCCEEDED', 'FAILED', - 'CANCELLED', 'DELETED', 'PENDING', 'SUSPENDED'] - - @classmethod - def resource_setup(cls): - super(TestShowListAudit, 
cls).resource_setup() - _, cls.goal = cls.client.show_goal("dummy") - _, cls.audit_template = cls.create_audit_template(cls.goal['uuid']) - _, cls.audit = cls.create_audit(cls.audit_template['uuid']) - - def assert_expected(self, expected, actual, - keys=('created_at', 'updated_at', - 'deleted_at', 'state')): - super(TestShowListAudit, self).assert_expected( - expected, actual, keys) - - @decorators.attr(type='smoke') - def test_show_audit(self): - _, audit = self.client.show_audit( - self.audit['uuid']) - - initial_audit = self.audit.copy() - del initial_audit['state'] - audit_state = audit['state'] - actual_audit = audit.copy() - del actual_audit['state'] - - self.assertIn(audit_state, self.audit_states) - self.assert_expected(initial_audit, actual_audit) - - @decorators.attr(type='smoke') - def test_show_audit_with_links(self): - _, audit = self.client.show_audit( - self.audit['uuid']) - self.assertIn('links', audit.keys()) - self.assertEqual(2, len(audit['links'])) - self.assertIn(audit['uuid'], - audit['links'][0]['href']) - - @decorators.attr(type="smoke") - def test_list_audits(self): - _, body = self.client.list_audits() - self.assertIn(self.audit['uuid'], - [i['uuid'] for i in body['audits']]) - # Verify self links. 
- for audit in body['audits']: - self.validate_self_link('audits', audit['uuid'], - audit['links'][0]['href']) - - @decorators.attr(type='smoke') - def test_list_with_limit(self): - # We create 3 extra audits to exceed the limit we fix - for _ in range(3): - self.create_audit(self.audit_template['uuid']) - - _, body = self.client.list_audits(limit=3) - - next_marker = body['audits'][-1]['uuid'] - self.assertEqual(3, len(body['audits'])) - self.assertIn(next_marker, body['next']) - - @decorators.attr(type='smoke') - def test_list_audits_related_to_given_audit_template(self): - _, body = self.client.list_audits( - goal=self.goal['uuid']) - self.assertIn(self.audit['uuid'], [n['uuid'] for n in body['audits']]) diff --git a/watcher_tempest_plugin/tests/api/admin/test_audit_template.py b/watcher_tempest_plugin/tests/api/admin/test_audit_template.py deleted file mode 100644 index 75ac80a..0000000 --- a/watcher_tempest_plugin/tests/api/admin/test_audit_template.py +++ /dev/null @@ -1,226 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from __future__ import unicode_literals - -from oslo_utils import uuidutils - -from tempest.lib import decorators -from tempest.lib import exceptions - -from watcher_tempest_plugin.tests.api.admin import base - - -class TestCreateDeleteAuditTemplate(base.BaseInfraOptimTest): - """Tests on audit templates""" - - @decorators.attr(type='smoke') - def test_create_audit_template(self): - goal_name = "dummy" - _, goal = self.client.show_goal(goal_name) - - params = { - 'name': 'my at name %s' % uuidutils.generate_uuid(), - 'description': 'my at description', - 'goal': goal['uuid']} - expected_data = { - 'name': params['name'], - 'description': params['description'], - 'goal_uuid': params['goal'], - 'goal_name': goal_name, - 'strategy_uuid': None, - 'strategy_name': None} - - _, body = self.create_audit_template(**params) - self.assert_expected(expected_data, body) - - _, audit_template = self.client.show_audit_template(body['uuid']) - self.assert_expected(audit_template, body) - - @decorators.attr(type='smoke') - def test_create_audit_template_unicode_description(self): - goal_name = "dummy" - _, goal = self.client.show_goal(goal_name) - # Use a unicode string for testing: - params = { - 'name': 'my at name %s' % uuidutils.generate_uuid(), - 'description': 'my àt déscrïptïôn', - 'goal': goal['uuid']} - - expected_data = { - 'name': params['name'], - 'description': params['description'], - 'goal_uuid': params['goal'], - 'goal_name': goal_name, - 'strategy_uuid': None, - 'strategy_name': None} - - _, body = self.create_audit_template(**params) - self.assert_expected(expected_data, body) - - _, audit_template = self.client.show_audit_template(body['uuid']) - self.assert_expected(audit_template, body) - - @decorators.attr(type='smoke') - def test_delete_audit_template(self): - _, goal = self.client.show_goal("dummy") - _, body = self.create_audit_template(goal=goal['uuid']) - audit_uuid = body['uuid'] - - self.delete_audit_template(audit_uuid) - - 
self.assertRaises(exceptions.NotFound, self.client.show_audit_template, - audit_uuid) - - -class TestAuditTemplate(base.BaseInfraOptimTest): - """Tests for audit_template.""" - - @classmethod - def resource_setup(cls): - super(TestAuditTemplate, cls).resource_setup() - _, cls.goal = cls.client.show_goal("dummy") - _, cls.strategy = cls.client.show_strategy("dummy") - _, cls.audit_template = cls.create_audit_template( - goal=cls.goal['uuid'], strategy=cls.strategy['uuid']) - - @decorators.attr(type='smoke') - def test_show_audit_template(self): - _, audit_template = self.client.show_audit_template( - self.audit_template['uuid']) - - self.assert_expected(self.audit_template, audit_template) - - @decorators.attr(type='smoke') - def test_filter_audit_template_by_goal_uuid(self): - _, audit_templates = self.client.list_audit_templates( - goal=self.audit_template['goal_uuid']) - - audit_template_uuids = [ - at["uuid"] for at in audit_templates['audit_templates']] - self.assertIn(self.audit_template['uuid'], audit_template_uuids) - - @decorators.attr(type='smoke') - def test_filter_audit_template_by_strategy_uuid(self): - _, audit_templates = self.client.list_audit_templates( - strategy=self.audit_template['strategy_uuid']) - - audit_template_uuids = [ - at["uuid"] for at in audit_templates['audit_templates']] - self.assertIn(self.audit_template['uuid'], audit_template_uuids) - - @decorators.attr(type='smoke') - def test_show_audit_template_with_links(self): - _, audit_template = self.client.show_audit_template( - self.audit_template['uuid']) - self.assertIn('links', audit_template.keys()) - self.assertEqual(2, len(audit_template['links'])) - self.assertIn(audit_template['uuid'], - audit_template['links'][0]['href']) - - @decorators.attr(type="smoke") - def test_list_audit_templates(self): - _, body = self.client.list_audit_templates() - self.assertIn(self.audit_template['uuid'], - [i['uuid'] for i in body['audit_templates']]) - # Verify self links. 
- for audit_template in body['audit_templates']: - self.validate_self_link('audit_templates', audit_template['uuid'], - audit_template['links'][0]['href']) - - @decorators.attr(type='smoke') - def test_list_with_limit(self): - # We create 3 extra audit templates to exceed the limit we fix - for _ in range(3): - self.create_audit_template(self.goal['uuid']) - - _, body = self.client.list_audit_templates(limit=3) - - next_marker = body['audit_templates'][-1]['uuid'] - self.assertEqual(3, len(body['audit_templates'])) - self.assertIn(next_marker, body['next']) - - @decorators.attr(type='smoke') - def test_update_audit_template_replace(self): - _, new_goal = self.client.show_goal("server_consolidation") - _, new_strategy = self.client.show_strategy("basic") - - params = {'name': 'my at name %s' % uuidutils.generate_uuid(), - 'description': 'my at description', - 'goal': self.goal['uuid']} - - _, body = self.create_audit_template(**params) - - new_name = 'my at new name %s' % uuidutils.generate_uuid() - new_description = 'my new at description' - - patch = [{'path': '/name', - 'op': 'replace', - 'value': new_name}, - {'path': '/description', - 'op': 'replace', - 'value': new_description}, - {'path': '/goal', - 'op': 'replace', - 'value': new_goal['uuid']}, - {'path': '/strategy', - 'op': 'replace', - 'value': new_strategy['uuid']}] - - self.client.update_audit_template(body['uuid'], patch) - - _, body = self.client.show_audit_template(body['uuid']) - self.assertEqual(new_name, body['name']) - self.assertEqual(new_description, body['description']) - self.assertEqual(new_goal['uuid'], body['goal_uuid']) - self.assertEqual(new_strategy['uuid'], body['strategy_uuid']) - - @decorators.attr(type='smoke') - def test_update_audit_template_remove(self): - description = 'my at description' - name = 'my at name %s' % uuidutils.generate_uuid() - params = {'name': name, - 'description': description, - 'goal': self.goal['uuid']} - - _, audit_template = 
self.create_audit_template(**params) - - # Removing the description - self.client.update_audit_template( - audit_template['uuid'], - [{'path': '/description', 'op': 'remove'}]) - - _, body = self.client.show_audit_template(audit_template['uuid']) - self.assertIsNone(body.get('description')) - - # Assert nothing else was changed - self.assertEqual(name, body['name']) - self.assertIsNone(body['description']) - self.assertEqual(self.goal['uuid'], body['goal_uuid']) - - @decorators.attr(type='smoke') - def test_update_audit_template_add(self): - params = {'name': 'my at name %s' % uuidutils.generate_uuid(), - 'goal': self.goal['uuid']} - - _, body = self.create_audit_template(**params) - - patch = [{'path': '/description', 'op': 'add', 'value': 'description'}] - - self.client.update_audit_template(body['uuid'], patch) - - _, body = self.client.show_audit_template(body['uuid']) - self.assertEqual('description', body['description']) diff --git a/watcher_tempest_plugin/tests/api/admin/test_goal.py b/watcher_tempest_plugin/tests/api/admin/test_goal.py deleted file mode 100644 index 2cf228e..0000000 --- a/watcher_tempest_plugin/tests/api/admin/test_goal.py +++ /dev/null @@ -1,66 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from __future__ import unicode_literals - -from tempest.lib import decorators - -from watcher_tempest_plugin.tests.api.admin import base - - -class TestShowListGoal(base.BaseInfraOptimTest): - """Tests for goals""" - - DUMMY_GOAL = "dummy" - - @classmethod - def resource_setup(cls): - super(TestShowListGoal, cls).resource_setup() - - def assert_expected(self, expected, actual, - keys=('created_at', 'updated_at', 'deleted_at')): - super(TestShowListGoal, self).assert_expected( - expected, actual, keys) - - @decorators.attr(type='smoke') - def test_show_goal(self): - _, goal = self.client.show_goal(self.DUMMY_GOAL) - - self.assertEqual(self.DUMMY_GOAL, goal['name']) - expected_fields = { - 'created_at', 'deleted_at', 'display_name', - 'efficacy_specification', 'links', 'name', - 'updated_at', 'uuid'} - self.assertEqual(expected_fields, set(goal.keys())) - - @decorators.attr(type='smoke') - def test_show_goal_with_links(self): - _, goal = self.client.show_goal(self.DUMMY_GOAL) - self.assertIn('links', goal.keys()) - self.assertEqual(2, len(goal['links'])) - self.assertIn(goal['uuid'], - goal['links'][0]['href']) - - @decorators.attr(type="smoke") - def test_list_goals(self): - _, body = self.client.list_goals() - self.assertIn(self.DUMMY_GOAL, - [i['name'] for i in body['goals']]) - - # Verify self links. - for goal in body['goals']: - self.validate_self_link('goals', goal['uuid'], - goal['links'][0]['href']) diff --git a/watcher_tempest_plugin/tests/api/admin/test_scoring_engine.py b/watcher_tempest_plugin/tests/api/admin/test_scoring_engine.py deleted file mode 100644 index 466fe41..0000000 --- a/watcher_tempest_plugin/tests/api/admin/test_scoring_engine.py +++ /dev/null @@ -1,65 +0,0 @@ -# Copyright (c) 2016 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import unicode_literals - -from tempest.lib import decorators - -from watcher_tempest_plugin.tests.api.admin import base - - -class TestShowListScoringEngine(base.BaseInfraOptimTest): - """Tests for scoring engines""" - - DUMMY_SCORING_ENGINE = "dummy_scorer" - - @classmethod - def resource_setup(cls): - super(TestShowListScoringEngine, cls).resource_setup() - - def assert_expected(self, expected, actual, - keys=('created_at', 'updated_at', 'deleted_at')): - super(TestShowListScoringEngine, self).assert_expected( - expected, actual, keys) - - @decorators.attr(type='smoke') - def test_show_scoring_engine(self): - _, scoring_engine = self.client.show_scoring_engine( - self.DUMMY_SCORING_ENGINE) - - self.assertEqual(self.DUMMY_SCORING_ENGINE, scoring_engine['name']) - - expected_fields = {'metainfo', 'description', 'name', 'uuid', 'links'} - self.assertEqual(expected_fields, set(scoring_engine.keys())) - - @decorators.attr(type='smoke') - def test_show_scoring_engine_with_links(self): - _, scoring_engine = self.client.show_scoring_engine( - self.DUMMY_SCORING_ENGINE) - self.assertIn('links', scoring_engine.keys()) - self.assertEqual(2, len(scoring_engine['links'])) - self.assertIn(scoring_engine['uuid'], - scoring_engine['links'][0]['href']) - - @decorators.attr(type="smoke") - def test_list_scoring_engines(self): - _, body = self.client.list_scoring_engines() - self.assertIn(self.DUMMY_SCORING_ENGINE, - [i['name'] for i in body['scoring_engines']]) - - # Verify self links. 
- for scoring_engine in body['scoring_engines']: - self.validate_self_link('scoring_engines', scoring_engine['uuid'], - scoring_engine['links'][0]['href']) diff --git a/watcher_tempest_plugin/tests/api/admin/test_service.py b/watcher_tempest_plugin/tests/api/admin/test_service.py deleted file mode 100644 index 948d8b1..0000000 --- a/watcher_tempest_plugin/tests/api/admin/test_service.py +++ /dev/null @@ -1,90 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 Servionica -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from __future__ import unicode_literals - -from tempest.lib import decorators - -from watcher_tempest_plugin.tests.api.admin import base - - -class TestShowListService(base.BaseInfraOptimTest): - """Tests for services""" - - DECISION_ENGINE = "watcher-decision-engine" - APPLIER = "watcher-applier" - - @classmethod - def resource_setup(cls): - super(TestShowListService, cls).resource_setup() - - def assert_expected(self, expected, actual, - keys=('created_at', 'updated_at', 'deleted_at')): - super(TestShowListService, self).assert_expected( - expected, actual, keys) - - @decorators.attr(type='smoke') - def test_show_service(self): - _, body = self.client.list_services() - self.assertIn('services', body) - services = body['services'] - self.assertIn(self.DECISION_ENGINE, - [i['name'] for i in body['services']]) - - service_id = filter(lambda x: self.DECISION_ENGINE == x['name'], - services)[0]['id'] - _, service = self.client.show_service(service_id) - - self.assertEqual(self.DECISION_ENGINE, service['name']) - self.assertIn("host", service.keys()) - self.assertIn("last_seen_up", service.keys()) - self.assertIn("status", service.keys()) - - @decorators.attr(type='smoke') - def test_show_service_with_links(self): - _, body = self.client.list_services() - self.assertIn('services', body) - services = body['services'] - self.assertIn(self.DECISION_ENGINE, - [i['name'] for i in body['services']]) - - service_id = filter(lambda x: self.DECISION_ENGINE == x['name'], - services)[0]['id'] - _, service = self.client.show_service(service_id) - - self.assertIn('links', service.keys()) - self.assertEqual(2, len(service['links'])) - self.assertIn(str(service['id']), - service['links'][0]['href']) - - @decorators.attr(type="smoke") - def test_list_services(self): - _, body = self.client.list_services() - self.assertIn('services', body) - services = body['services'] - self.assertIn(self.DECISION_ENGINE, - [i['name'] for i in body['services']]) - - for service in services: - 
self.assertTrue( - all(val is not None for key, val in service.items() - if key in ['id', 'name', 'host', 'status', - 'last_seen_up'])) - - # Verify self links. - for service in body['services']: - self.validate_self_link('services', service['id'], - service['links'][0]['href']) diff --git a/watcher_tempest_plugin/tests/api/admin/test_strategy.py b/watcher_tempest_plugin/tests/api/admin/test_strategy.py deleted file mode 100644 index 73eefd7..0000000 --- a/watcher_tempest_plugin/tests/api/admin/test_strategy.py +++ /dev/null @@ -1,69 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from __future__ import unicode_literals - -from tempest.lib import decorators - -from watcher_tempest_plugin.tests.api.admin import base - - -class TestShowListStrategy(base.BaseInfraOptimTest): - """Tests for strategies""" - - DUMMY_STRATEGY = "dummy" - - @classmethod - def resource_setup(cls): - super(TestShowListStrategy, cls).resource_setup() - - def assert_expected(self, expected, actual, - keys=('created_at', 'updated_at', 'deleted_at')): - super(TestShowListStrategy, self).assert_expected( - expected, actual, keys) - - @decorators.attr(type='smoke') - def test_show_strategy(self): - _, strategy = self.client.show_strategy(self.DUMMY_STRATEGY) - - self.assertEqual(self.DUMMY_STRATEGY, strategy['name']) - self.assertIn("display_name", strategy.keys()) - - @decorators.attr(type='smoke') - def test_show_strategy_with_links(self): - _, strategy = self.client.show_strategy(self.DUMMY_STRATEGY) - self.assertIn('links', strategy.keys()) - self.assertEqual(2, len(strategy['links'])) - self.assertIn(strategy['uuid'], - strategy['links'][0]['href']) - - @decorators.attr(type="smoke") - def test_list_strategies(self): - _, body = self.client.list_strategies() - self.assertIn('strategies', body) - strategies = body['strategies'] - self.assertIn(self.DUMMY_STRATEGY, - [i['name'] for i in body['strategies']]) - - for strategy in strategies: - self.assertTrue( - all(val is not None for key, val in strategy.items() - if key in ['uuid', 'name', 'display_name', 'goal_uuid'])) - - # Verify self links. 
- for strategy in body['strategies']: - self.validate_self_link('strategies', strategy['uuid'], - strategy['links'][0]['href']) diff --git a/watcher_tempest_plugin/tests/scenario/__init__.py b/watcher_tempest_plugin/tests/scenario/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/watcher_tempest_plugin/tests/scenario/base.py b/watcher_tempest_plugin/tests/scenario/base.py deleted file mode 100644 index 8b7e268..0000000 --- a/watcher_tempest_plugin/tests/scenario/base.py +++ /dev/null @@ -1,170 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - -from __future__ import unicode_literals - -import time - -from oslo_log import log -from tempest import config -from tempest import exceptions -from tempest.lib.common.utils import data_utils - -from watcher_tempest_plugin import infra_optim_clients as clients -from watcher_tempest_plugin.tests.scenario import manager - -LOG = log.getLogger(__name__) -CONF = config.CONF - - -class BaseInfraOptimScenarioTest(manager.ScenarioTest): - """Base class for Infrastructure Optimization API tests.""" - - # States where the object is waiting for some event to perform a transition - IDLE_STATES = ('RECOMMENDED', 'FAILED', 'SUCCEEDED', 'CANCELLED') - # States where the object can only be DELETED (end of its life-cycle) - FINISHED_STATES = ('FAILED', 'SUCCEEDED', 'CANCELLED', 'SUPERSEDED') - - @classmethod - def setup_credentials(cls): - cls._check_network_config() - super(BaseInfraOptimScenarioTest, cls).setup_credentials() - cls.mgr = clients.AdminManager() - - @classmethod - def setup_clients(cls): - super(BaseInfraOptimScenarioTest, cls).setup_clients() - cls.client = cls.mgr.io_client - - @classmethod - def resource_setup(cls): - super(BaseInfraOptimScenarioTest, cls).resource_setup() - - @classmethod - def resource_cleanup(cls): - """Ensure that all created objects get destroyed.""" - super(BaseInfraOptimScenarioTest, cls).resource_cleanup() - - @classmethod - def wait_for(cls, condition, timeout=30): - start_time = time.time() - while time.time() - start_time < timeout: - if condition(): - break - time.sleep(.5) - - @classmethod - def _check_network_config(cls): - if not CONF.network.public_network_id: - msg = 'public network not defined.' - LOG.error(msg) - raise exceptions.InvalidConfiguration(msg) - - # ### AUDIT TEMPLATES ### # - - def create_audit_template(self, goal, name=None, description=None, - strategy=None): - """Wrapper utility for creating a test audit template - - :param goal: Goal UUID or name related to the audit template. 
- :param name: The name of the audit template. Default: My Audit Template - :param description: The description of the audit template. - :param strategy: Strategy UUID or name related to the audit template. - :return: A tuple with The HTTP response and its body - """ - description = description or data_utils.rand_name( - 'test-audit_template') - resp, body = self.client.create_audit_template( - name=name, description=description, goal=goal, strategy=strategy) - - self.addCleanup( - self.delete_audit_template, - audit_template_uuid=body["uuid"] - ) - - return resp, body - - def delete_audit_template(self, audit_template_uuid): - """Deletes a audit_template having the specified UUID - - :param audit_template_uuid: The unique identifier of the audit template - :return: Server response - """ - resp, _ = self.client.delete_audit_template(audit_template_uuid) - return resp - - # ### AUDITS ### # - - def create_audit(self, audit_template_uuid, audit_type='ONESHOT', - state=None, parameters=None): - """Wrapper utility for creating a test audit - - :param audit_template_uuid: Audit Template UUID this audit will use - :param audit_type: Audit type (either ONESHOT or CONTINUOUS) - :param state: Audit state - :param parameters: Input parameters of the audit - :return: A tuple with The HTTP response and its body - """ - resp, body = self.client.create_audit( - audit_template_uuid=audit_template_uuid, audit_type=audit_type, - state=state, parameters=parameters) - - self.addCleanup(self.delete_audit, audit_uuid=body["uuid"]) - return resp, body - - def delete_audit(self, audit_uuid): - """Deletes an audit having the specified UUID - - :param audit_uuid: The unique identifier of the audit. 
- :return: the HTTP response - """ - - _, action_plans = self.client.list_action_plans(audit_uuid=audit_uuid) - for action_plan in action_plans.get("action_plans", []): - self.delete_action_plan(action_plan_uuid=action_plan["uuid"]) - - resp, _ = self.client.delete_audit(audit_uuid) - return resp - - def has_audit_succeeded(self, audit_uuid): - _, audit = self.client.show_audit(audit_uuid) - if audit.get('state') in ('FAILED', 'CANCELLED'): - raise ValueError() - - return audit.get('state') == 'SUCCEEDED' - - @classmethod - def has_audit_finished(cls, audit_uuid): - _, audit = cls.client.show_audit(audit_uuid) - return audit.get('state') in cls.FINISHED_STATES - - # ### ACTION PLANS ### # - - def delete_action_plan(self, action_plan_uuid): - """Deletes an action plan having the specified UUID - - :param action_plan_uuid: The unique identifier of the action plan. - :return: the HTTP response - """ - resp, _ = self.client.delete_action_plan(action_plan_uuid) - return resp - - def has_action_plan_finished(self, action_plan_uuid): - _, action_plan = self.client.show_action_plan(action_plan_uuid) - return action_plan.get('state') in ('FAILED', 'SUCCEEDED', 'CANCELLED', - 'SUPERSEDED') diff --git a/watcher_tempest_plugin/tests/scenario/manager.py b/watcher_tempest_plugin/tests/scenario/manager.py deleted file mode 100644 index 5364525..0000000 --- a/watcher_tempest_plugin/tests/scenario/manager.py +++ /dev/null @@ -1,206 +0,0 @@ -# Copyright 2012 OpenStack Foundation -# Copyright 2013 IBM Corp. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_log import log - -from tempest.common import compute -from tempest.common import waiters -from tempest import config -from tempest.lib.common.utils import data_utils -from tempest.lib.common.utils import test_utils -from tempest.lib import exceptions as lib_exc -import tempest.test - -CONF = config.CONF - -LOG = log.getLogger(__name__) - - -class ScenarioTest(tempest.test.BaseTestCase): - """Base class for scenario tests. Uses tempest own clients. """ - - credentials = ['primary'] - - @classmethod - def setup_clients(cls): - super(ScenarioTest, cls).setup_clients() - # Clients (in alphabetical order) - cls.flavors_client = cls.os_primary.flavors_client - cls.compute_floating_ips_client = ( - cls.os_primary.compute_floating_ips_client) - if CONF.service_available.glance: - # Check if glance v1 is available to determine which client to use. - if CONF.image_feature_enabled.api_v1: - cls.image_client = cls.os_primary.image_client - elif CONF.image_feature_enabled.api_v2: - cls.image_client = cls.os_primary.image_client_v2 - else: - raise lib_exc.InvalidConfiguration( - 'Either api_v1 or api_v2 must be True in ' - '[image-feature-enabled].') - # Compute image client - cls.compute_images_client = cls.os_primary.compute_images_client - cls.keypairs_client = cls.os_primary.keypairs_client - # Nova security groups client - cls.compute_security_groups_client = ( - cls.os_primary.compute_security_groups_client) - cls.compute_security_group_rules_client = ( - cls.os_primary.compute_security_group_rules_client) - cls.servers_client = cls.os_primary.servers_client - cls.interface_client = cls.os_primary.interfaces_client - # Neutron network client - cls.networks_client = cls.os_primary.networks_client - cls.ports_client = cls.os_primary.ports_client - cls.routers_client = cls.os_primary.routers_client - cls.subnets_client = cls.os_primary.subnets_client - 
cls.floating_ips_client = cls.os_primary.floating_ips_client - cls.security_groups_client = cls.os_primary.security_groups_client - cls.security_group_rules_client = ( - cls.os_primary.security_group_rules_client) - - if CONF.volume_feature_enabled.api_v2: - cls.volumes_client = cls.os_primary.volumes_v2_client - cls.snapshots_client = cls.os_primary.snapshots_v2_client - else: - cls.volumes_client = cls.os_primary.volumes_client - cls.snapshots_client = cls.os_primary.snapshots_client - - # ## Test functions library - # - # The create_[resource] functions only return body and discard the - # resp part which is not used in scenario tests - - def _create_port(self, network_id, client=None, namestart='port-quotatest', - **kwargs): - if not client: - client = self.ports_client - name = data_utils.rand_name(namestart) - result = client.create_port( - name=name, - network_id=network_id, - **kwargs) - self.assertIsNotNone(result, 'Unable to allocate port') - port = result['port'] - self.addCleanup(test_utils.call_and_ignore_notfound_exc, - client.delete_port, port['id']) - return port - - def create_keypair(self, client=None): - if not client: - client = self.keypairs_client - name = data_utils.rand_name(self.__class__.__name__) - # We don't need to create a keypair by pubkey in scenario - body = client.create_keypair(name=name) - self.addCleanup(client.delete_keypair, name) - return body['keypair'] - - def create_server(self, name=None, image_id=None, flavor=None, - validatable=False, wait_until='ACTIVE', - clients=None, **kwargs): - """Wrapper utility that returns a test server. - - This wrapper utility calls the common create test server and - returns a test server. The purpose of this wrapper is to minimize - the impact on the code of the tests already using this - function. 
- """ - - # NOTE(jlanoux): As a first step, ssh checks in the scenario - # tests need to be run regardless of the run_validation and - # validatable parameters and thus until the ssh validation job - # becomes voting in CI. The test resources management and IP - # association are taken care of in the scenario tests. - # Therefore, the validatable parameter is set to false in all - # those tests. In this way create_server just return a standard - # server and the scenario tests always perform ssh checks. - - # Needed for the cross_tenant_traffic test: - if clients is None: - clients = self.os_primary - - if name is None: - name = data_utils.rand_name(self.__class__.__name__ + "-server") - - vnic_type = CONF.network.port_vnic_type - - # If vnic_type is configured create port for - # every network - if vnic_type: - ports = [] - - create_port_body = {'binding:vnic_type': vnic_type, - 'namestart': 'port-smoke'} - if kwargs: - # Convert security group names to security group ids - # to pass to create_port - if 'security_groups' in kwargs: - security_groups = \ - clients.security_groups_client.list_security_groups( - ).get('security_groups') - sec_dict = dict([(s['name'], s['id']) - for s in security_groups]) - - sec_groups_names = [s['name'] for s in kwargs.pop( - 'security_groups')] - security_groups_ids = [sec_dict[s] - for s in sec_groups_names] - - if security_groups_ids: - create_port_body[ - 'security_groups'] = security_groups_ids - networks = kwargs.pop('networks', []) - else: - networks = [] - - # If there are no networks passed to us we look up - # for the project's private networks and create a port. 
- # The same behaviour as we would expect when passing - # the call to the clients with no networks - if not networks: - networks = clients.networks_client.list_networks( - **{'router:external': False, 'fields': 'id'})['networks'] - - # It's net['uuid'] if networks come from kwargs - # and net['id'] if they come from - # clients.networks_client.list_networks - for net in networks: - net_id = net.get('uuid', net.get('id')) - if 'port' not in net: - port = self._create_port(network_id=net_id, - client=clients.ports_client, - **create_port_body) - ports.append({'port': port['id']}) - else: - ports.append({'port': net['port']}) - if ports: - kwargs['networks'] = ports - self.ports = ports - - tenant_network = self.get_tenant_network() - - body, servers = compute.create_test_server( - clients, - tenant_network=tenant_network, - wait_until=wait_until, - name=name, flavor=flavor, - image_id=image_id, **kwargs) - - self.addCleanup(waiters.wait_for_server_termination, - clients.servers_client, body['id']) - self.addCleanup(test_utils.call_and_ignore_notfound_exc, - clients.servers_client.delete_server, body['id']) - server = clients.servers_client.show_server(body['id'])['server'] - return server diff --git a/watcher_tempest_plugin/tests/scenario/test_execute_basic_optim.py b/watcher_tempest_plugin/tests/scenario/test_execute_basic_optim.py deleted file mode 100644 index b4b5e76..0000000 --- a/watcher_tempest_plugin/tests/scenario/test_execute_basic_optim.py +++ /dev/null @@ -1,191 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import unicode_literals - -import functools - -from tempest import config -from tempest.lib.common.utils import test_utils - -from watcher_tempest_plugin.tests.scenario import base - -CONF = config.CONF - - -class TestExecuteBasicStrategy(base.BaseInfraOptimScenarioTest): - """Tests for action plans""" - - GOAL_NAME = "server_consolidation" - - @classmethod - def skip_checks(cls): - super(TestExecuteBasicStrategy, cls).skip_checks() - - @classmethod - def resource_setup(cls): - super(TestExecuteBasicStrategy, cls).resource_setup() - if CONF.compute.min_compute_nodes < 2: - raise cls.skipException( - "Less than 2 compute nodes, skipping multinode tests.") - if not CONF.compute_feature_enabled.live_migration: - raise cls.skipException("Live migration is not enabled") - - cls.initial_compute_nodes_setup = cls.get_compute_nodes_setup() - enabled_compute_nodes = [cn for cn in cls.initial_compute_nodes_setup - if cn.get('status') == 'enabled'] - - cls.wait_for_compute_node_setup() - - if len(enabled_compute_nodes) < 2: - raise cls.skipException( - "Less than 2 compute nodes are enabled, " - "skipping multinode tests.") - - @classmethod - def get_compute_nodes_setup(cls): - services_client = cls.mgr.services_client - available_services = services_client.list_services()['services'] - - return [srv for srv in available_services - if srv.get('binary') == 'nova-compute'] - - @classmethod - def wait_for_compute_node_setup(cls): - - def _are_compute_nodes_setup(): - try: - hypervisors_client = cls.mgr.hypervisor_client - hypervisors = 
hypervisors_client.list_hypervisors( - detail=True)['hypervisors'] - available_hypervisors = set( - hyp['hypervisor_hostname'] for hyp in hypervisors) - available_services = set( - service['host'] - for service in cls.get_compute_nodes_setup()) - - return ( - available_hypervisors == available_services and - len(hypervisors) >= 2) - except Exception: - return False - - assert test_utils.call_until_true( - func=_are_compute_nodes_setup, - duration=600, - sleep_for=2 - ) - - @classmethod - def rollback_compute_nodes_status(cls): - current_compute_nodes_setup = cls.get_compute_nodes_setup() - for cn_setup in current_compute_nodes_setup: - cn_hostname = cn_setup.get('host') - matching_cns = [ - cns for cns in cls.initial_compute_nodes_setup - if cns.get('host') == cn_hostname - ] - initial_cn_setup = matching_cns[0] # Should return a single result - if cn_setup.get('status') != initial_cn_setup.get('status'): - if initial_cn_setup.get('status') == 'enabled': - rollback_func = cls.mgr.services_client.enable_service - else: - rollback_func = cls.mgr.services_client.disable_service - rollback_func(binary='nova-compute', host=cn_hostname) - - def _create_one_instance_per_host(self): - """Create 1 instance per compute node - - This goes up to the min_compute_nodes threshold so that things don't - get crazy if you have 1000 compute nodes but set min to 3. - """ - host_client = self.mgr.hosts_client - all_hosts = host_client.list_hosts()['hosts'] - compute_nodes = [x for x in all_hosts if x['service'] == 'compute'] - - for idx, _ in enumerate( - compute_nodes[:CONF.compute.min_compute_nodes], start=1): - # by getting to active state here, this means this has - # landed on the host in question. 
- self.create_server( - name="instance-%d" % idx, - image_id=CONF.compute.image_ref, - wait_until='ACTIVE', - clients=self.mgr) - - def test_execute_basic_action_plan(self): - """Execute an action plan based on the BASIC strategy - - - create an audit template with the basic strategy - - run the audit to create an action plan - - get the action plan - - run the action plan - - get results and make sure it succeeded - """ - self.addCleanup(self.rollback_compute_nodes_status) - self._create_one_instance_per_host() - - _, goal = self.client.show_goal(self.GOAL_NAME) - _, strategy = self.client.show_strategy("basic") - _, audit_template = self.create_audit_template( - goal['uuid'], strategy=strategy['uuid']) - _, audit = self.create_audit(audit_template['uuid']) - - try: - self.assertTrue(test_utils.call_until_true( - func=functools.partial( - self.has_audit_finished, audit['uuid']), - duration=600, - sleep_for=2 - )) - except ValueError: - self.fail("The audit has failed!") - - _, finished_audit = self.client.show_audit(audit['uuid']) - if finished_audit.get('state') in ('FAILED', 'CANCELLED', 'SUSPENDED'): - self.fail("The audit ended in unexpected state: %s!" - % finished_audit.get('state')) - - _, action_plans = self.client.list_action_plans( - audit_uuid=audit['uuid']) - action_plan = action_plans['action_plans'][0] - - _, action_plan = self.client.show_action_plan(action_plan['uuid']) - - if action_plan['state'] in ('SUPERSEDED', 'SUCCEEDED'): - # This means the action plan is superseded so we cannot trigger it, - # or it is empty. 
- return - - # Execute the action by changing its state to PENDING - _, updated_ap = self.client.start_action_plan(action_plan['uuid']) - - self.assertTrue(test_utils.call_until_true( - func=functools.partial( - self.has_action_plan_finished, action_plan['uuid']), - duration=600, - sleep_for=2 - )) - _, finished_ap = self.client.show_action_plan(action_plan['uuid']) - _, action_list = self.client.list_actions( - action_plan_uuid=finished_ap["uuid"]) - - self.assertIn(updated_ap['state'], ('PENDING', 'ONGOING')) - self.assertIn(finished_ap['state'], ('SUCCEEDED', 'SUPERSEDED')) - - for action in action_list['actions']: - self.assertEqual('SUCCEEDED', action.get('state')) diff --git a/watcher_tempest_plugin/tests/scenario/test_execute_dummy_optim.py b/watcher_tempest_plugin/tests/scenario/test_execute_dummy_optim.py deleted file mode 100644 index 33b108a..0000000 --- a/watcher_tempest_plugin/tests/scenario/test_execute_dummy_optim.py +++ /dev/null @@ -1,85 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from __future__ import unicode_literals - -import collections -import functools - -from tempest.lib.common.utils import test_utils - -from watcher_tempest_plugin.tests.scenario import base - - -class TestExecuteDummyStrategy(base.BaseInfraOptimScenarioTest): - """Tests for action plans""" - - def test_execute_dummy_action_plan(self): - """Execute an action plan based on the 'dummy' strategy - - - create an audit template with the 'dummy' strategy - - run the audit to create an action plan - - get the action plan - - run the action plan - - get results and make sure it succeeded - """ - _, goal = self.client.show_goal("dummy") - _, audit_template = self.create_audit_template(goal['uuid']) - _, audit = self.create_audit(audit_template['uuid']) - - self.assertTrue(test_utils.call_until_true( - func=functools.partial(self.has_audit_finished, audit['uuid']), - duration=30, - sleep_for=.5 - )) - - self.assertTrue(self.has_audit_succeeded(audit['uuid'])) - - _, action_plans = self.client.list_action_plans( - audit_uuid=audit['uuid']) - action_plan = action_plans['action_plans'][0] - - _, action_plan = self.client.show_action_plan(action_plan['uuid']) - - if action_plan['state'] in ['SUPERSEDED', 'SUCCEEDED']: - # This means the action plan is superseded so we cannot trigger it, - # or it is empty. 
- return - - # Execute the action by changing its state to PENDING - _, updated_ap = self.client.start_action_plan(action_plan['uuid']) - - self.assertTrue(test_utils.call_until_true( - func=functools.partial( - self.has_action_plan_finished, action_plan['uuid']), - duration=30, - sleep_for=.5 - )) - _, finished_ap = self.client.show_action_plan(action_plan['uuid']) - _, action_list = self.client.list_actions( - action_plan_uuid=finished_ap["uuid"]) - - action_counter = collections.Counter( - act['action_type'] for act in action_list['actions']) - - self.assertIn(updated_ap['state'], ('PENDING', 'ONGOING')) - self.assertIn(finished_ap['state'], ('SUCCEEDED', 'SUPERSEDED')) - - # A dummy strategy generates 2 "nop" actions and 1 "sleep" action - self.assertEqual(3, len(action_list['actions'])) - self.assertEqual(2, action_counter.get("nop")) - self.assertEqual(1, action_counter.get("sleep")) diff --git a/watcher_tempest_plugin/tests/scenario/test_execute_workload_balancing.py b/watcher_tempest_plugin/tests/scenario/test_execute_workload_balancing.py deleted file mode 100644 index 8594e94..0000000 --- a/watcher_tempest_plugin/tests/scenario/test_execute_workload_balancing.py +++ /dev/null @@ -1,198 +0,0 @@ -# -*- encoding: utf-8 -*- -# Copyright (c) 2016 b<>com -# -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from __future__ import unicode_literals - -import functools - -from oslo_log import log -from tempest import config -from tempest.lib.common.utils import test_utils - -from watcher_tempest_plugin.tests.scenario import base - -CONF = config.CONF -LOG = log.getLogger(__name__) - - -class TestExecuteWorkloadBalancingStrategy(base.BaseInfraOptimScenarioTest): - """Tests for action plans""" - - GOAL = "workload_balancing" - - @classmethod - def skip_checks(cls): - super(TestExecuteWorkloadBalancingStrategy, cls).skip_checks() - - @classmethod - def resource_setup(cls): - super(TestExecuteWorkloadBalancingStrategy, cls).resource_setup() - if CONF.compute.min_compute_nodes < 2: - raise cls.skipException( - "Less than 2 compute nodes, skipping multinode tests.") - if not CONF.compute_feature_enabled.live_migration: - raise cls.skipException("Live migration is not enabled") - - cls.initial_compute_nodes_setup = cls.get_compute_nodes_setup() - enabled_compute_nodes = [cn for cn in cls.initial_compute_nodes_setup - if cn.get('status') == 'enabled'] - - cls.wait_for_compute_node_setup() - - if len(enabled_compute_nodes) < 2: - raise cls.skipException( - "Less than 2 compute nodes are enabled, " - "skipping multinode tests.") - - @classmethod - def get_hypervisors_setup(cls): - hypervisors_client = cls.mgr.hypervisor_client - hypervisors = hypervisors_client.list_hypervisors( - detail=True)['hypervisors'] - return hypervisors - - @classmethod - def get_compute_nodes_setup(cls): - services_client = cls.mgr.services_client - available_services = services_client.list_services()['services'] - - return [srv for srv in available_services - if srv.get('binary') == 'nova-compute'] - - def _migrate_server_to(self, server_id, dest_host, volume_backed=False): - kwargs = dict() - kwargs['disk_over_commit'] = False - block_migration = (CONF.compute_feature_enabled. 
- block_migration_for_live_migration and - not volume_backed) - body = self.mgr.servers_client.live_migrate_server( - server_id, host=dest_host, block_migration=block_migration, - **kwargs) - return body - - @classmethod - def wait_for_compute_node_setup(cls): - - def _are_compute_nodes_setup(): - try: - hypervisors = cls.get_hypervisors_setup() - available_hypervisors = set( - hyp['hypervisor_hostname'] for hyp in hypervisors - if hyp['state'] == 'up') - available_services = set( - service['host'] - for service in cls.get_compute_nodes_setup() - if service['state'] == 'up') - return ( - len(available_hypervisors) == len(available_services) and - len(hypervisors) >= 2) - except Exception as exc: - LOG.exception(exc) - return False - - assert test_utils.call_until_true( - func=_are_compute_nodes_setup, - duration=600, - sleep_for=2 - ) - - @classmethod - def rollback_compute_nodes_status(cls): - current_compute_nodes_setup = cls.get_compute_nodes_setup() - for cn_setup in current_compute_nodes_setup: - cn_hostname = cn_setup.get('host') - matching_cns = [ - cns for cns in cls.initial_compute_nodes_setup - if cns.get('host') == cn_hostname - ] - initial_cn_setup = matching_cns[0] # Should return a single result - if cn_setup.get('status') != initial_cn_setup.get('status'): - if initial_cn_setup.get('status') == 'enabled': - rollback_func = cls.mgr.services_client.enable_service - else: - rollback_func = cls.mgr.services_client.disable_service - rollback_func(binary='nova-compute', host=cn_hostname) - - def _create_one_instance_per_host(self): - """Create 1 instance per compute node - - This goes up to the min_compute_nodes threshold so that things don't - get crazy if you have 1000 compute nodes but set min to 3. 
- """ - host_client = self.mgr.hosts_client - all_hosts = host_client.list_hosts()['hosts'] - compute_nodes = [x for x in all_hosts if x['service'] == 'compute'] - - created_instances = [] - for _ in compute_nodes[:CONF.compute.min_compute_nodes]: - # by getting to active state here, this means this has - # landed on the host in question. - created_instances.append( - self.create_server(image_id=CONF.compute.image_ref, - wait_until='ACTIVE', clients=self.mgr)) - return created_instances - - def _pack_all_created_instances_on_one_host(self, instances): - hypervisors = [ - hyp['hypervisor_hostname'] for hyp in self.get_hypervisors_setup() - if hyp['state'] == 'up'] - node = hypervisors[0] - for instance in instances: - if instance.get('OS-EXT-SRV-ATTR:hypervisor_hostname') != node: - self._migrate_server_to(instance['id'], node) - - def test_execute_workload_stabilization(self): - """Execute an action plan using the workload_stabilization strategy""" - self.addCleanup(self.rollback_compute_nodes_status) - instances = self._create_one_instance_per_host() - self._pack_all_created_instances_on_one_host(instances) - - audit_parameters = { - "metrics": ["cpu_util"], - "thresholds": {"cpu_util": 0.2}, - "weights": {"cpu_util_weight": 1.0}, - "instance_metrics": {"cpu_util": "compute.node.cpu.percent"}} - - _, goal = self.client.show_goal(self.GOAL) - _, strategy = self.client.show_strategy("workload_stabilization") - _, audit_template = self.create_audit_template( - goal['uuid'], strategy=strategy['uuid']) - _, audit = self.create_audit( - audit_template['uuid'], parameters=audit_parameters) - - try: - self.assertTrue(test_utils.call_until_true( - func=functools.partial( - self.has_audit_finished, audit['uuid']), - duration=600, - sleep_for=2 - )) - except ValueError: - self.fail("The audit has failed!") - - _, finished_audit = self.client.show_audit(audit['uuid']) - if finished_audit.get('state') in ('FAILED', 'CANCELLED'): - self.fail("The audit ended in unexpected 
state: %s!" % - finished_audit.get('state')) - - _, action_plans = self.client.list_action_plans( - audit_uuid=audit['uuid']) - action_plan = action_plans['action_plans'][0] - - _, action_plan = self.client.show_action_plan(action_plan['uuid']) - _, action_list = self.client.list_actions( - action_plan_uuid=action_plan["uuid"])