From be8ce42b781897578f866a5b1b50776a439e95dc Mon Sep 17 00:00:00 2001 From: Ghanshyam Mann Date: Sat, 24 Feb 2024 11:33:48 -0800 Subject: [PATCH] Retire Tripleo: remove repo content TripleO project is retiring - https://review.opendev.org/c/openstack/governance/+/905145 this commit remove the content of this project repo Change-Id: Ie1b54f1dce996fefd4080b307b6959f2570bfeef --- .coveragerc | 7 - .gitignore | 51 -- .mailmap | 3 - .stestr.conf | 3 - .zuul.yaml | 9 - CONTRIBUTING.rst | 16 - HACKING.rst | 4 - LICENSE | 175 ---- MANIFEST.in | 6 - README.rst | 30 +- doc/source/conf.py | 89 -- doc/source/index.rst | 159 ---- doc/source/specs | 1 - images/split-controlplane/ceph-details.png | Bin 595482 -> 0 bytes requirements.txt | 5 - setup.cfg | 12 - setup.py | 23 - specs/juno/backwards-compat-policy.rst | 260 ------ specs/juno/haproxy_configuration.rst | 229 ----- specs/juno/network_configuration.rst | 272 ------ specs/juno/oac-header.rst | 162 ---- specs/juno/promote-heat-env.rst | 258 ------ specs/juno/ssl_pki.rst | 169 ---- specs/juno/tripleo-juno-ci-improvements.rst | 269 ------ .../tripleo-juno-configurable-mnt-state.rst | 238 ----- ...pleo-juno-deploy-cloud-hypervisor-type.rst | 258 ------ specs/juno/tripleo-juno-dracut-ramdisks.rst | 176 ---- .../juno/tripleo-juno-occ-localdatasource.rst | 168 ---- specs/juno/tripleo-juno-tuskar-rest-api.rst | 611 ------------- .../tripleo-juno-tuskar-template-storage.rst | 552 ------------ specs/juno/tripleo-on-openstack.rst | 246 ------ specs/juno/unit-testing.rst | 187 ---- specs/juno/virtual-public-ips.rst | 159 ---- specs/kilo/cinder_ha.rst | 183 ---- specs/kilo/remove-mergepy.rst | 486 ---------- specs/kilo/tripleo-enable-dvr.rst | 169 ---- specs/kilo/tripleo-review-standards.rst | 144 --- specs/liberty/release-branch.rst | 219 ----- specs/mitaka/external-load-balancer.rst | 169 ---- .../puppet-modules-deployment-via-swift.rst | 202 ----- specs/mitaka/refactor-puppet-manifests.rst | 129 --- 
.../tripleo-mistral-deployment-library.rst | 274 ------ .../tripleo-overcloud-deployment-library.rst | 244 ----- specs/mitaka/tripleo-quickstart.rst | 140 --- specs/mitaka/tripleo-ui.rst | 175 ---- specs/newton/metal-to-tenant.rst | 220 ----- specs/newton/os-net-config-teaming.rst | 197 ----- ...pacemaker-next-generation-architecture.rst | 229 ----- specs/newton/tripleo-lldp-validation.rst | 229 ----- ...ipleo-opstools-availability-monitoring.rst | 186 ---- .../tripleo-opstools-centralized-logging.rst | 147 ---- specs/newton/tripleo-ovs-dpdk.rst | 232 ----- specs/newton/tripleo-sriov.rst | 250 ------ specs/newton/undercloud-upgrade.rst | 272 ------ specs/newton/validations.rst | 159 ---- specs/newton/workflow-simplification.rst | 212 ----- .../capture-environment-status-and-logs.rst | 133 --- specs/ocata/composable-ha-architecture.rst | 201 ----- .../ocata/containerize-tripleo-overcloud.rst | 212 ----- specs/ocata/gui-deployment-configuration.rst | 236 ----- specs/ocata/gui-plan-import-export.rst | 154 ---- specs/ocata/om-dual-backends.rst | 190 ---- specs/ocata/ssl-certmonger.rst | 258 ------ specs/ocata/step-by-step-validation.rst | 149 ---- ...d-party-gating-with-tripleo-quickstart.rst | 258 ------ specs/ocata/tripleo-composable-upgrades.rst | 197 ----- ...ripleo-opstools-performance-monitoring.rst | 105 --- specs/ocata/tripleo-repos.rst | 139 --- specs/ocata/undercloud-heat.rst | 177 ---- specs/ocata/undercloud-ntp-server.rst | 142 --- specs/ocata/validations-in-workflows.rst | 224 ----- specs/pike/aide-database.rst | 185 ---- specs/pike/container-healthchecks.rst | 148 ---- specs/pike/containerized-services-logs.rst | 305 ------- specs/pike/deployment-plan-management.rst | 230 ----- specs/pike/environment-generator.rst | 167 ---- specs/pike/gui-logging.rst | 121 --- specs/pike/send-mail-tool.rst | 129 --- .../pike/tripleo-ceph-ansible-integration.rst | 571 ------------ specs/pike/tripleo-derive-parameters.rst | 440 --------- specs/pike/tripleo-realtime.rst | 
235 ----- ...ipleo-routed-networks-ironic-inspector.rst | 386 -------- specs/policy-template.rst | 126 --- specs/policy/adding-ci-jobs.rst | 146 --- specs/policy/bug-tagging.rst | 150 ---- specs/policy/ci-team-structure.rst | 246 ------ specs/policy/expedited-approvals.rst | 122 --- specs/policy/first-principles.rst | 257 ------ specs/policy/patch-abandonment.rst | 109 --- specs/policy/spec-review.rst | 163 ---- specs/policy/squads.rst | 141 --- specs/policy/tech-debt-tracking.rst | 113 --- specs/queens/fast-forward-upgrades.rst | 351 -------- specs/queens/instance-ha.rst | 145 --- specs/queens/ipsec.rst | 189 ---- specs/queens/network-configuration.rst | 115 --- specs/queens/tripleo-messaging.rst | 316 ------- specs/queens/tripleo-ptp.rst | 141 --- .../tripleo-routed-networks-deployment.rst | 733 --------------- .../tripleo_ansible_upgrades_workflow.rst | 190 ---- specs/queens/triplo-ovs-hw-offload.rst | 141 --- specs/rocky/custom-validations.rst | 160 ---- specs/rocky/logging-stdout.rst | 172 ---- specs/rocky/split-controlplane.rst | 248 ------ specs/rocky/tripleo-barometer-integration.rst | 112 --- specs/rocky/tripleo-ha-utils.rst | 143 --- .../rocky/tripleo-rsyslog-remote-logging.rst | 276 ------ specs/rocky/tripleo-upgrade.rst | 100 --- specs/rocky/tripleo-vitrage-integration.rst | 119 --- specs/rocky/ui-automation-testing.rst | 123 --- specs/stein/all-in-one-upgrades-jobs.rst | 233 ----- specs/stein/inflight-validations.rst | 142 --- specs/stein/nova-less-deploy.rst | 638 -------------- specs/stein/ostempest-tripleo.rst | 154 ---- specs/stein/podman.rst | 322 ------- specs/stein/safe-side-containers.rst | 162 ---- .../tripleo-routed-networks-templates.rst | 522 ----------- .../stein/upgrades-with-operating-system.rst | 747 ---------------- specs/stein/validation-framework.rst | 279 ------ specs/stein/zero-footprint-installer.rst | 127 --- specs/template.rst | 226 ----- specs/train/certificate-management.rst | 197 ----- specs/train/undercloud-minion.rst | 167 
---- specs/ussuri/mistral-to-ansible.rst | 205 ----- .../ussuri/scaling-with-ansible-inventory.rst | 251 ------ specs/ussuri/tripleo-operator-ansible.rst | 331 ------- .../victoria/simple-container-generation.rst | 427 --------- .../tripleo-powerflex-integration.rst | 262 ------ specs/wallaby/ephemeral-heat-overcloud.rst | 248 ------ specs/wallaby/excise-swift.rst | 188 ---- .../mixed-operating-system-versions.rst | 267 ------ specs/wallaby/tripleo-ceph-client.rst | 210 ----- specs/wallaby/tripleo-ceph-ganesha.rst | 158 ---- specs/wallaby/tripleo-ceph.rst | 832 ------------------ specs/wallaby/triplo-bgp-frrouter.rst | 245 ------ .../triplo-network-data-v2-node-ports.rst | 675 -------------- specs/wallaby/triplo-network-data-v2.rst | 348 -------- specs/xena/ansible-logging-tripleoclient.rst | 304 ------- specs/xena/healthcheck-cleanup.rst | 217 ----- specs/xena/keystoneless-undercloud.rst | 196 ----- specs/xena/tripleo-independent-release.rst | 191 ---- specs/xena/tripleo-repos-single-source.rst | 339 ------- specs/xena/whole-disk-default.rst | 307 ------- specs/yoga/taskcore-directord.rst | 514 ----------- specs/yoga/tripleo_ceph_ingress.rst | 259 ------ specs/yoga/tripleo_ceph_manila.rst | 231 ----- specs/zed/decouple-tripleo-tasks.rst | 253 ------ specs/zed/placeholder.rst | 9 - tests/__init__.py | 0 tests/test_titles.py | 108 --- tools/abandon_old_reviews.sh | 169 ---- tools/unassign_bug.py | 56 -- tox.ini | 18 - 153 files changed, 8 insertions(+), 32439 deletions(-) delete mode 100644 .coveragerc delete mode 100644 .gitignore delete mode 100644 .mailmap delete mode 100644 .stestr.conf delete mode 100644 .zuul.yaml delete mode 100644 CONTRIBUTING.rst delete mode 100644 HACKING.rst delete mode 100644 LICENSE delete mode 100644 MANIFEST.in delete mode 100644 doc/source/conf.py delete mode 100644 doc/source/index.rst delete mode 120000 doc/source/specs delete mode 100644 images/split-controlplane/ceph-details.png delete mode 100644 requirements.txt delete mode 
100644 setup.cfg delete mode 100644 setup.py delete mode 100644 specs/juno/backwards-compat-policy.rst delete mode 100644 specs/juno/haproxy_configuration.rst delete mode 100644 specs/juno/network_configuration.rst delete mode 100644 specs/juno/oac-header.rst delete mode 100644 specs/juno/promote-heat-env.rst delete mode 100644 specs/juno/ssl_pki.rst delete mode 100644 specs/juno/tripleo-juno-ci-improvements.rst delete mode 100644 specs/juno/tripleo-juno-configurable-mnt-state.rst delete mode 100644 specs/juno/tripleo-juno-deploy-cloud-hypervisor-type.rst delete mode 100644 specs/juno/tripleo-juno-dracut-ramdisks.rst delete mode 100644 specs/juno/tripleo-juno-occ-localdatasource.rst delete mode 100644 specs/juno/tripleo-juno-tuskar-rest-api.rst delete mode 100644 specs/juno/tripleo-juno-tuskar-template-storage.rst delete mode 100644 specs/juno/tripleo-on-openstack.rst delete mode 100644 specs/juno/unit-testing.rst delete mode 100644 specs/juno/virtual-public-ips.rst delete mode 100644 specs/kilo/cinder_ha.rst delete mode 100644 specs/kilo/remove-mergepy.rst delete mode 100644 specs/kilo/tripleo-enable-dvr.rst delete mode 100644 specs/kilo/tripleo-review-standards.rst delete mode 100644 specs/liberty/release-branch.rst delete mode 100644 specs/mitaka/external-load-balancer.rst delete mode 100644 specs/mitaka/puppet-modules-deployment-via-swift.rst delete mode 100644 specs/mitaka/refactor-puppet-manifests.rst delete mode 100644 specs/mitaka/tripleo-mistral-deployment-library.rst delete mode 100644 specs/mitaka/tripleo-overcloud-deployment-library.rst delete mode 100644 specs/mitaka/tripleo-quickstart.rst delete mode 100644 specs/mitaka/tripleo-ui.rst delete mode 100644 specs/newton/metal-to-tenant.rst delete mode 100644 specs/newton/os-net-config-teaming.rst delete mode 100644 specs/newton/pacemaker-next-generation-architecture.rst delete mode 100644 specs/newton/tripleo-lldp-validation.rst delete mode 100644 specs/newton/tripleo-opstools-availability-monitoring.rst 
delete mode 100644 specs/newton/tripleo-opstools-centralized-logging.rst delete mode 100644 specs/newton/tripleo-ovs-dpdk.rst delete mode 100644 specs/newton/tripleo-sriov.rst delete mode 100644 specs/newton/undercloud-upgrade.rst delete mode 100644 specs/newton/validations.rst delete mode 100644 specs/newton/workflow-simplification.rst delete mode 100644 specs/ocata/capture-environment-status-and-logs.rst delete mode 100644 specs/ocata/composable-ha-architecture.rst delete mode 100644 specs/ocata/containerize-tripleo-overcloud.rst delete mode 100644 specs/ocata/gui-deployment-configuration.rst delete mode 100644 specs/ocata/gui-plan-import-export.rst delete mode 100644 specs/ocata/om-dual-backends.rst delete mode 100644 specs/ocata/ssl-certmonger.rst delete mode 100644 specs/ocata/step-by-step-validation.rst delete mode 100644 specs/ocata/third-party-gating-with-tripleo-quickstart.rst delete mode 100644 specs/ocata/tripleo-composable-upgrades.rst delete mode 100644 specs/ocata/tripleo-opstools-performance-monitoring.rst delete mode 100644 specs/ocata/tripleo-repos.rst delete mode 100644 specs/ocata/undercloud-heat.rst delete mode 100644 specs/ocata/undercloud-ntp-server.rst delete mode 100644 specs/ocata/validations-in-workflows.rst delete mode 100644 specs/pike/aide-database.rst delete mode 100644 specs/pike/container-healthchecks.rst delete mode 100644 specs/pike/containerized-services-logs.rst delete mode 100644 specs/pike/deployment-plan-management.rst delete mode 100644 specs/pike/environment-generator.rst delete mode 100644 specs/pike/gui-logging.rst delete mode 100644 specs/pike/send-mail-tool.rst delete mode 100644 specs/pike/tripleo-ceph-ansible-integration.rst delete mode 100644 specs/pike/tripleo-derive-parameters.rst delete mode 100644 specs/pike/tripleo-realtime.rst delete mode 100644 specs/pike/tripleo-routed-networks-ironic-inspector.rst delete mode 100644 specs/policy-template.rst delete mode 100644 specs/policy/adding-ci-jobs.rst delete mode 
100644 specs/policy/bug-tagging.rst delete mode 100644 specs/policy/ci-team-structure.rst delete mode 100644 specs/policy/expedited-approvals.rst delete mode 100644 specs/policy/first-principles.rst delete mode 100644 specs/policy/patch-abandonment.rst delete mode 100644 specs/policy/spec-review.rst delete mode 100644 specs/policy/squads.rst delete mode 100644 specs/policy/tech-debt-tracking.rst delete mode 100644 specs/queens/fast-forward-upgrades.rst delete mode 100644 specs/queens/instance-ha.rst delete mode 100644 specs/queens/ipsec.rst delete mode 100644 specs/queens/network-configuration.rst delete mode 100644 specs/queens/tripleo-messaging.rst delete mode 100644 specs/queens/tripleo-ptp.rst delete mode 100644 specs/queens/tripleo-routed-networks-deployment.rst delete mode 100644 specs/queens/tripleo_ansible_upgrades_workflow.rst delete mode 100644 specs/queens/triplo-ovs-hw-offload.rst delete mode 100644 specs/rocky/custom-validations.rst delete mode 100644 specs/rocky/logging-stdout.rst delete mode 100644 specs/rocky/split-controlplane.rst delete mode 100644 specs/rocky/tripleo-barometer-integration.rst delete mode 100644 specs/rocky/tripleo-ha-utils.rst delete mode 100644 specs/rocky/tripleo-rsyslog-remote-logging.rst delete mode 100644 specs/rocky/tripleo-upgrade.rst delete mode 100644 specs/rocky/tripleo-vitrage-integration.rst delete mode 100644 specs/rocky/ui-automation-testing.rst delete mode 100644 specs/stein/all-in-one-upgrades-jobs.rst delete mode 100644 specs/stein/inflight-validations.rst delete mode 100644 specs/stein/nova-less-deploy.rst delete mode 100644 specs/stein/ostempest-tripleo.rst delete mode 100644 specs/stein/podman.rst delete mode 100644 specs/stein/safe-side-containers.rst delete mode 100644 specs/stein/tripleo-routed-networks-templates.rst delete mode 100644 specs/stein/upgrades-with-operating-system.rst delete mode 100644 specs/stein/validation-framework.rst delete mode 100644 specs/stein/zero-footprint-installer.rst delete mode 
100644 specs/template.rst delete mode 100644 specs/train/certificate-management.rst delete mode 100644 specs/train/undercloud-minion.rst delete mode 100644 specs/ussuri/mistral-to-ansible.rst delete mode 100644 specs/ussuri/scaling-with-ansible-inventory.rst delete mode 100644 specs/ussuri/tripleo-operator-ansible.rst delete mode 100644 specs/victoria/simple-container-generation.rst delete mode 100644 specs/victoria/tripleo-powerflex-integration.rst delete mode 100644 specs/wallaby/ephemeral-heat-overcloud.rst delete mode 100644 specs/wallaby/excise-swift.rst delete mode 100644 specs/wallaby/mixed-operating-system-versions.rst delete mode 100644 specs/wallaby/tripleo-ceph-client.rst delete mode 100644 specs/wallaby/tripleo-ceph-ganesha.rst delete mode 100644 specs/wallaby/tripleo-ceph.rst delete mode 100644 specs/wallaby/triplo-bgp-frrouter.rst delete mode 100644 specs/wallaby/triplo-network-data-v2-node-ports.rst delete mode 100644 specs/wallaby/triplo-network-data-v2.rst delete mode 100644 specs/xena/ansible-logging-tripleoclient.rst delete mode 100644 specs/xena/healthcheck-cleanup.rst delete mode 100644 specs/xena/keystoneless-undercloud.rst delete mode 100644 specs/xena/tripleo-independent-release.rst delete mode 100644 specs/xena/tripleo-repos-single-source.rst delete mode 100644 specs/xena/whole-disk-default.rst delete mode 100644 specs/yoga/taskcore-directord.rst delete mode 100644 specs/yoga/tripleo_ceph_ingress.rst delete mode 100644 specs/yoga/tripleo_ceph_manila.rst delete mode 100644 specs/zed/decouple-tripleo-tasks.rst delete mode 100644 specs/zed/placeholder.rst delete mode 100644 tests/__init__.py delete mode 100644 tests/test_titles.py delete mode 100644 tools/abandon_old_reviews.sh delete mode 100644 tools/unassign_bug.py delete mode 100644 tox.ini diff --git a/.coveragerc b/.coveragerc deleted file mode 100644 index 2fe19533..00000000 --- a/.coveragerc +++ /dev/null @@ -1,7 +0,0 @@ -[run] -branch = True -source = tripleo-specs -omit = 
tripleo-specs/tests/*,tripleo-specs/openstack/* - -[report] -ignore_errors = True diff --git a/.gitignore b/.gitignore deleted file mode 100644 index c19906a3..00000000 --- a/.gitignore +++ /dev/null @@ -1,51 +0,0 @@ -*.py[cod] - -# C extensions -*.so - -# Packages -*.egg -*.egg-info -dist -build -eggs -parts -bin -var -sdist -develop-eggs -.installed.cfg -lib -lib64 - -# Installer logs -pip-log.txt - -# Unit test / coverage reports -.coverage -.tox -nosetests.xml -.stestr/ - -# Translations -*.mo - -# Mr Developer -.mr.developer.cfg -.project -.pydevproject - -# Complexity -output/*.html -output/*/index.html - -# Sphinx -doc/build - -# pbr generates these -AUTHORS -ChangeLog - -# Editors -*~ -.*.swp diff --git a/.mailmap b/.mailmap deleted file mode 100644 index cc92f17b..00000000 --- a/.mailmap +++ /dev/null @@ -1,3 +0,0 @@ -# Format is: -# -# \ No newline at end of file diff --git a/.stestr.conf b/.stestr.conf deleted file mode 100644 index d959b47f..00000000 --- a/.stestr.conf +++ /dev/null @@ -1,3 +0,0 @@ -[DEFAULT] -test_path=./tests -top_dir=. diff --git a/.zuul.yaml b/.zuul.yaml deleted file mode 100644 index c11af82c..00000000 --- a/.zuul.yaml +++ /dev/null @@ -1,9 +0,0 @@ -- project: - templates: - - openstack-specs-jobs - check: - jobs: - - openstack-tox-py36 - gate: - jobs: - - openstack-tox-py36 diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst deleted file mode 100644 index abc0fbc7..00000000 --- a/CONTRIBUTING.rst +++ /dev/null @@ -1,16 +0,0 @@ -If you would like to contribute to the development of OpenStack, -you must follow the steps in this page: - - http://docs.openstack.org/infra/manual/developers.html - -Once those steps have been completed, changes to OpenStack -should be submitted for review via the Gerrit tool, following -the workflow documented at: - - http://docs.openstack.org/infra/manual/developers.html#development-workflow - -Pull requests submitted through GitHub will be ignored. 
- -Bugs should be filed on Launchpad, not GitHub: - - https://bugs.launchpad.net/tripleo diff --git a/HACKING.rst b/HACKING.rst deleted file mode 100644 index c1787376..00000000 --- a/HACKING.rst +++ /dev/null @@ -1,4 +0,0 @@ -tripleo-specs Style Commandments -=============================================== - -Read the OpenStack Style Commandments https://docs.openstack.org/hacking/latest/ \ No newline at end of file diff --git a/LICENSE b/LICENSE deleted file mode 100644 index 67db8588..00000000 --- a/LICENSE +++ /dev/null @@ -1,175 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. diff --git a/MANIFEST.in b/MANIFEST.in deleted file mode 100644 index 90f8a7ae..00000000 --- a/MANIFEST.in +++ /dev/null @@ -1,6 +0,0 @@ -include AUTHORS -include ChangeLog -exclude .gitignore -exclude .gitreview - -global-exclude *.pyc \ No newline at end of file diff --git a/README.rst b/README.rst index 1d3a41d3..4ee2c5f1 100644 --- a/README.rst +++ b/README.rst @@ -1,24 +1,10 @@ -======================== -Team and repository tags -======================== +This project is no longer maintained. -.. 
image:: http://governance.openstack.org/badges/tripleo-specs.svg - :target: http://governance.openstack.org/reference/tags/index.html +The contents of this repository are still available in the Git +source code management system. To see the contents of this +repository before it reached its end of life, please check out the +previous commit with "git checkout HEAD^1". -.. Change things from this point on - -=============================== -tripleo-specs -=============================== - -TripleO specs repository - -* Free software: Apache license -* Documentation: https://specs.openstack.org/openstack/tripleo-specs -* Source: http://git.openstack.org/cgit/openstack/tripleo-specs -* Bugs: http://bugs.launchpad.net/tripleo - -Features --------- - -* TODO +For any further questions, please email +openstack-discuss@lists.openstack.org or join #openstack-dev on +OFTC. diff --git a/doc/source/conf.py b/doc/source/conf.py deleted file mode 100644 index fac31731..00000000 --- a/doc/source/conf.py +++ /dev/null @@ -1,89 +0,0 @@ -# -*- coding: utf-8 -*- -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import datetime - -# -- General configuration ---------------------------------------------------- - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. 
-extensions = [ - #'sphinx.ext.intersphinx', - 'openstackdocstheme', - 'yasfb', -] - -# Feed configuration for yasfb -feed_base_url = 'https://specs.openstack.org/openstack/tripleo-specs' -feed_author = 'OpenStack TripleO Team' - -exclude_patterns = [ - '**/template.rst', - '**/policy-template.rst', -] - -# openstackdocstheme options -openstackdocs_repo_name = 'openstack/tripleo-specs' -openstackdocs_bug_project = 'tripleo' -openstackdocs_bug_tag = '' - -# autodoc generation is a bit aggressive and a nuisance when doing heavy -# text edit cycles. -# execute "export SPHINX_DEBUG=1" in your terminal to disable - -# The suffix of source filenames. -source_suffix = '.rst' - -# The master toctree document. -master_doc = 'index' - -# General information about the project. -project = 'tripleo-specs' -copyright = 'OpenStack Foundation' - -# If true, '()' will be appended to :func: etc. cross-reference text. -add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -add_module_names = True - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'native' - -# -- Options for HTML output -------------------------------------------------- - -# The theme to use for HTML and HTML Help pages. Major themes that come with -# Sphinx are currently 'default' and 'sphinxdoc'. -# html_theme_path = ["."] -# html_theme = '_theme' -# html_static_path = ['static'] - -html_theme = 'openstackdocs' - -# Output file base name for HTML help builder. -htmlhelp_basename = '%sdoc' % project - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, author, documentclass -# [howto/manual]). -latex_documents = [ - ('index', - '%s.tex' % project, - '%s Documentation' % project, - 'OpenStack Foundation', 'manual'), -] - -# Example configuration for intersphinx: refer to the Python standard library. 
-#intersphinx_mapping = {'http://docs.python.org/': None} diff --git a/doc/source/index.rst b/doc/source/index.rst deleted file mode 100644 index 39eba1c3..00000000 --- a/doc/source/index.rst +++ /dev/null @@ -1,159 +0,0 @@ -.. tripleo documentation master file - -============================== -Tripleo Project Specifications -============================== - -Zed Approved Specs: - -.. toctree:: - :glob: - :maxdepth: 1 - - specs/zed/* - -Yoga Approved Specs: - -.. toctree:: - :glob: - :maxdepth: 1 - - specs/yoga/* - -Xena Approved Specs: - -.. toctree:: - :glob: - :maxdepth: 1 - - specs/xena/* - -Wallaby Approved Specs: - -.. toctree:: - :glob: - :maxdepth: 1 - - specs/wallaby/* - -Victoria Approved Specs: - -.. toctree:: - :glob: - :maxdepth: 1 - - specs/victoria/* - -Ussuri Approved Specs: - -.. toctree:: - :glob: - :maxdepth: 1 - - specs/ussuri/* - -Train Approved Specs: - -.. toctree:: - :glob: - :maxdepth: 1 - - specs/train/* - -Stein Approved Specs: - -.. toctree:: - :glob: - :maxdepth: 1 - - specs/stein/* - -Rocky Approved Specs: - -.. toctree:: - :glob: - :maxdepth: 1 - - specs/rocky/* - -Queens Approved Specs: - -.. toctree:: - :glob: - :maxdepth: 1 - - specs/queens/* - -Pike Approved Specs: - -.. toctree:: - :glob: - :maxdepth: 1 - - specs/pike/* - -Ocata Approved Specs: - -.. toctree:: - :glob: - :maxdepth: 1 - - specs/ocata/* - -Newton Approved Specs: - -.. toctree:: - :glob: - :maxdepth: 1 - - specs/newton/* - -Mitaka Approved Specs: - -.. toctree:: - :glob: - :maxdepth: 1 - - specs/mitaka/* - -Liberty Approved Specs: - -.. toctree:: - :glob: - :maxdepth: 1 - - specs/liberty/* - -Kilo Approved Specs: - -.. toctree:: - :glob: - :maxdepth: 1 - - specs/kilo/* - -Juno Approved Specs: - -.. toctree:: - :glob: - :maxdepth: 1 - - specs/juno/* - -======================== -TripleO Project Policies -======================== - -Team decisions and policies that are not limited to a specific release. - -.. 
toctree:: - :glob: - :maxdepth: 1 - - specs/policy/* - -================== -Indices and tables -================== - -* :ref:`search` diff --git a/doc/source/specs b/doc/source/specs deleted file mode 120000 index 87a40301..00000000 --- a/doc/source/specs +++ /dev/null @@ -1 +0,0 @@ -../../specs \ No newline at end of file diff --git a/images/split-controlplane/ceph-details.png b/images/split-controlplane/ceph-details.png deleted file mode 100644 index 0f1dc2a8cb5c6f4a8b260c0220c13a2823bdad07..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 595482 zcmeFZcUV(d7dPrKSWrO$0TltI3R0za90N!fkuC)39g$umVizkm9IYO;= z3vlPik&~~E9626$;u!hPn`v$b@_)a1+)@4GNcq5}74i?N2e*tpjvQeL-T(Vr%;yao za)U94yGEWyTADIeZZ3lNt=%kb1btl|kXs))BIheZzI3tiynor(#o5(E##f&GYYQ3j z_5N)k_RC+Jcsj|m8)@CXeADfr&1G>xF+pK=g_D;rUzU4lZ7XvJpz?Qj@-KOIdr!{? zGD1Q=K0bmzqJnM@?Sw?6rKN?0uM1tjE$D z;O2UHf86_)ZeE`9?Ckq5`tj!*ot_T12XAuq_@&Nbc-;f1f&q>wowD?=Aj0 zmd-;58}h8b_7?fO_s{qKZZ9XaKk=U!@r}!0?~>W9a8gd_hsYF8zFN3Gd*q1H5jDV# zyS~4z%^rVbt)Ft0I6WI5dS&3U^xN#qeBoEi-(<6w|0dj2jsuwaU?FMnM#`!tol$*v zCNBnt*N?KnZ^FZ$T;Y2G=+>Q=`Xt=YM1mKs%gz)PnR=Bp6)&KgvB(x|ZooRExUp+J zX2dxE>}91#M}DKAJn_Ga|1sfzNcewG3xwT|`g5-jmqz}oU2RVolj%z|6oIA+FAW{s zDT`0>N0v9K$g#Q`w&&0;m~Dq2ZI-ZH0%x!r0A}djecB=Hg5dL!xBs<{wk4cfjw+B5 zo(HkBf^VsL@UlkMv(M|%^1jx-B5X`)+x`cfr9#Tm0YJYiA6-PvRxaQ>KVBh9 zufu@%T{^_Gmjr}*2i0+x#>$KFbj#^Vi7eWBmdi$o znf1ErN8B*Gd`?iFPFIoej{J5sl4Bb@>3I%)Uj1LAT7^qkR0YVO`&@XURDr8y;NsXU zHTQe75I(9>CT|}qL|-uAo;Y4p-$9mVaoNmne4I%rOppmMI!N*zCFnfKF#G++m49hX zLn`Q{_+0g~M#l#*?pAeoec0dEDkV~m>(t02-W_WedsY@^zl7Ew$HJ%Xy$KRn$45vk zjb!`^rT=m#(#T@se1lvzzgoFEK`9b64mPy79I}3L?|Qx)MnR#zNu#j8^)eAfU>F~E zEc3(1C6}zOccJ-au%pX4hv~rk`yt+>ggc>bZZe0MNqU3J$V?f#gc3y92HVBYm!3&7 zCex0|s_tB{v&!PD;r&>pziU=P!(vae=-a5K>5JE?@JnwNfiAag%+vIJBuxDV&^zp{ zhcMHk_|zK1nPk+W)RYyducrU*apt#6vRBtJXmA(DuoL=LdkQTi>h=`JZ%0r2k75UGL1+rpWsp%?R;NP8Yh}Lr)@fWT6si_zHU+ zwQ>&LIS7ZY=(GPKxPNkC1H(F2x{c(EcL~NjaWy-rp@k{2UOKmhGnQD=(j|zPXd73N`rLGW60n28Zr7 
z(YignPB5X!N&S@&CBgS;SyFU|1O~V1ef9R>Q6@%#qEk$&{9(R2?Mi^E%|82?a^vGJ z0!N2kpNpDqrLrL1T!vK9dKA3I9&)|C^NXei_u6E0;8}n@IJkObor%LFaTKcWXBI)j zA-);x@>@yDuT(suG(a6D8pO$5K!d^O_CG=q3wvEp5tSo`3F3ti&IhJ>=Ek-6JAxZ) zw42v!hh8$ceW_VaQt;@o9?sG~-xTmv;gsnXtXVn0xC-l(66t>kjt`B3jC3v|zck3P zbM+*41PvLG*q#-b8TI0pFa-cMn@5c3i&b@4HCx-fkt?KOR zNvk?HVFP`ykZNXI7$R;}k3IX2*<~&l3G|vR1;SilJp+QMp=Px!2)7!!lqPW=Uh3Fu zAL!Ly!k#%^YV}kCHF5G*<3B-9c|sEyf>ZR82;!2H{uKiu`~kX0pJ*^|smZFDPrPVt zaXbH&thl--v#LN|c{dQFj8QnW^&g$S45L zKM=pz6YVn*#cLk?^34u&`0rCvI%-~Glhi7)IwR~E zejwaRJwM2-9jLN)cBp8W@5&cu_na|W%wQZ4PV2(^8e$9+ym$uetJQvkRu&+J*D&Dy zD1w|!f#y=E~(BiRseA~HcIe2-g8l`{)KQgG{u=zO$ zffc`Ab^kKN#Pl-mtquLTRTBRIBx*V47FE}TYA%bXB5g%L`>kH1W+1x83+)65KBM|? z;l2E4o2=!43e|&-Mzs3+G!q7Ea6W5My~(atZ#!&2Qu*A(&|09@7Qr4c_7LF7pC-h$ zmB-o2XH&B5d9VF3Ee1_6O{C)6a!5RD1cIwfgeJ}KAeGYS5}2cm88L45!sR@xt!CHr zgEJE%rJ=7cbre5!-w>jeR2hwN^Uk0Xj0ZJwadkwdk3ua|>ulX?KsU z5MYkcb*fpmHWBuLm$PAVD?##c!~Iw%cllZ3Mcw8kkAFSKS8axtt+r~;MaZ4FfY#DC zlY#5rane~VXocaQ00IU&bFH-eM804TvTv|A^RZY zXkNxcgocS}iS+P5x$Fe>-kqoJ9e&sn<}9Fsyml*7#r5n-Xa*YIIcY1~p@R1REUP=R z#yYEMp%pgMzn)Q-_o=V);a#y$go`YCer2;gF7isE!Hn zJRzO_-qqSW7f@Ls55-!0ZGesxo=W?#{FF)wbVSzdQjOnVVC@Z z)IIsHk4L2bL3vw>3;o}64<@AA!5()xilNHjI$m)39(6!3h2W{1db<1kusN=poR z76(Z4`0O#xM!LS}P>_Bu1F>|~+($G-+&GtVWS(#_gX9u_Tpd*)B$=}Cj@t^I&zFU- zF2^qZ&UdMa5Zs?6Yyc$NUFpLd>!J2O^ynS-nN+X}m1jqzq*w#0h@rEffmqXh$xqKj zCow}e!pX@qlTOq$Q8edfPDMwf&~wc=I_~3qk%LSAH@f9)#*VAJ<#OrEf)~Ix9vzdM z=3F(Xt{)s5%H;V_JGEQoOX{+m0jaK?*X4Jp8!c`EF)lfI2Rm#WV;InhROnws*itx| z%()1xXN`MXwU>fzZAkWe%I(A8Eg8Y+TUk|mnDCL|%IhYn7e2WRUd{Me^sK@knlGDg z$6NxT)scTG`mYgL*!E1sG_!DgEtz=F$5^c?BUmmQRvs6lg0J1zZ#ctJ|F~`qGV)Ej zO+i`Q`H>cLn8g=#j;j}7tR^>@1bHoo(La?lFF*?ov$tEvwVn|>57HP-M5^Q89wS6} zEOz&@*$)Y^p%<2Y?AvF+Dxc$ZQ?STU5iWWmN6)IbM*8elSW8nz8;-z;9h}&H9fy%D z&V&_Ks+p<#_oMW@_w?g87BImqy%1V0Rv`Va9_jEqkt!DW^Y@-N8tg%}5^12hq3LRrz_whP6zc~9QO zJ~yB$RSXN$`_s9krn%}3g)2l@NBs$*q_Kg>S&niL*#sAO>$o5Ua)J`5D*hT;4bI4AfFTD>F=tlWo#BXQr=H|AS~m4q)0Ot~IGF(DcgyrdPp| 
zs)04-K=u5+`(|c@&!Vf>QtuUD2YizT4ezz3FJp7SIqbHEm-Oi$A_`Cqv|wF{{KBWuVT#?CGEqMMUPbsBQ9T2fYpX#Ab?jOO$$*M}4fJ;xFvodz$Ltz!z$8u|Wr|sS%p`{vMkCi-;-i zH#DUVb+Cj;9AI`T)olG-RDlYA^d!`{>!eG+f}4Xc=&^JsnjkX-RT2AeNBP%U!ix%} z61D+x*1YGx<}huebjeq#O8bdoJJdIVgQxvK4mtjVcF+Ly98LH7m2$*oD~~0f%nP=MV;?3ZZ2}u6O^l4gyuK6gEsRGWZx_jR%2$>#Y&+%iHL-q zl+4{qzKpc3hHF#77dOygob@nuwji=Gzzm@N(52w6mIZPuP5+{jL*0*>(1iv_g*ScCFPLJ^A!QlBZ-G1*ZFPRA&7w3&T<; zCx~ui9xTspf|Q%$mv(WFgaECk!bFe;(xTbA4bpr%E~CPKHi9BVYzDK|-Ms-*@>{E9 z$7xVP!-cfL3a!kIt%AZ~z1~fikWnKD+yFR@UE#b49i)=Q&#KW&oeT`&vUquC+TD7|($#LMiA;je-i(fiPSGR92& zipxkc;u>uXVCJlQFknh+^Ceh=-JD^l&>2?vuwF2#Oo-PH^8wfM1{3Q(7e?8Ew9NE5 zw^*h~uz$(U)nCh3W$Ja+0cDVVzZ0WmN*j=3Bwu>Npq{q*Cn%Is*JO|F9lSjs_F&Sgy>mM-^D&>3CR zTe=5GK9W^1m@c1OktoDl_lq@z+e4@(kH<;n45@f3j2p*o7g5C(Inq|lW z6OSw3qunSvW`!=+caQV-DXXM+RIttMVz&GM^Lkt&HBMa=2#b2s)*~4c-KEO!WT*7B z6%wYvq?$XcoR+xN2o;+jqn+NL!1|kIy*y8u3w1s6s?$7Nv&5_Oezc%MVVJ~Ff$<+c zb#sQZ$v%}qQH5VNm;d;MM&XR3jT<+sSJqZwzlIvz1#M;w&ok&gnj{ueMX%^|F!eUq zC!3ZQ%95b&v;2MnC}O5m!)565=0>jwh(MF6p9SL7NOM%|4K%)-&N#$(0i&2R2vzyi zoezmb8+>*slQ~^C0S#sd^{O`YN1+&F#~)g?hlBY(e_T%R<{+9s9S8+v4JK|O#_`1? 
zXzfl2E!U^wU+Fq=nyMQ2@hZw7znU71Q!4Xh#uUwikA4Go)1 zOfbI2b z?Z@ud%V*VDVxzsLg>7+7U?+}D@GaRs^9vi+?b5UX%6EAcqqX(D39}X5)*0Gj{cd9` zScEnHVyx_8<<_eV_S?S?58t?Whtu^_JjuwR_!HqV_^Fn^P*|;Egw)IO zK?V0&=X-N4t2Ctxv2D0kqfhd*T`Fs|x`Cv;EW?5&qFK8sxo(@Vl|ctIO)Sf~3A3v+ zRpdZ*;=P(%RjN0~w10t&k_CWb7WZ)m%xHD?*YG36o$eOZV+XrYa*i_PGLrlDPx}Dl z`0Y{8;C3Gk+>GHJcquMs$oBytXZChl)U{d$`V$eD{sE}@@FegKJZ#mKlhWJL3_0L_ z`}Aa8_lpF|y<%_#^Om31<3<&Ihpst3MLdhMSbc;S?{C;^w9M7@s=#VCHeDN@afC@< zVrqQHHW#5-D)F?7$!ruBv9Gtd7Z(qKP~_bgri$szEB=ujzuF^VRtnx&AtY|H*m)xN zrn+GIY_7aG!7zT?J8opyUyp8Ogk!+vx}9Vy@%^XF{rZPml-|odvxv~0+tsAn8(CXC zD`(tp!wdN^T56?bJw_<9`NB>H_M_)NMeHiaTjgXtV)lX;>Dr8XA?!ZQ`a-pzlc7F( z7}QIzy)R=MC<|aZ#Ai`*w<_hZ2q6p!Gq~_KUM21AO*trsr$kJFe*6eySKIIGbMzax z0YQFNchebSyFeEsIGts7mov)-x8S3hs=A1lw*0-k~pPw1d%7 zm*@A$sl86lCZ1L(!qt)QELu9u!X3inyE5oTxtC#w1P+4Psp)_p0>yoL{krNZ( zp;DO9AqFLn>}E9_#D0mWPiTsUB(zQOy%a*8kj@F|g-R4&Gmxia0`eJ%NkMCGn3*TW zu_`jU>znI74D=agqURhLBoXE3-^US)8YiJ@btWTj^Q5`a-1sa>iPvHI8L5&JbkWRS ze%g0i3w3YCH`<9g^rqdh+%ihqVpXRaKV#Uu5s(8n(85VLU86BIX}+57b1kIiK}2*$ zG@1Hn`A1=g7?g8I_mmn6Kc@fUxGZE&$(=nzDq%blkxh8fsNgU$VmL821`NK!$65j@ zZ6f+bC_su4K#;T$A5A&rQmaCKu7ZADmEI?xw7&S%DYa)VB4hsjv*`gXX3jaDHpC{u}QglC~L;j$U4IzVamfK$L>2jOHLtcw!LCBDN=Fy02vp=(m#n#SIbAb=@(6kh8l298 zN=56_c_k@BdzmA3^si~GqF1TW@e(n!uUH@uS{H@J^+OXr&eqY|UcmA(9ijgb> zH%4I0_L6cg3vC)k!-YhrThlBj^SnQaZr*&@BD=a|Nhd8*18p{AanIZ|+cPIUpf6yQ0#gc;mc*^pnA)|A^q(@LqIriZlRq?R-4k*U=H|Md zKWn1*hkK6SW}}dDK-EKl*kTYfOK+S)T0hKnI`aINCPu_9rzUpG$KYl;4sF*6`|5RE zG?9;#E<~c`TYJ zw}+(AY$P>p8eok!mhLQDw0%vdSbk|PXP3c(9=@rE`|2n(Z~mU$xz4-tWPP4d)z2us z0gb6mCdJz8Pz~hpl2;_X0HP?3Z26X2-frx6Tq`^rcb6!W4ZFv?vb$SkrM7E7D#@HR z7h$EFNiS(kKjbV>gbV@eu?Mrn#`&6J2B*(NwAyeXt<$hho|<7*9K5ss(CDrSaI73u zYl%(ClvE;O**sv1i;HjbiI=_p)*=evnm(FTkmC3=fnR40$Wu}4 zeo@xx2E1N7GS+q1NTZ}i!VxOCh;*o&jH#`6Q>V~wx>RS{xEsuZfY2fpnu!0FeBnPr zB)jTWx*s4%gZ!RgwLupZxO{6pAeH2GX)}}3h2?N;aa^0fZ%2c+Q$@Z?|FnKOzAEaN zVIki8>F!8$V~&7BkPXBz5$D6i_!pfho3jX9|En*Ga~QF^&#$h2i4+82cw8KGXLCiv 
zl%ELUo2i1)blQygkP9*6K#rbFUHM-<$l-T9Z@lPPPp}E|beQP!%P|@;wQ`2d=p&Lk zE(w*sdUob+vmtrc#jIs{lWCuiRyFGEkSpx!vAcfTTLqu%Tzoc{K`Kvo4OO!)a3nWL zebV8ZDOi&xRY?tf**#;@=X39|NRcBNobbWe8`4aCvMg#~bAM(z4!k#)BOjdq<_+-P z$6#QQuuf2tE4lo*=>#BDHE{nLL>zi2M3B7D-GNfAP6PP3MZzk6N8t8Oke5?2AJ5}^ z!kwh5=Yvd;)+um7q@15CK>bDkM0|9eN}w=V6Jwv_q$AKwgE5z0Q{RcRbdOe9g0jAD zj(V1E_R$=JRlL%f%VLP=1p<_V;Cr|V`{|d#ZC@}~-23q2vUB&Yx0;UeZr}?TCdQp- zd4CJCeX8tg^3j~P$eS}S4K-1nS%JGK5@#p*Z?`Z|s$6-`WCdjy;@2%F*P#uwq{H!J zX9(8JSZ%~Dst91@EwxW8vAlWk?=wpH;6RbjWBE3&Geb7(unC1rI`k5#%$9M-p32{r)2PJ(Zv8@a-yFE$$E z;7Mqzq-r$E%H&06Hp*oWrJ9W84LYahI5u3Bkx28%r7E2bgf0$BFp}410{->dOxyWW zxt}_-RK+wNYIHlAdfqo%nPU=bz|s$G$Ahsd&gVtq9tNLw+;(1J<-Ow7+^b#jD3m?5 z%uB0UG3d!Dt!FyV`$!?Q1I7|-QA4kaKbh~MXX%mpakhGui{`YbFe$ltF4{74NHzUB z8BFFjU1CeViBH`Z&M42Y>DIPd63m8#kXL71{k%>avFN6k_!2==WWIsw;E>(Q&Ux~h zr_iA_GWx4{$hNHa@AQY&X8xcU46{?~wv<)L{Zy^R?i#*h4p6s1+QP)SLFRy|F)^-$ ze87M#$DN0W9`I0tvB-`dE$QsgvfVS1eTiBun9GH7o zVeVBEfLv@aZVf3Ms2SEoHr3C-Y$~~SY7p2hb#+TZAi$rXcL97WYh|lJO4c$rZyGf* z-`Kb-p6$!Fbch|3VSm3II>eGP$$7}b6Phw1iZKi%v-Sv%Mde%oj0BeAX6M?{N~rYW z|CC1#CvSBpfxtWZ0CmfIj3Tfe+lmhfeW`H|QQ%utrB?d2k<59s7J3go)u*(cC#j^f z#m40nn8Ku3ebHc9_Ff9QglgZEfY4Zv!`&J=nR~xBD4v zDl3fT7U_bkuCWeHhwO&ym4-#YRW31AWKRmeK?hmR$Z|-qecZ8q%No%+!aiU>USydz z0XorxK%r4+;6{WB^K2l&?e`K(Lm(-Lge5@dL-Q;p;XkgUTw6(XA^H_1C{im4PXunf zy;G6)<9b+y+;qCFcybI2+V2C|xnDHd|Kr*rhul=>KBj9$oE8}AJ745z_mh9U%dq3@ zd1>vT+(on-(CY>l;m$)OhIZB*A+*4nLx|ztj*uQc188g+hlU44*7_1Af-;(k2!GFc z`871$Z)B@+XU}JDYzd0aS0H6bVs=r{MT4Lvvmm0Z+bSB)qL)P)Xu^@3j4nZC_nY{T zn>3S~_-K-wpvX=9$W7Lh_nZ7Z%B%=W#30FQqbaQ*YMZI9^B@~dcBKf~Xwy;2C5en8 zcfMS82g)9E>Z2Z}f#mS=bbJUbcV!&`^-HGCW8C&+{0PiVk&7++RPGazmXEFfiLgW7 zx=&aMxrtT~vB_$mFvIG{b9DWbKSBD?V@d5!!rpz_dZzsMY*bS>U(hU6uaQL3+^*94KNi};s zF!IN(Vb{NP0lr?4??vwPwf8s7AJ#i`v=;m}LgNSoj7w6HyEj~Zu&25$;p`-Lre!bh z1}E#Ezm(SgQOMGy38~h7FUdYU&=56WSqQ)S$H8tC;m?#3kvur^iH$QKQSE;4gaGw3 zfPeJ=F8d$Wj5}Y+jUUkt-otKVx`3*txY^c&bsdYI6Z~MF2B~Z!8x)msU1#~mkADB< zqOG0j7PD3!b-^kKlm#q+BMaag*AB49-O{$_`{yb4P0$)+OqUj}0Da`(;o(DCJfzCy 
z9FOM+heCxi$)gp(F+UGj*4AvaI}e^$^o&??%c|C~{rG?1H9uxcX zm|_$jmU(u{=ScU-V>(plflmB1g~O-)sBMQT7d~46|HuR?0LqbUs`D-#dHMq$ZBLkz zS+4E*0XEpGJJ6)4T4Q=5FaKXZp8uB%%`+ievU^)(B2n8BOLi)z@U0hL5`Ii2UjN0I zqNARrlllYw|3cQmz#(BA?;Wd(p_x(|HFO6ILY%C|RZ-X}`+y8A%tQ}cXZVkiEwWGK z!dJ*ZVe$d?59%aeji{zM7)&|jg8Rk-c#o|d)8&!v;_CkB|GtOS;tiQ``?#AR&5_u+ zXVeA2--))EBB$hIdhDyDjZ~mebgkcrIo0=P|5Z_vFHxYaQaa=w=`F|*^bLNN0qt@` zV)~_6o|17kp~zZl?mkC>C;!*+3i)2?d(JdE+7S*I5t*n&-fKQhi%~1t#gI3fSd=m@ zDEtu1zq!yez03s(zQ(R&P^L)ao{~tPqRc#G`TM_9XHiP&Kd%A@7wVYwLxdt~qq-bL z)^Gk-t{k%CRv2@%^# zJ`W31@~1tOr)%aY7(iggxQ*87n!%c*a1zC$ts98pcg4CaiosEHO!|msP&25w)o>Nx zA0{~%4sH$(PA|yvnqzVY%~rI!8dv{fC_<=s|02Dz`a^Ian~Wb}iV$kcJ{avF8*6~^ zB_Mr;_0CktfsjMGue`ead^Zwo#(pQ;k=2ws%k4r}%E7TcrG_@Ep#+|qIff-kCT2zeL+8>+@0 zV|M=1OwH*0N{7^z+L9SKAI6d--(dN6^U9csZQp9IgmJZQEo?-hN5Z83G6I2Mxj6Zu zfK^@|VNf2x@OE_?Au}2S#D@lkCYiv^L1rWX2I*DD_S%2k>;WNA9wFd6%TQyY6NL94 z8Jm5;iAAg+yN?~eqo9@0xLfI5A6zJ&IuSJY98rWYS0v&3y~Mq+B~8vC`L~0yY%)YS zuT!M~shB`SV;xBj0ly5Yo~j`3uA=JZ5W12;!Vbn9)fhgGh6FqwCrtRI*P9Y{8WA6b zl$}(m;g{L!)(PV*X`Nf!qguim&YTw8Cx@F>^KGnvO;v49zo1X%K4;L`1tO< z$AzQwYVSVl9M?8AA54=JGHng?lki!enG%5pJ9-A7^W2d$K+?LOfYGbVrWKaT-l!I= zEq2CZP)4^j;LGdd5kfv==)|FoaWH~?7}%ih?$^F>fY$H0e!)R;Q}Hq4-Cgcq?89lQ zOIxXrz0>R5x99RS*nD^8>x5vK_33r^gZ@=wEr>?MXCVdl=hLbxYQ}+XM*dW@2JzuWx|WGq$*TMS|-!4cG8ETj*3y+4HMuxYB92tFOK51#9wxo#^dGj5$Zi zn%{uNe-5j?>2fnDyQVg}*~NbS<&&8}mJa+FE>C)5PAG#K?&cEI$hwLiB`nC$1x0u$ zc*);WY#tY@w5vzGZ=9#$RjVIo4RXQ?M)Qv6;a=7Tu33YG%I-Bh%;20zb8D4uBH|EE z?h8?pw!O~6ALJeBQt3#nCxW!@Jl60X4@v_2wE)7^<)u^0qI1BSy8V(Au~WTna~}qrlD0}s#^DdcC%5>gtr31V zyNhH2agLlN`MF~Wg)$!Az}^j+@BBRBseKulJ3S#Pnnv4Q%B$wRyfcca3^lYlGqVBk zMhTp;s{;lBy%C96TMbljeGkQYj5aiU$EMwwcE#iXtmGXbrtMlIAm^I?Fa4P8Mj9_jX{>cGT)FJ2SUoT@|X;UBU$T zL{-MyP_wIeJ*_`K>N|;#lsrOEs_3-U#=Xns@Oy?!ar;j?)!N)*JE`Ve{&U5@4L}Rd z9h0tjZg~3i>E`m9j+%b8{o<;s- z<2m4|@3?BKKI?nk+o6RZdB(bqhnSvC;$`O>A0PDFXrdJTMcz-Lj_ z+GozsZiR1&KRFgzk9y;rL}0X)MK&2r_~aPp1&RN_=-FGc)pI@stJprAlDBDlkfhwW zGjwe0%uvER|A*}iTi=mDdBRgkiNfjlS*;snT3k<(y>EVb!#}u1Nk`_EUq!m1O0)aQ 
za2`Zv_fx#2)JkrAWv^gUsb^w@_uS=aX*vTRzmM6@CGrwI{`kGCFVAWG{+(A!dLcG3 z?dzo#*AOnyuvufPq;Z>=g!KV`J+kh7PI_LPM z(jIUBk8hmc_Ahzb-M8iX5i5>UY@kNh=HQB{1Y#1_W+JPK)6_h(&tFR^4=|2Wo>3?p zl9T)XVx=bXzMW}?kt{L=R{Z2mzmczJ{}EhAK2_qRbf&{f%gwt9wrmY&MX))Mrz zyIK7{#bR9H)6;24D2WK0h7-5fsNH;TdJo-He8&jSAo`aaWZW%#uQqS>Q$owH@3lM&41a#4TWYy3(Z$#%4lmkWQhfW-j^-WO8Psm*3 z`50l_eIVa`?7g^pR;{FJ0jEVvf-l45-PA6lsdH=#rp&F3 zt6X78de9hRcJ#X_$QJTkvUr`)^uh}Pr;4?Y`Y_BAe^`bws&}10knt;SYQLz+w$Tla zXCE#vg~t5zdl(^VoPQCW^!G{C)B3Qop@d=$E`)S#;EJ{%WrlvEn{0%X=)mn`W%FM` zz}OkTrHoV1oem+}E;dl$Hkr)4#TuK=z6&sfxsTB{6UtX(dtPx`h}>q+@tmAyKZjj#8Z+#7Fj zkEJsP(G3BeAN<%VDeQ7I_^b}Gv#~&tt9-=v$k%UfXdWR8p8kXKzPM%hzCRt=``PnSH(suN+-B_a2kL$^{QDk#^dVp~Usd61((`esopuVB;0$OGa3x zT@T6h6#1q1E>%m-t_bJXZ=~ai4e~$OaU_I?%_K$MeQu;OD`;uYxjT+4N{M(jk$l{@ zA3PQ%Q}|=oW3mKU<^}y4$Qx8=lS}vIkGN(xOL-!JD)7ohVr#j`7{Tgq7xIn7c6;^!k_K>wXKdm4Icy~v)Dlhe;nyz854h;=R)A4}$=I6MzrNQ++eVqlb{7M* zBIl=Vocq_^K&4(DB@6lANJq?K06)OiX3Zo8T8XJ`#=k~D1Kx5Zt%C-&Gy)h&g90!C zCIK^4h4-Z0$l9*koWis_yuUJ&l@t$>zkEPmyop6sgQ*Q_7A*=>sbVZcUs}on1q;Mt zcocWl#^F%&@>S}gT@G6EZs>A6&#Cd0j)QlfJNK8O@KNC17{H_bGeROYtX(4qHvXHZ z z6#LWq_Oqk5+x-LEu)Pm!FOcxW{`IuF%w7%i%{cy!vbwp{pq;mD1KJ9^pPVf$E~DxK z(gPLe-+|Al?uq?Wcl>5kxgR}38gaKpSWuDu%H1~5jWJ#A_#fGGWN6!m4;JtgA68P0 z({sV#E_p0`%x9?1wYOXfx$bPyug9!{*~VB8Dp(_&p+4&gF0Wu~8%Fs72Pa{dla+uv zKnb7DKb?tvQI*uTE*G0h1^Xm2s+4O#?`2e(NzBQnjmZ{lwv@NxR@Vx1qI(9U79v%w z1fzO#I0hU?3xAkw3w5e@Y{_?o3PP_aS@4~l4DsS?EzbPU4f)(!D$+{6mI_DLW?ZCrtWfauN7SY%7}7d-0T!8= z0*jn8u1Rj#mXbBa%9*LQmlv%h)9Kh@`oqxD6#~7gKabT6mGrCO@1N0sTE)91oG1vghaQ9m~OvY^=d?JW-G&psA(?K>XBoJ(WW0= zlhY|$DiHSJ)p&ci!AO3tu1XTxDOT0i(A>{vBWkm! 
zekZ8Ci(P@dSN9i8b>1cy9?n}fG5oNcUoY%AA54`2_eMLgnM5ISOi64725#flw&8O1 z+I22jEo$CvKvbMd%Tnim%qNh7_r5q+Xug?-UbbUq`_%M^sT3;0b7;=Qa`iU8(WR)I z2Z!Ct%^iNd12UHQ3u7n0Vl7-AaBNuX;=nR@wYV$Oul*>RBG&MNw2`(#+^CtQzV>vh?k509%*AQB!Hs<8EP?YXII zH(ZLmc{>?gr@W`%?#BKj^!U0a(4sJ0m7C%}!3FNNX~o|tNQYsof@ z(^-wIm){J7)I6DxVw>6(AIoF8uLs>b@A4GksbJ-cop@;te;ItU8@dwcEkGrub8G-& zw>?laHW;~Wsn*@<=GSXl`IbhOAODF*F@?pWPXx9)=0uu*Z9asZ2SWgtXg7?0L8+%P zIpRzT(fY>*-PNP)?lnTI#4GbwQ2Z=v5k&ZMs!A3D1-aB2d*CaRn%$Dt8X(gWh?R^$ zP_Y;CvDA-bD%pVI_s10or?u@ye~YntMIUt|a>`KU7*_5IL-G^Qh>GKsXX4tth1Xl%OlQyDm9n{pmL3tJ^7*4Wp-Tm9{r6w$9q*5leZ85I_&+{|WzTsQ3IHjOW_hkR zED6f&eR*3YyIybQQWwS8-)oMGX|io7qUP|~Lc90J|l{&r}Rq>*3EMotfE}bR2YQ~;*a=P-9K^QT(OlMdJ4ke8Z z=V`=hBjY`ff0*mD z=XFBtnge^+Vt#R+|6we=F(TXW{+KqzcPoP64yD; zy-!kz$T?rD8K;;=ycyF(!RT1S!MV-%ULm*4<%L}zj+5>9E4wK#OmRth@vJDa(SP3( z!U^j>RW5b2-bk6nt^UExnlF#mj3a2?Nu;*;Nq`og!Ae`DjoozI#ue*jn9r{PrzZ|V z$=@h4n3AOnzQ)5uKo0hjpcl$y-Ha^ zM|{r0POMe5bUt=DCuRVL4~^4d5->>!!cW5oXk?aO)F!>Mwa~9^CWSeFPPKuB6K!Ct zAq=n{;%8X#Irr8^kj?1l3cm6j*9ZEwwiaFh8I{>DB3$j`tD!N3BAxUCE7FHl6VvHa zVh7T0J4aWKiOYknutB{abSyZ9Lv>7PV|K8#z>Gg)R&~GIJUufD^vC0S3wJ@c2lq?* z-Q2hL*kG;SaE;QOo?5eDZ#`c6BeJl#^&uAj+)hzpwb;2^D`p}-P@az{W2qJN*y*2XTWVXR zab9HaoW0k@1p8NYNp_k%jy(Y|RuIwuvt?H%NpfxMDUh?YrxVC6A zwaT5P+t=+f&o**EOgE> z{{)h6QN$SKTAN9~nh<%-KIRdN5znBLP2S%FhVXJdcpDF$c7(;B$!yw5KjkgEIh!@` zTrq`MY5!bc(%Rg6r@%Y~S&3q-xVf_Vl10<0DTM}kOD6BE+`&0@@<$3DU`TWEg!wAT z)7uc%tHfYr-Nrj|G4%RVw%ME?;pbQ56;giU9O<~Xp{vCmGP5Hq$nlcMF(29Qj1S?L z{aicPvXg&2*ke1-&Ri0`rE@Qhy7WS6g)@Flt8yaPb8tU1O8yQr@=TH`C_LPz#qp!~ zR_RxPyPI=w7G_wNj;qY*7JB^in}XG6uM%SrjAp+aZ@^F#z`bXjT<`gBG%N+7ic=JK zm&Kjp4Y7frpTkU{uj)RH`3p}R2!gw)jcgEL$LGkw#9Gzh`s{GO zI+ec@J5i;fN_)Tq^5cOPL(MK#*!A5N=enq@y6yX!WPP<`GWWfYGB9<~CzHV~lg?Dp zI~D!2^V`Q{$eoZ2c>?>VCCRIKna;1)et4I^xuW%T{Ddv%To6jc>| zy~&Qr%u^SYMSx{d&$xH7_=T3uzYM-m0cuH55Tx@1^%K)f(PK< zoJ!i;8i-feMf;JXJF^<`0b^l~fN6KUb%ghn#UJIc{RP`^_B_8%Nk?b!{d%qwy@K8! 
z_ME{gf7+{`W|3i2py3G{0JCUBbQjB4ymiPe9FNl(@e`=1dEPFQ{>h}Uq=p(RFc7;M zTCO+0`o6w7=qjzZ6qQ62QY#4k1ojdD>)zxu_xqd(3&tZQzgeVIok+9~V|m6wCjE_w zw`4!q!tjHnuNO+E_EU_x1rQ11li|5mWST*$F?DLO>s#%gd${IN<&xRp9x;@9Ads`? zquQn@CqJ5*p{*xPvJq2_FRdqD&RUM_zSoA_lx@v7Gq6q1$M99>U~R=T z{ZcrGJ9m(!x2&~9n%vAK3fzJP775*du9>N_j-#VMiZ#CRgY6w-w02C_GX8<^6~Wtk z5XFL~r%OEl>7fx+?E`9{-T17%Pq`B<>t3+7Hl;Gcm?|n3>G#C|fzkgtcn#tFf*!n= zWwsmN6mkAcM@JR=coDz%LFw;ru%s!$Jp(KT$-&FdrnC4YITTorU=mVTcz^RuMuR_Q zb?Vjcimyz!({I+5$vh1{@E9jFZ-!)S&+<2+8hMVfwb~iCru5BNOv=vZf?qq_nveZS zo@8va`IC_`!J+#zrQ{dSOZGhef7rUps5r8%3xp5|9xMU`3GN!)-K}v865QQsB)Ge~ zyG!E|f@|Xv+}-`_%;dc{^UbVPKYAh6Rkx1qefGKKYgf_!ba$@(be3Rs+JhlGB^J)M z7I*+D00)lTADYd3le0LK|MIOSQy@lsBqb}V$9>1!v5 zp7qaL8e;*f^bYljw9u@r2B!Sh{1L$1KI^6v3|1li&D{v-8DM{r>Q=bc9nixHe9rmO9yr+NW^wc%l$r>YUi`wF38 zZgF-HDlsL0`}Kdk2}6*;QO9E#w0v2fk#60EQ1|k%e!+6iFhMKcqH*<`G)`l+m)h0F zo@mWx73cohQ(sM-S-c_DUB-~|60+GKg2x)ysFWGW@+A~-qB&3Cu_9|RevDE&*^zoPtkghM(OY8H#%G#aP-k#Mh1p|=J1%`|^Cs6PT31md1Ta+H%^dJD(%v(Yiv&xPU<)+hDuvt_E0Y78R|PQsK( zKj}F%Ws^Zu6GVGHNsWP5AMM8!ICzXH@M>!fV=J%I2_svt>(hF~TN(_@j$2gP&f@IL zsr#deHAYvR*tXLhMsHWX^toQ7PAT=e)O|jsr2DQXC9n4hXUlqyTTdA@nBUma+*?L; ztQS=R8p_%@+}N?q$zSYSwNC&k)*%-V1wTm?U)03J3{DkZtTP7K;>);b21Ezj<}IW5 zZ|YH_K|%lTcxw;4rKKT(Rg$8Gm13r(pVY4x)ev&?GgE^a8y1ED>i1D4OrXMur&gE7{FWY8xkCH#n)A4YNiyvO!ZwR+Zf@>@&#s4c3>9F?yW9u}g1VSE&*N+=u_ zkeMt-D%b!8?#NcE(;Vi+B%}hhHw%|*jyhyQy4H1y>^Kf&1g~?Cv370IQ5`-KL;vMZf8PqQkr)cvY*EXW`t7ef{oBD8 z!DnW+&u)}z^WE_9@K7R#s=U#sy!A?*WRys{BuB8vaZh0aWzW_za`+3DNdb9Hv~;c| z1KIYHk4(y1KqW`!8PzEz@!RvHyiTdm_)@Z}yLq`N{muDSnQ9yaYa{ui7`p zj%*sCW=+gBWuf}v|98cHf5;Rh0H^X_dmcvA8uY(O$?wOFYFDxgrT&-G_8>zmE|y{< zzWnC}LF2(ZKP)hPRI)6V{Antg92CFIpEZGM%>-n@P=UQJ=s`(%DrUe&1 zpEkLzm|tEsz&GeTMD|MX6EMT<2C>^8-ndZ^GeqFK?hu|2B{AJD*bTJv3%Rxj(gp22 ziKP7?`2QvZSyetRG;ePf{Bp)=_dbm`G` zqxSBf8&alubvD)F+57p51Rkc9jYu>KCKX5C#q{#kRJ<3rYu6MAqPRJa!bv6n$gJ!% z_Mv@N29WYT*J%E3OWsC0)P7GoXE&BgL}DS6o*r0^@~X^p4-JfOPGXjlHgQH1tClFG zNQW!&PGA-%Buh__yUerM2c@ix7)+D8os?>l-@lDDNud&+dC@YP>PCGQTEOn{FsrS8 
ztw}OhwtRfsGNIJ`lwl_>+w@WHt!rYU1y=b;JJv|BE5E&eJuqQBXO&Mil96>K zd_RVuWMP?!b38AFCq1v_f7$o0q0yVR+3NMX&>M^l9+KzeZLiv(tFqG55?%T0O;nkJ zC??y7J(;haTQd=33+;2_v;l1tIuh|ow9Q@;L&J{-w%tuZrYN-&>pG7DIK77L$s+pL zTle6Ag;6U4Oz#c?B3&{1=($MpZ2ut!)BJ)e+&mcckG zY02Y&fkz^Rk?!NziD^GTHWMeis+(wUulm+@*Dz=8gcPEClD0>=u+#L>{FCD=WY&YH z8(5FK29MgC6n&pVA4i{#AP*V^lmnrb+d5(f5}3`~mdky-iCa^|g%1|B58=r6hZ(Y2 zgzHO3p7X97>oMNbWhx?>Wu25%SGKotEMxm*W+9Adk?3M#Vg&j=L8_R=A~`B;1>p-O z3zU6uN87a9p5I}a-U%hmPshA(3`+`?*c(ZiYUz|B%?nqQFTf>>4pJmk0w~Cj3IKSv zTtqlx6A~ubw^P^M?++Wmoe6<|kgNxv(BTpt zt_cTS0*mxJQUCfEf@Or#2m8xOJxML$Dh@qY*0AIloMW-dCw{S?A^A;{SXn7y(48XGl#{?9bd-5QsMCV}>9{Tf%DkfMW+C>)= zPFPG>VbRp3U33z6a*}bH)4JXs(b#vlZ$*({d0ibgAhLMNGS_moT#Gb5!qFEeW^jbi zsMYSus8trB8&Wz@4{R_xrGd)OOoH|EA_|$vJ~iJvG$;zEQbQST%Nt6`fc*0j4CI8; z($`_H84$VS817`+Ec2(yc;#Kh(Ya;rsX0^2_Zy zbV&QZzkjguj<=} z^CPahk;`#Wa^?Qfa?ccXIVu5x8E-$=VUX&y6wZqiX)nHOL*a8`3{tF0G-VlTLxw#) z<`p6|Xi~(nItn}v*$KNSyy$2u6W*xKus9PyqhCe64Ys{Z#8Xn6D#o`nonk9#zOOVZ zHZHJHxr{B5DPEM_9b9+V21Q_XS~_Vv=z)Ws_4&m>kmQ!kXc+7PiD7%&4lS*R7DV-; zk9upc|34u@N<4$O>|?8WK90jX^VI~Gk-}M&WYFCYT*Y$`?Pl3!*Ga6 zp+T|6VhVd0y*Akl;7N!xmg2o?APcqq>OVI1 zKlA{6BBeK($h~joSo(;D6PqW$d$9PF4m0u!MxIQrwI*nf>e4AA&1TLz!Wobz^;Mp6 zxZN_=btj=2Ps?BZ4IvS%X4h-`VJ0l-J_(7>W+FPJQED`=XQ4 zRuSXe{93{(mUt>`<}w^@4Z;o5;4KkTvz>{uh=A9=6s%85&5vpN1`PxLYl37(6OFz8 zu`JB~*o*Y)*VVY{Dg~N$OK>)FK)3WRYy+~lwH(cCjcS!bXo1i$n?}gvBR*aPX$3TV z_7-d+H(k5EbH#g^W9qLTLf(2=+V`tmzX$XJN2UGLnyEVeio@2P2K$a!r^Hl zyen*UjiSkM5oJ+OAl?*7_9Tf@Ux-tis+ak%!rtB>rOnww3JOSBq5BXmFn^k^S*Tu> z#lwot4*zvrbtwz<8aqZtRec9g;UwJ{Ro2ZvOvXWx!>6;sVW9V|9C6=gUj(5_BhmD8 zg&NADuc~V6;O^_2gYe{(edwX^s+Wel_Vyr6c&(-^pvcIN)7pqWsl}D^)#a&QOOf(x zDd5rRL+H@rh6ZG28z$E6z2e^{x2x%ej8V`d{Y&8M`2-EGs()6ktME_%mnVwU($D~@ z;@#ivSE$q&u&Nph-NBN$zJ6h#dNRLx@F^unQ5=M;yv8Vt+2?44GpDI#u+1UUTeW1e zFCNEuUDa(k6T9$(aK34fZIjh;bC&SCbt5kH*Kd?)1RV7s|5>0sU1(x_nMz(PDyrPd zxN*+BU>PLAVp}6AUCCrpczf5G^S7;BtzhlH2npCnse6f8nZK!)8v}_M6xoJ#)p_|b z(M%ZqvE)wIN(;!%%y6(REU(myO0=HBxA$YYse2LKR^NI+?~mizekqxO@_issK5{AO 
z7KYP*E)l>$T1{hG^sI8^Ort)k`VM{mjtzETYdc1yBbo3kk75tth0)8zf6EnqGLUdE zmX!7CAIsU%L1LZMy(;%a!(}=}p8VNLdzR`;CzU9p;Wus(9ggZka8|Io{|x{O#Npov zz-r-$v*oEr>{A{8KDoTgGacJ>e;Ml*F8q<@7+=()yGZe!XxX+>7=et=9nwEgSZ{+O3$VWRW9IilASN3t?#D`bGM`xT`k>eIt%kD<{1xXF=nl`Y*rDwQ>FGol>xj%&0 z(AHS+EQ`l8#}Iv;<`zf8dOW3m$$(rz`sgs~#OyE*YF`j`Q=W>H3{++6xi>yz?`1x( zUXA^lQ^&tWa zUR8Dg+#$~YpJ?~r()=#u7c(aOe*2$+pKTYG8hbELxdf2Z=MU+}O>F0UhQ?{a9H#(P zR*y~6Z~-PQ$&|xxV*T^dS(m!xosuY7H+i^S3MOex9S{}L@PQ@Df%C-5fR~kRYf6v$ zfZF1G>rgOgVw?)~lWttlPXng@k8$}_y4+Rm!qdVO3lHyMVstZp z9hRIT8qy|D3R?#hF5+C~&4(XRrB2q?%{pTW7#lL|rr@S?j>@jn3T!x})g!Fb)|J=M z@JP_EBf`BX+h8$wzpb>Y+HPzs9*#k!Anr;0pk16D@l@NlzPK=_R{)U3&t7-a*cn(E zfp=xY=B^GQ2Q^j)s)?JGzP^4KHwh+ZrBOM8{V*h6YT>BIU=B&ny_CBiBI8NnO z+)YTbp@7llCwFACs|E%Bq_!Ubi^1J0t>IdUO2btu9nn*~mn0{bYi25%>EbmN_~P-g zKc8|Jbw;Dz;t8L4VXj-WxVnKor+904XH#YSg;%)jh zaN~(W&DkHKOx0PryF_W1sh1&xz%24G9d)sfT-5=rEpnTa`6B3&GHk7pBlqGiKZt~| zS=!FWWadwo88U2o>7v#?OJH+Ms{kh2M$LBC?hE9`*_PvGcq`wYy;AVAWy8)tE0vD%ic+XCXz*uG;Cc7RLct zANF=PpXUVdq?cuCkpmWX^;dC0iki<G$<3c^n*@$w$lxMBmtTL<=PJcA2anic)~z+(Wu9KNsT3=u@Zj8r`cx;LD3a zugXrzmf~rff#Xk}4XGY|uohL$ZNzSPZH^f>K?tMXJll&D+?Ev_7 zvsy{HbUSRj1iKt*3pHiR&g6GF)%Ij|fOe2@=!BANUX zTK@yqz$XF-mF^|?o&EP;5jexV%-(Gf=28)=XYU#{g(eKm4A!aZQUPofW1XChnQExD z*3;}Q`RLJLSny1^J9A;1*9>E7w49O!m44f;qI>fS{+AP=5drg10tDq*@{DrjuXZn-X~?P7;hmb+e|9$&qwFkPtd%=h_*IBq-l;BH|vb{ju0W6CjI zw4hC@yYv_(y7B3-;W^|z$7TNb1V>$DugfvlCmi|`i>cUN_vgnW*JKE`SAQsDFs;&= z&lrtf=?5Vad5pHxYC1U~py#kw%3aPLeO0GD8a{4ud`|+uNw%{7P+o#^A`FA?yJS;G z_>nwuGGF*1Bnl(P!hF#_*?OO$Y8&2&@-+e%exRzC&fgGE%m{J0n07 zhk-znD3J~hsnFq-x9|@Z{$esv`u_tH9Y2XHHK}($@C>do!v2AzfNu$*u z5y=RS5Gtid@rXPDJwLBW!`#f9p`2Ib!tdPb_Z)?VkuY>S4`k4JF;0omA;aNXLVuhD zKE7klv^%?~zt;Wv#U9}w2uzi$w@aWkl4=pg7vY`oU3+{rQ<3?HZFHt;3Wi=Wt-}XA z!eeVbHkWC3M(_YH9#FV$f5eD?G=C+OeFhWtah^& zw3CP?cK2F!hFeGc`1wnN5z@-g)2LaIs|p74yOs5k!uo!^sO-S(DSeB9H5Ev@L*UVH z$#Cs9W<;>r8t}sV6wo+rs(U>OX>sgjskrJo(0Dv-x7zDvH0X|$$b0)jZomyFtWyCm zZw1aDla|C~#VNb#gKqxmMf#G>OcpC+zc<4ig)!mO 
zh0CLHhb6TwqDqIW(giYRb-)K`Z9Pu4Y(0pxs2FZ%J(0cl3#0EfX$LJ>zH>l@%mZVY7L$@pm(hRVxpgBdtDck2;-BqDD z@LZw_-f&}2UUY$|f^6L?)&Q3RRSRciUQ0L^Aj!i3OjrRzIX2d%hFdoI6Mrkh@W0kv zKBOl=<0h#d4UpS|e6HrVKO2^4#IOXqVNr-clRqi<9vWyzd1uugGog!nV~TNen>b7v zsmjEdegkGI2EtB0nF8O`BiX*iq8;3uSsGny4*x{{2QnT2#$i*a;^pk)YJ=<0#}*12 zHY|&!zSQEVebBi?alN9%(jn*`;lq2w5#V?97=9?=GJIv2vWruTTCRE2xnJOQP@&Z( zDdis_GBc&aJr0)h10jTpBpD;AXG6t|uEBT#aNA=HdFZuGmxx-WX=Hlc`cmt_l*80A zy?Uv-o@Uz@9TH!rxwQD1Xy3_Dr~$W|@7LEoHF!$&VI^kZ3#5{~SxBf(t}}vJH2ltQ zKs~oFf>gllm6G{V9my)ukV>$u#5iZT;$xNxPW7$2tSi}(I=QjwQX|i0GXCRJzDw}cS#VRh_y#_|bpCc|)M3lyUAr+$+ox_7e6kfjCkqw1WbinHR0IU<@2_nX$Lef z9(XFhg$P;f-D_LsrjKs~)_u(c?h7`;uZw$es70cN(AlYHcVJ<7U_a?F`*?!`&{6(p zP6S9Wqai%Fzf+oCBwwIcZ&)XmuEg)Ui{2U`l_viQ6+NU*GEt7Bo57{0=z9>ck)qmZ zENl$JNx;>7n=i?+^*rchrRA#+4RZCO1cUqb&A8lp>fF#}1)4nJ$BoT>9&h_--e42g z$f<^|W5YWkhC4(6fHv1H!aBU2RYXEpA4RnUBhb_+cDMS^;jK&63f?9K@*YT`mFrNK zv9c`{v0YKhq8&D-FT7jSr#fuH6J^MsNbkcqR23SwgJ(HBILN)aaxAW>DEI~!F^IDe zy-2!NN3=}5mLYu6;D+JM)@|zqH#rH1O%+8pY%X66d=V}zx;mA%NJG*4lA|q# zX&KZT!-(aiT&76TVALSET+Yoss^OgQmhU8ylM!szYH``sbT-q;uA*!n8-hrNz>gcE zblFAbHCGU*c)Qp}NHZg_E<<%flP|Fv*sQ=7iH{?bn>Nha?%L{OK)AeVu{wyofe~Uv z^_f{t%kaZ+vwLI0*vJ>B2}$0i#S&*0FQxaV+!M+(B;0w@n(OOs>V=JqD@n==SrKTv ztpC{yK%sKI3~c!2%%wM&EkVnnuG7+C;>xK63%$0!rGblcd7$eTSuGlI6bEn_xS4Cd zF;Wbi!L!^kA8*&*eTl7yUScJN?sGxq_1TxT-O+EFTSdI?m78XGL8^|Fq}99qg94bwXN#OZjL_+Trbs@fnlIbb#?StMGA0z}e$1weWqq z3QpgeVjCP|z`hhAf!B6cTCHMbw12KWhSskt(-zJv0L&?L&H-!Nb(!veE#LTK`Gf-`Nd;LCNYp_1*DG=pe=AZ>s3MugR(1 zA8*zpcImus5ovVVQnBD3C_L)}BLZ)b(JRnxQUd?5%b$`zF$uK+o*k7by5l!UE3UY{ z9hrn^KY(?gMHOjaYlI)%v3Ub7VXN@EfGnvU8b&&&V@QyG!`P5yz~p2@nW9jBn?4?9 zc=Q2tG0tpf&8FXCmF?4vMDNvN-oWu&YNf|X5~|o^uyG$peTn}Bjw_;S-DTS?`sA%j zc>2j|#ueP5rkc(!O{J3Pmu)4_k9Z5bULV|w+u7pxuRN78#>VS5VsSYh>uvA}$vBnIu$QW>^}LQ@ zrfIBj;~cQE)iKf>`kd9D-LX6#jJ#|p5IN?08!CW!=4xWN(mD z^*f*;QlpDThdY!WJweC4%~|K%wD;lm@D0QCL+wx~~beIE0D_p$TnDY70(W44pi|2bSSn>t!-&Bjf5^hv2ooy%f4!e7u zoYNwXx1j8`=NN1wD`8u<8@D9n^tQSh)m$W>wrf<}9NO!No(@;wVOuz_qaKfu_N>4Y 
zpqaq1iwhc?KIw5d>@d38JiJkFka^&83V6ffi}~A-{%US;#L$I;aO;5HgL)YpJmiMgZueGa{n3t1)|kRCf4LFVYNE55D{(o z96h{;rsE#v)o!-^%2s70im7^cD#e-8s$n8jOF88GZHSPWpR?^ySI0t$8Cl*^cpIdG zv+ycV*TAE~$hS)L*o1ezJ+n6@YC zqnMYD!JBTxf^0DZFdz`CvXk6&q=8ypt<)Sdzhcc<(2L;n!u%g}CGgc=oPsK;J2QlQ z{Fz3SvqqoIj9=Rzksjy(s4U{SxrEt1;kbc^u`OfFGRktN)vd;zL1@+I*az<1KFCq- z@ptR#s9&{Q*8oTNc^NXVoxLE@sPMQ`cRJGo!?OL!lkVEz?T|16l;^k`%S0rMUSXR> zZxZ*7BW$v&7W(yix;G)eJHii$egF<#*pwkZFi}c8UrB;7U%r|mOkp@F_UlN#lq3(< zr-)2y$x$3pk#O81tjeukZaJv`HHrSyHhMb+sqgTn5^zHPB;ErVU>*{7u7y%3xasaA zJTG$QcY|hdq7?hSlYGba-u3S2C+N`NRA2Br0SIxFKX$YPwzb|csI%#jJYJT8QtQI{ zRKvc<2U`U?rv!RoqyH9ftWP0vA`<;-9cN+%+?G$y=O3T!EW}5=2zoEwrN*5~e~Q#l zk~5#s5FmplcpLmm&CkWzjyP`Q%BjvEYXt~?TI0+?E68wCX%cNIPWSE2Y3sapZ@UDB z8_L82=k2sS03f9f4o?&Gs==Kshs$QcO3AI@zU#@atBy?@3;M2B> z7addV_7`3IGsx>WFJKmX19Ft_FOKhbAfWRqRW5p+PEhmd=Y{v0kof1N>=7uGQtVU1 zaxu7oC2=f{>&05cfV(9fYE~ngn=7Y?Tj}+kk^D&5(s}5+JL=0m?us<4!rRFOWd-w(BpkY@ti0i&ER&2@ns2hWC+`RcSxGFpMJBg$(Dt zeIsRnnJrI(1-i3dP9$iX>MQ~((qhOLt+$ZxkEF!9G9|_8%V5OSYg)cJjMX2Z>I*W! 
zzSNwKQE!f*F<@2qGX`get|VY>ZM%6;bnIu+t!MvJ1P28V=`{U#+GN6331t6`9Qn-A z-je2@0t5A3I4CG6`c@!&7)<#i;$U+-jcVI|YwjaM^?~4sOteHsQ|QOD<^&|v_B-yG z`WGd)Im*fmT$Dt-PP|xuT1ld}Xve^aIy?b=BEk<%9`NOWU#e*ub=|_oLWtc0pMct& zs^<6XnYEw?kC=2_aRzkG);kVP+mPU}O9f~uE5*awdyVzDp`olHBCEdPX^s-5+J23( z01QgjDaFn2-y%V0Tn0e@Je(z7|0jn@Q&X<@s$>0YlPX6`5+xZ$nRyMNS?H5?58rS( zC_>7g`!rWg7YKU3fxG(agI@RLP9Bs<8ZVtm2*d8rQ26=St`;`$_4`gKOxC2=Y9eYm zj~eivQle_8%#pS#YtFb6RVF&#yWPR2y1oKgZ;cwX$_ zt3TTNt5ZuOckRY6eH9%!X@{FY;m35`3*2lp%k{SnXkfmgM7c^kJK42`-mlVH)-zf0 zbnT0BjnDQN-5+jmI4jyl&2!PAL7t_ap8@*!t@>Htr43zIJBD>yZ(liZz)a8kmZh)W zJeK0je*h6ZY3`+!@D8XA?P8l%SH}3KP*2#0d~RCQnxiCMG?@10rzSXJuU)Bg5QnMtW=Ias7g_Cs?>x-$y=rDF%;aT#2&h@tT056 z@|v60A2a)KJY7`W`6oQiOY> zR0CRlqI=MCbgBH*N##~}KM3!R-#WG!(p_T3i{0J!>;*DevWsg2X!juqOgWy8=8qLN zMS(F-lP?7teau=c*ASo1 z@xG-iIin1@a*%uevf(g6fYP<=#JDoJ7wxk-ayVC|fj$4hqDP1Ar_(}YP*Sj4y%lE; zJCHHJ*-NdfVVIB8k?nAUfKv?**e;gUQ0@myNDO|h)O#)Ai&JWfrv%bcQmdteErw$V z@9fZT!czjNdU&Y?>sB8*x6wvo!d=UB& z#Vot)z5?U!*gz@W4^BL0%ut?xAsa)Vbe(Bnd(jv-@O_4A-C|*G6FmQwti~Gh{yUNy zjP(Go#mSPK(q-;}ZSJZH;h82#cP}EQo?YDY_~b20kR)Bg{!0*e4&)4YkUZ*gBORYr zM7;Qh0OEr50TYc_V=?xd9>dYHjj*%(eMqFA3O4HRXxF@E_AX<>u!fDq-P**K&gU82 z2Dc+*awI)@z{gFImmzl!Cl8zaj#p5IUoJS+jtw2KvaGjxUaoN*-kmhgFHtRB?0r#p zel$*gu2Yx?t@+)o1zh8xeWm%pFVdgD&;l1!^TP8Lo2DiDfvzeCXuwp==*kdG&R8i@WuG$ABdEv6`hSw!I<9cD7X#;Cn^{B2(E z@uU5`^1lkrpdg}vl65F8q6TnmkeXU>u?U_bi5N9 z8k)|_b-e@?6d(era>me986DIF5?UuOM;p9G@*c+b6^YJx+CK&c{^GndU|Xm|qx~yU zp<&@Tfw2cd9?)ijIEGl)N4R8mLSWlTj2e@Ln=Lz$3wYEz2azf~5cLmOkAud!xdXZp znm0$~2Z;s98BZ4NN`9R8#Zk6ZT6FChUQcvhpXHY&o*pvdsme26niB0dbKyCx7n+($7O7e@0EbHiQ!yp6Z7pqNE2tvP@)kMHTIhyIPDWQ!gp*YM#xXvw&rDu*n{S~12;zR#o49VTFHs@ zZ&B7NcUQclp4RjO(3}PZT!D**aSPS=<;;O5=XDov=Jx1>-x1A^Mstjct(@;Qx>B_J z#y@^_9fNr`)_L_LAjn2Ia$G#%+bo$ zts3K{Pgd>DzG1V6JOdm2Y#QsDaoCxyDtwBbM1hhWw&ABO1(^x7_whXO&6?UWjoeogxL&2vaLF8TONPBE#5Z7f!;G%_VR};F#pnl>cPJmE+{RAf(HWQP#J^lUt zES!!9Z#PM_u{K-X^ZT@Zu}fV9)>lInpK2XgLGfM>^j>;{123^-WO1q#eo@fC8{(fN zy3+l@EtwQgk$SyVGTsd;72LC=X5V!^Fx9>KGoTBCs4 
z&##mnbRC(}+$<=h*>VD>X)1DQtne@43#M4xC_*llJ*{6slpk4BxR)8-ZDl>}yhIxv zdz_3q1y~Ws1z~i6AiJAsIG!WpD*5If;O9th4kPCXwB(mxbGEi7fB*+=GCh6`D2k^=~smOYLZp>^JN$HCZ87+t1O1jQ+Azwk{6a2X+>%`W#!Khbn@KTYwFg4 zW;fUq)RkEfX(Bqr7*^oT(U-OZZ;YKbxFDv$=Y`S8pL)W%L{{f{vlV?|iQ~|Xp_2Wx)$O}eodf+5q0@*(OtSe|N_tnJp>5P6w2V9)M z50F>^C|g|Ne7UP%UB8oPf{Nc)rQZNqa3x%0M>E;OZg6PC52h+#`h^AQ$(YU(*SmY3 z0}p?-21e2_$j9o>wZm9mF`iN)-fd|ru9?K`twZe>p(W`qD%Bk_g+%XO9Eav;XsZhu zxjW(BzP7KkV7J=0j~)UmEYQBUj7`zu74T|0W12tlX>*Rl16~Rk)o7{bIi)q(Tv;FA z-v0|JZxvl9p#0sD^R<|%`9j)MUf%imGkM?4 zQuH{Et~5o}w*jPSSd{fnwTwcvY`U~O`$Xn%s@o$$TCyGQf2yl#Y|aF$EMxg~S4tG} zX5<+|*b9zYigrZWHm}!M;p0x1Q}rt6<83He97V{y#jOPYS(**Ij?l zR+ka-I zO;KCVvVv=;M)HMjGqQ16FZ=4|C(;H69Q+l!Y$hOVXY)4IE~E)5k;bR?liV)ng2(Jc z)DZEEqJ(X3k}y+uLNa7+cf#!3OWm6Iu*{TakT>=+8>Q}jSeH%xZsJURxAvjh&fB7& zn58SXhcd4FDx46C&wO!pvU=CdD?|Dg8Wzl$poLEr`J!?f2;e@xA@L7>d+6gv3IP)$ zB1*>fHz|e#EOMh*Wz3Ll9vNfYz3RwORRM5PKM*NJ=8DI6%CUx;Mx+h~BZ`|mt;|fB zQBg%q*I@R$X|r0@nU6~wGP|a?PnN0EN#1XA_?hMv%P{wNlI^$RqBuDOrOIH+%#akE zIN@yqk2C5s~J4;ZHi7tJ^{aC<(k1?+vtTuU9p zgt7`B}rO+ zpelI{GT+MUZoR8!vZ(bLH_!B+iBK*N8S0E49kh%&@N3?{zU3e$Y12F1v*Vo?dZjWW zo?wQq_x;?HE9yH9?I$x(8cfsE9=qHg!rF}=w13ycJAa)-UXC)Io|1v)>I*!5L(87M zVh#-$vy@qr2|Dufz9*lrNyeIoC1kGB^rW!v8HJB8^2S}`i5x~>C3DVfhE92H!ySCs z+>gv$x5TrS$!47T<7u)wAwxNicxbnbUM!a^#`Z28KYsaa;j&JyWj*?7fC~H^9r)GQ zfS!4BSug#RllxIkZLvM5de7w4w#J=60&wMZ^6rRCB_$>>#0e z5(S=mm+(VcC-wxJ)+IyY0L7L55vF$rd<0T3O#&UJ^d#gyB6Dceg%ttQOe=V_OFSvu+} zb6YC;*s$~8v@I~C?#45&_kvYw0t&{eb!5-kGL=h9PhWw&@`H1o56 z%0>8f_)FZMH_O1gQqGP`m(?05{a^cyb9N19|;`7;(x(`0M&CcV)7kg$Ev&*jgH1&0B zWj!A;PE&W%-{nBYLVD1k>){QA6Z9e+PkwfKD@taZIW`KU0_EWsgu9X*;_?UA#C;z>SWm^a4Tk+Do45u+RUXT#cEBYZWEL%U$r? 
z=8@)@>Ggwus)@@T5${UXt@A)a@NOz6*#)I`+$FLS>*j-AJA)eYqJ}Q|&%tZ+EJ1Df+M(1ggB9|=u{RHCu_nbQ}Ej`YIwr?*=Oyg+k4A(x>>g(5r`)?^BFc=) z(G170eH*JG(fY366LH~tpn>*;))Xlzd8HRYWa;z?@$vKX3kz!UHuZrs!8f@ILC+8( zu;&G|a+%DUzvXfZ``2kR3Hq3RW96ReM{Wv0;ykeoz`r>4^}7<$#ywh<-V#f~5)Fg> zW46JOnghPU@EU={61GuCM0@U|nq|vc2@S%LO;41(2;`AV^yYUGvi)GJN_<`5p40c; zbd#0sI~ZkbiuXP7tE+y&m&@7G_&K+UZS-{9D>@y?$sbZx!?ifJJ)8WtJo2Vwjf*** z655XIUrB?;2hEO_r>H zeMD8_sKv@fk@Z(vY5ek*wVvsh^jPtkJX36Iv^`5 zfcXiwImEYx6S(Qaq8VLb>n6lu<#*6zI$Qj2fGiFdc^R>a!PfL)_CL^&E`8Fy!JGEu z>9U)lVL@;lhg%F2gp>X|7m2y~C0Z-PmpkF4;;2ML-gO_-Np=m~UlZGm-Xpym?=I8{ zkc%ta+Y>mz0bf^h6nKh}rOOL-7Z{HJn)eKtzI$#_D1eN3B%zd+=gHfhO%df1pvlAV zz$o7~ydrWHL4x}q17pKa(CQZVv{D`(vZ){SD}}eP_mSMQ&Sy=6x!2I6YO>aaWaRZ4cuF-o!~IACFHKi1+rmfpW62eai$1rx>(o{J^U;D|efDYEirk2gOOmeFXd&l>y2uHP zcb6iWHU0S(wwi9orPEICgnxt9Wx3!lGDFe=%OHWx^<;%EHIdIGi znHSKyy;2i4b#Q#sP(xu-AXXMUXq=E!vtTpjF|-DNMGM4&@zP2|c+H-j8`a{4pTocoz;jW^r zwJM~v%$DEE%{KvF%&|CfaDJ~j*3IM!OuSob6&p2V7R_@s>PmGJ{$9Fr`k=;n+b145S@LbLW4vnxX;iLhsBcSW)BSw zp28-ZY4+KzJ{*3!kbM{+O>u1Ykw$~JkO_63-Lv{On^>9FFSRpQdyv@(lLYJGDa0m{ zBFJc#`?dl;#IeEn&{Vt;ZYrsf>6Wq94Y5su@BJzv3Yg?6WZPvG$-FfHPv>3%ic3#4 zb@~0xs2hcmb8rH|7?vL_IMw`OZ=L1#|Jb?;sH)bjD+mgLgf!AfcXxMpr*wBnhjdB_ zBHi8H(%s!5-QDo-quzJ#eg7K+7#ldT&ll@kYp%KG!n;i0elj6AWd~?1orR58tDP;% z^Y?Smn3ba%WxbZ_=FNUo^H#CFt?y9;w^Kj3>P>u9sA%45d)?#2G!bQaMzviXu0}gI zwU#FML+L&Jn@#ZP66x2c9+@b!me6>wa2k7cAd0ynnMaOFjF;$qmY*M~4W;MV)jWYB zJ;m~t6zYeSzF3Y7XJf57^cKU+tAb>}q4~fT{qNQtX1?sLx z!{J#dYLzDL%{OP=aqv>P?}p+TbKLV2mLHd>^FKA-)@i!q?%wJ(-F#@%i5>ORPvorl zdxg9JPe)QE-EB8SC=+i>g90%bq+j{;&b~|v`?itv=Wz({A#Nss>By(#xO78}i3-OC z;d^)d-jL9gdZHszSxswwj1KOY;VE%qNNC&RgD^0=ljecrYPDfm635h;p~sj~60O`x zIbLL`0aB6^0H64r?f#&0*B6F*RNeVezE}V-OSE~=WiJaw)UU=DM&yOgoq|+eUlR4K zC$z<$2kt#t7>uo&;kQi$MEd1Tm|k;|&yy4J02L9QhUus$HAhO{mogf)aB*^T^zG0% zxZ)MKS2`y`vvQYeD}>dTiEGM@d(>9S)`+YFWsB4LYKvV5+j%Q(He>vGts*JkD83AB zR_%0M*4pg!Rc$`mUAdyrm7%c1+)nkQJxM4|Ago_uC&knnGb)x(3y_%PPZ2;p7$|dxetR4G0 zz-uToJ!Xk|$rI_EjFmb`EVb?V)io;<$^5 z2!;-RFb 
zc%zl26GCJWSB|SOWg8A4^mxpb=5?YQ>mri>MESwfPv?R8ZSrPmV_FPSHi;*el%KFJ z^)oM6U--~MS53Qk$O%1x!`WLn{9X13z;$@{0YycWaAUfCptS)kjP{3fB@0QM%0K)C zA9OsM?+KsJ#4O~FPD=cdXby5Cd-QZS6TqPa6TJEAI2ma%lkE&g&h7cIQeMk{jEt5%7Srnarf{5ggI(0&+=1&@YjXYl))6ZTU9I)*s z;Wy~}%b)f)YoNTIto5uGm>-HJ*2G4;X=E=5CrKhh6R0RGI%eJm75Yz7nTyphvBQW} z|1de@o}SLX4L06mIN_?D51eRDog&Q;u*3)@qIte#w2V?|0U(TGt!2Wz3THM)Y{#hx z0ADInpOLTT%7y!W)yOrEdEAtex$U`TGV4$z(%GQCDA96MSXqLOU$gr))Lezzw(VYU zos>>!@g>aWQGUUTM2=#wmJrN>`NunB80RH>FZ!`k&II40b%Ta0ncUF~>oDacJGXuP zeTQ+hXKv9*s{1hQ=PqVLiL>RqovG~zWa^&vhl;lLO?aGTscB{okCR>;P+jbFnE3Wj z*BbtK<{j!uq+qY7($CsUS037ghJ)c3%g~RHlrB=L_M*} z0|!gtH%p^_yvb`1A9!EZr^cw)&}z<@nro);(vfk2GuJ@H4wpnyMr&;rRt5(HA&AlE zD^l`2{-c+unR(Rz2eMxH04g_(ZQGH%e{>Am`aZO!DnE!Ow{60->NBHXEX7Rz3V44) z%#IZnkL3tv;DGrxIm(d1$jlZ`5ePD&;+q-Tz75r&G9^G+kN@eA8AL4Jhn%4zC%k^X zaJtbmQDHcsIS@~y!ZT!uHqFMBNdTys(4s@5BS{^HI4-rB7cS2UCKofGYJ0m)>o!fR z#<2KwvBZyoYW0hk%{kPw5gSLjt~;dG2k?W(T@OVSS{M}(QOpL~hia$QO{WwB9U9|P zx%?ziA6JSi1E2!0#J+hFh@oCIKd!<7u8K)ly~A+`ruuE^%Sn9Co-h8UXm8KSjcS{D z@zox<x1F6U;!Vu*o0cQl z?<&p5e7AFUwluZpC^1yQKf0-|P@~_DXX~rkO5=%H*t>5XZtNQ3(@w(B+ZNUo3t7~2J{M-sXXI@vR3Mp- zg=e~9dyTVod4ek+RSDIV*&R?JhfCzr!8BoW-%8N=@VmD7gF~NV@%LhoVl7M@kmzn+ zNmyYB-1{Mw4DLn>b<-o}9&iF$2XBn|Jba4BifBe3C4=~B?pFFLK5`lMe?5Qr9%JyW z>F}Y|A5Ri>_-XBv9?2l!_5>w+8h&g>$N*|JM23d4Kb#7Ez{Q6LyzDd}uSon}nC$!c z<~wrC(&LD{+Cx?t+hnKQ8Ky1UlILwrJ^a%EH2D1#1r1xh)K*ayjTH^g=?X+pDcPir zDs|!%Qt4S;Xp45v{_39FG64OOe@_*;SDwC8CIdfzOQTHYE9LYQ=kL$IT7V`!UHOwE zwPpEy{#5mXyz#nTYRE@<)75Z4sHuA6W-HSLd#{VD^B6%bZ#nxI^Zjr7=8t5+*njKR z`?O*s8x)n=Vwrr_qJCfW;9-c$Lm@mo={0i~5xjBpL6l>7=i_gTg+CkN5nk*p(8r82Epg8C%%BpD64U{9+ zD=>PqV|#e^BThRr7M9y5Vf@T@%|&)*kG7T*@1}CmRCfSG6iGU{f4@y(M2`hvlVH`H zcw$3U;lK2XGFr?4AE~eUmgT^pb;1*~|rpZ8`yoP`QgW+)cQ&mc!eG zk@B4!dbK$DY3iiI3kZ*gMuNs0xNoaNZIp}NBMlzD2yR;w@8K=t--K6_43dhD;{+sa z-=AD8bdR6&%p=R|IMo}TnvH)-Uzc#*Pjb&7yS@Ww)y#(h_V8JrYJxG5V?gvWzyWQB-7t!2{6ggrF3^0=JA;um@?v9SOO97O6xMUSO4EmHma*~f&3#$nfh6lO1hv{pF+{j`wdMt@ 
zh+wHqs4Ffc6)40)@R=S{2$(utQjP1eU#_hPuuy(oP74hNvF_8B&l0X&BdC3P^YrEG zfN$Hk__y%+kwtW&``szZyvI*Y@kQ!n9FH9O!Cf^!AW;20cXKx{e%4M$Gx@D_#4rpw zbO|t!a>-EBOAbqBP0ycH5g0$~f-@;OlM{#@g0#AJ29G1Jk%Y|_05B%o9Os+RfLC3V zsw%4Z>hXB;X|rvt9H~wWyRsck`)b@Xr)WIPN9#1&!}fGO7}dG-FW7ujU!vng0q)%g zq%f-6EA;AbDz5!;gt6C-o87NU?dHI&O})k*W8&^Ll}8MlmnhtEbifMPr$O}KiPr70 z@9+m{&8dC0s4@uGL@V1KZw=eA6Ovtc?i6gg-GJOd!^yH3*bMb|q|lS$$1y?I+Z3vX zcI_2Uwho0I0CT=Wy~j>1G8W^A5(Vny@iqrdSirA%~Bv!_R2YF z1k;3-VEZ2@-J|JW%@AR-9y_saDk7G?kt^VPVENJ!fz44hF;n|~riTB2>WR<-sesof zT^*CZ&nszipM}ZFl}{Vt-3Jd2>UNp_+;=@|X*F~z_XI#N&sq;5j3$$lpNZDMeEcj~ zt#hA}fCxZ9*A=S=cnC`1*&D)5_z*=p8!@wfm5YFs%4gI8AxAxQ;B*Xb^W7I2lf@T( zJc(LD9%SCGCsf#_$3OW2YHri;-iKCI^Tr^}+EU$V=-DRK%R{5v7oNo*= zFS3a;WLW}O6OVW1Fh-i}VSx7Fu#9ZI?BZw2aUgL^l~26(z#c8`S6N6&+X2AAaoSDh z43O{2x8icfm0DB9D1X8ttNwi7Yv}wltZl)!lJtE1bq+bfbTPpH;EuZqk50z8tq&GV zuy+hTJ7|7Bv4W?WtD{_O-h*cdp0ELLjM919-@OPyD1T+AwOQ`6e@TIpDBa8jvv_5| zE#dA77ge{tL>O!=1=TjNwGH6Lk6i-?7wl^ntDV<-pIS^AO9afdbm=@BBUHH;J)3ew zTjMkPl9Rh`=UlcL_w(0mj*odzOH;g9yp9p^Rl%j>KNm&75xrW-Ej_&14p^@$s4GwO zll`itwscffAW^tJIDa_X&#yl9ZR&8Ed{JqxK5j`>h!F-&nMi3-xvN~*-CPf>*zN#z z+>U!Jp-2#iFsaP-(#*0}Y5wKU|KS1Y(!P`B$-@mZ?*P>)tjE95Fd5p@eqxW>J9a^9 z4J$iqL0t7QEjUPw7VB#{v6!P+Eg{c%1>N_)GqN)dV8ztACzB$^C8%< zX;mGcV+#58!ns$!UTA{IrtH?s9fF(*m#+zj*AtbAfq$+FoaR(mV^d}T2`YxRZaU2s zm0DybcHH%+^+MiKY3m!5WG<7(XW+2eM-6CfNqFr zH(Ssh@7{w`2UW9rY~-bWiR&tFYU$fXg#oj%@x4<01WIBTKB)e@0RrFM*&`fd1y7x` z<9F`xf&_wR>}3lNFg$Y~9A`FZ;M_N9az$5g*RCc?c%qLtP3&tO*>?cRl2O39Gml^G zBMC8YQe^d$X?!tUh(p?YA>Ky1Xp9t27Hc4$ZJ>aeoiI)|l_Z1_Tp#;gQIZHFCXY6| z(>y6609VrXKjVjZsW9swwBT2%15{j_-Pp%(GP&EGrzqTptiL5uS;YXE zHi*=$iH)jXNP{hSzFz0GP|^Yyy_duzEaD@N(!P~91i&&oDci|pEu&Vi>b>hDNxe~f zm@SqrSYD8JRKlM8G$>BR#Jp!x9#cr;_9lQ%&H)fC*-7@E*vvZSc5NukInBFkJk`&b zsdYUL24!Dg#{T%HudXitvy z_g@Rpg(zB!z?N(cAUvF$IznjIKq}{@P%Te(vOvu}{R-fk?3`VctNPAL%fk&k0vZDG z>#(yQI{mTKC{;M=V&3!^eG#kc55WA?#Nt)2RrhT=rg%c^K$4@1Qvs1!n!A*BXHAih zEOJ75MW{;;7Im^hmlZwTW|ZX2wAZ)um8}+Lq*Ks&0$C{|73HbDacFo1bi78PsJ(C& 
zxpe&+>s#mQ1(blbi%T{2(m+n*me$*rpT=L8@G@vr!O z$ss+T`R-izXs{u)-n@vr2ybnf<&ioiJN5OIt<>9fuJ6)91QEz36T2>V!%G!RnXnMP z_+P$jO6I~pzmXc@(K4le>J|kA%#|CRf+cf;lFh&XkNB?&m=``}P(Xl&%Ip9VJN9`Z zM7V7&x?EYbB?dj_3*t>Vse4=QI?4Kt3TWN<*g&7;PE8SzPm|L$oq%h(?q>6aZz5~A zH5Hq>Rch7ov*lGvWfkjDFs;p-q2QLC2OczO>8k`Ph~0LOgD8nl_$TYeUOAho4}4r0 z$Jl+->~`k#)hNGwbcxjfH|nPc-0N0O8LhVV_rhbqcVQqXC4VQnY5=2p&*~xLjA~Mm zuzRv4EIuTW1<1zu5yf7>qU&QU(`A(KL!AWV1`zp$mv8E_(@Ekfr(}O>-JYAP-V`K3 ziOr57Vq&e;YbkC&MC+*ikn;+hPGmK<-F|nx;&|dEJSVU7l4CqO53|w9!*?MnP!{JO zAV1(T2nYwjf&Mhw3gP=Kq4YcqV+@LEfg$O=@`{TCZ10^<+c$6@0&OW7nOYs2ACP>{ zluSX`atii?;;^qWY5f&zcnr6dodEzP%6P1h1cg8q6);VhuyR#kU>@KSxmwaArJp)C z9oTPRw8;oOwK`v4x~08>;MK)LV%^Rls^z-RWtpj&awOB)TfEpqOnjb_r%&K_rpM~V zqICxD1gLPUcl;TKMfA#wjn;gQu1eS8t}Thvh${`Mv%eDg_}U#P=~_oO1LS=3w)xx; zf{eku78g<7q@hE@i_$ZgP2!D%{Q;%=Gv z!Jr_-R>`ZE03=A#sPgG5{h+SDq~M^WfVJq2vyw(_qgniHK}_v_m735u(L?3oP{q0u za1hhw+)ZbrfcxHgy*~)WBM9r zCG7xmLESuiDK#k#Iv4G2MVuZR9)bph^nrDFG4w~~e@qu4#PaMVYq4rMKZULbp>R&X zpZfiO(E)fBLC}O!6X2-<2Q#nk7eH)ypldI&q;CC#xG(Y6x`cIh zhHeZOctv3-N%C(Z_1}a_8gldtTn`8I%n-`DdoO=5^5Ts{SaI(kE$}p(ZBNPYJXW3I z+X7esrrV9Yy2Fv?URAApZf1DVl~V;fd+**s8g&$)-_y5MtwZEJ#5!0)+Bry5FI$XM%IP0>nu%dGzX z?;n%oQ-=>yP1iJ|{9W^j2M$pp-;?+*@v4v1Tv@dJ-@Czs_^KUc=5PjZ93$kr6{YA`hbcJo z#1G_Or~oJw_^V(E@EU2L;K5=->mEoX2(%SQyhi!Y5i?xcTxrxPIp zr~3bCNq~T?Y%O8%$dvxnpJyU)`XGX2 zOCg6yC4WS8F#V#_LY%>VFhkLvI6IM^w2P7;mw@>D1esb`yoGqCfj%bDH+dmR{7^do zcj0{mQpA<40O~)MAZU~+ghdi!f*&KgG$z>%5ffx)rWo-ST6i5OEASwa)tQn8grPsG zZpgXA6qSa7*4}R%P}c?#;izzOOi#%RqZFx7|F!Zjft0@wpi7 zB~Ww8%ggJ185Fm4||uqkoetVw+-Pt$y@Ee4MN77C0XrrDVmTYqEOlF92> zxRQ4IFD-z^v;Jr&!-2ZKGlpBKoP&ddL9h5Jb=NBla{6ug{OX|oB7a3IDx&<-vN$_R zLIx2rs;f7{@%H3nMX}k#{Vtd2!x2$nJ}*8Xe8&?0(T!K3aVb&kzCw zx!MVRkdjJ4(VDj+;6CIs5PTiL^*`c`G($u{p3pFvCJgo0_@~`MNh-rHweIhvt?FKK z7*>N;6|ckxUD{U)dfT*nlRO~}aKS|Sq6nZ~)dBI^u42iFsF|m zR~ezyuE*c40r9r4U%v_i{YluhY=#8Y<4$1?DP{%zjp+FJf%!b~=T)r=CSIBg+~Tek zn0T0c-sk%~vYVDK0{J&7uSyx*5YlMA{2*`tg2$2SU+MI}j$9v*lp!xqM*JuQ721e0V4 
z={pS(vjwh@2#dbljG{MYD$T}i7J}UasUo4FNOVo9~1Ui>1BM}LS20cB!q|$xz5E(>~qfbx` z-l#7W1SF&yIR(YD+xZYn>uoy>4JjcF6?ra?_JY(lH)k5;5QG3E*fYWCM_w#35bfYU zU!x?lrlnb%sfT2n#bZiT+7s5n>VNLD>seN^4F(0kM3gv4sbpG)^+*|7zCC(9-xZ$)#fX$@UIW&vqij$ zT-NSUT`vO}v62W~CP1Jz^iC~IUD5g7j~{#?0)=%I0YdM|2)aerun0FKk>hkT60#7w z>XlqmSZxWK&xbko_+0IB(0aJcAVI!s8$nyUBg2Y54#$1Ok7Li<6Z!3*zz#?*xqUDI zfC#nV|EF+ThwugMqtuU&$a<^2rg`|j`mFvKgrtKAVr$(dA++7HS!7=`_}9t$kwwOG zATaHA6Bv(joRw6LZPnMusVe?C5N`!^Q>9s49odIuA%FR-qsRdF#PUr)>#N^;M}ip7 zY7=P^8g`Fac?bk)nS6veXkyfHDOU=Pl}qQivq7z?IYgCio{z+P0t>mch~rawJF<0w zvXm?f4P>B6A-K_9og!6~2j@^db)`z^bA7|?)GxNI5*>_Jc)t;Df&If(egRIE!fPW- z3;7zO4`EPl9Pgz_jKrLvu;KUM1q{}ah1e_2(yI8HapC@m$1pzp2s@jg@p!R0YR3o@ zI|%``Jk0)uNye;U$#_b&iu>L*!h%mz{n$;~+tOt(|F$6B-ZT(D+4jjJ4ht}i?Lv%8 z$cG_+r|jS1M#$#=IGuEMxAPvxngB&g ze}An&1<5KxBohlYk>!&gA=BmN7jP@)$1_(fb(UmBIKh?Y%3I4Wu-1_2DGKSD$wK5t zR0PR~!=)Dx$T~tP8)_gt&y05n0UfB1=>sF#yk@ii)jNc@1Cg~|>+R1^a!TkECXkUSeDZ5 z5nxYg9@H`hsKa`Dy<;uy8KP;azP)N3uA}4FypDHfdwsjWu#m4+Y9hXawOb?r1RlSS z?_WRF--Bsnhg;Op7p%^H`LUKoPMuGwZirtrAy-{cRF}t&tz5XIB(%C`qoSmxNTigk zgdI^J&s^SEpk5kcg&k1{hgc$DF_1A&{;{~Op4sT;v*vKeF+Yy!_SB7!xbd=?v z^Gl%W@e=Fz`#;10jJqcnZCdi*!~Tp@O8p@p2!(eml78Z@3BtRgQT|<5h1UyrJ_~N~GG{ zoA)C*|AMc3d43+S=ELg&ExGIZsIzk5dACmnkF1qgpBr>Gfqf*eX4=L0?4Qhd`Qz!5 z3*zd(V!RW5a3`n4;9ntrHy^24 z9!Ch|!~L(j1rx_(t9Y+9wv8wbTlln)-qhs&@zbYIA}Myq1*SaRyO~NZe;A+tdg3TK zbem7LQi_u#=CAG1CIT){pM0if%7|&YBTHr`037CeG5oQ4OX^zswTNnf;DEZ(cCYWW zope2A{yVi*vrSTd6|rI?Cgo_MGfm@4Q7xkKvYZC}pTJ_r4ClT6>BC2Y4pqa zV)jW(OY3L5UrG8p^?Z`SV10yLwYsS_3_bF^0w*pVo|A<$Hq37D&z%aoZj0j;?(=lj zpP+J>Se;QsG`C%8@tCps&QLgyy?XJjfb-oyP1v7nlWNepQJsr}^m}cb__UE$OFch5 z24laQT!+s7@hW3f^z*x5NlIbEj|D+0LRIAa%3?xwaIouIOiX? 
zZ4EQdFu3;9ha5IF2uTUJKhTmys{Pk#KKY&ZTXm38cC+Nng;xYBjrK#E&&$`EkcGA9 zVEy|Z2o8TakpG-Dsw7~aQ9VHD-TeM*xVUcJ{CyB2;bf7O*+#biKxmPoa%cOv9z|Sb zogzdI0V7lBtfKssi;_HQ(Y&RN+?0Q?IKl}bIG6k@*N82H5^wLRm^b}+2x$=1dmhJ* z_VAvwG@H9E8#v7;g#A)9@ls zQKiGI8?d(8;YE8k)VVy56YE3C zONRuwFY{26M-L-nF&q=JgA@}kZ?SCP4DY0C}KL#4y)XDD6>ncSLfEHCio>dJrU%~fu3{n$fEwn(xr|RRLT35}lt=LgoG)Lpy{K470TM<^#*2n`%q&(eSOB;nx6NU>gXEddAZwAom z;QogNI(ZF10GEN4-gLk#Fn$0-EQr|pWT%u%!CI}j8RgBZ1$CkH{yQxGByr7W^smblhPa#F<+vK|8>${pjG8(EDC{bBJ$?5v=g(@6BW;)JQ-Nkh!OyKPuuxu9D-JnP5KN)msj@E+TqeF; z%EDF3u51t2ECpm`Cu+(1$D@JE9oXSHP@Un#Ad!5uiyI#H$Yx-vJ>@aN)EN zm2*}vDGFiV{Qr;Td3#d>@Umlh+B;wl{gDuU?4?qN<@$+!h^(o6%MO(g^QbhbcoDo8~$sw{eQ=)RgaL|>!k zl)G=~QH#G)w96R>_Wqg7zk;bMQE)CjnRyqN)Mw6-&pMh+U}{~PsaTQ9aZ&&E$lt#z z0&oOMJfKYS>jid@Y}m`3OeVBiA?~cF zZ#ds@z2p9Lc$jeJl38_uxD#Soe}xxF-LE*Njf(To37+TS1d>3{T85If9tv zN!f29x%%Vo`?L|SR%uF#`FS&y$uJqsj6;_Z5?}~??n6eE#K)5Q`05SxwtF4n+wj%c z%&hdNS8qb;e(2I7gsNJ)T^-0dBY;tWgjy(w+ODzQHCUzG5GZu6&a}tH;a%Se`>!)?lpU%n@*qX0Zff58cq@~^r5n4zsM&Ag zwZ?0(L0}F2!)yP2y$1qQ8W=io{+O4~yflu9eb8EyKwB8VxPHL%l}Ahn5JoTb%bC(s zK+NoS(ueAiMJ=);H_RRu!8h{=SZ?+5LV{79qQ=uz5(BjFl*PS;ii}XpQ$}`uWw>t* z7`J!#OIw}BXX|NW#LovEeoTB;W3;xLCj6_`_V+vEAtXS|3p%s!z=q&8%JV1@faua;LaU`-%t=Dv?}?*hjP}L47QLT zI|#+(;L3>Q0j!!TDENhSB_U~X$tqp`N{BTf?+b&pyMmRl)CjRmN1+q(b8>PTBJxYk z!CZhrm=^QyDkP%R`H7WrSn zJg+VS)D{f}fn<3s))-Z-MtfTNh5~h~ud$QX)PXoFSD?PI5(e}?q8r|G#Gnm+%0a10gqq35!Gd?HaVSh)rjxuoCYVkh$QsM*^VC`jHnA zzbkH5BO%!9;K=dkO}qEh!$F=T3?ZI``}YA0amxNH?oYNh;pVu)VoUA$0xP5;GSJz} z&2DZ-c-PQSK=pMREO_x3Z`Sgb%Keg$mhBuzoVUlZ=oq--_Ngn2!mNC+vpO`hbtDCMSyo0VJCzUuaAJt84r9{ zcqh9b#@AGA)+i^IO?e+oI)(VwETt=dm>sq7)wx$VddpVJ^RO3E*!BfO#2(CPt^E^8 z#4p9tzc%@h%ql95FHpEUCaS%M6Ak{`a`K*ohT?Vv4CvDe+Dofm5J}+EWnV<2L6SbB zu28@xh;>X;3|>Ipl+|pM6~{{2fyVQvaQdX20Si2U9a=N~j{l0m-1x%e{g~S?l*CG%IdHc5^TH^}iK?>Xjv;MT;<0-O=^+9IP%>SRQ~10n zdIkFR+2q9N*P>-3gv7j_czCVCtui}mzZpfwa7hD}Vop#CAo&IB~Kx6z2QRD<8 zMp=oT%VKJ)u4(>=n}GE@#r6BSt>ZFrZYw zyN-8N5IP|Y9~lEjK}}vc&oh6mNhZl8brxvI(MAK^d6E}kgEd_Kx(85G?z+Y7C~o#t 
zs3FPeL{$i4BIoQD{7V0R&N6QJvF162j|0t6Uh*A>ia+C?UCyvR!Ao!^L=fuE*f09g z9WXlY$qJ-MiLN8ke@>!@n4&39l+11s`m86t>F1CauMZR_J2oMULsQSonC!lOP4SH_ z&V*?pui{R5O@G7$gce6bm7&VG_z-<)=fz)pMeqN<-XblpQ&xNjneP+}iCuV=D4soK zGUOeF;SqI~J_iihvk68Vni{Km+~nZMM<~2iaXi%|{Wwy%O8k&fB`gDvw{chzxI8IQ zF){Xt$M$bUZmoD9Q}80sjqMh@%gY5Iaq&_!u@v;!doHnNY&i#i3oriHX{iniw(?AG zO{dV#`^O^y;A{TGKYS;0aC*{P7XZ-Gyt2^EyaRBryk5fe)!Ij~bfyj7Jx08>-F>c}FyIU3G-I5!!F z!Q?qnv!6qAzQITLN=l0NN!dgPg)6eS|14Wil3x#xJ{B} zg#(FI*1_p{jFDR7e%n81v*{uSD@hXm%q@K3jHDPb7*L&Lgg@HQoM3p!Xibx+IC`j z?1EaaS!T#PZMfL@!kA6IxNN?9?fwcadHZi#h%No&PEIsDn)O!;iY|yn+*flb4jt*XTz?#mMI)C)vlH)uZioJp@)rMF3s3WrDWxqZ7YEF?M_iN;TkEKPWO zSjEHMY>v>^8Vz6ddh@0self0Yr-#YLrpc(jw=+3>vQV$2`Ob{puMi8zT)9MMgmOBW zK+!0l$`C^~J9_6=)V{a#6?~rg8U0W*R?HCcj)Ji&9B$K1jPp`FB7Ip&?yRqTEyM)n zI-d7l#qZXecL^%?yQQL{Rr=?-t}2d9*RyPf5-OhGeqcn>@|a>>S9cuxV_g;5s9f7QB~# z*O))2{Cx`ro8XA5W&dC9A*@D zAdVJ!BusK`1|uS-!rEKQ4!)j;`5uv9pplsTRDG|Q7UAPmZ6Vw!3kCdW!%H@Ct?3rM z#PeBY^)pY*@YZaWOU(F5@=t(~H+d}TsIncFBOgQ-V7SJ+j#W^I*}Z5v+^K4zq~1^S zz!>&`2c@#r8GOnRUX}{$0rcvR{R0dol;o7m>h&z4PmABgLY)mrZ|O^7@UB;RR^|#j zBR;R2+z2I;T~2;@^9HU6?af)_*KuDJXEL~%5{r|EE%BGP)6!|wBi4gvcs(?1+{35T z5%#PE|Fl?tu6Zkwpo*-k6R}BN{oX?eco3ZgjM`ggySb_3oAKIwE*MQkW;wqylhMZM zppE z@9m6}gBYzpT_PK+4K;ZJBpaj&_94p1O`J5e)<%E8&MDhCSWLtmHJv#6=BYHZKTsq$ zuVdf2j)m@}!P(}7{>l-o)wJ77#T}f4?cwi9EuUs zr}zNsW-^H?ByDQzj6iwMRE%*gLNUaOkXXkD?M-C-M4~kudAkx!Y1S$xJBct*7S%A8 zP3ecb2uVX&U%?0X8fj}#3B}BbtwhN!n&qjUnqEQTbInssoz7uta`8^-U<&fR=;IYj z8;1v+jd$M#st!IgdY;a#@l)7$oqylQ0NfRKlGVf8aJQS3jbf|`kK?ygf5*>hLhle> z-(lboruJ=ya_;>J+)Rjp;D#CL6BfWqNRxxU*j#>Aa`b~|T^h?15!m(-ZR?Mr@9lhY z+Yg-N@Wh%c@*$EhKFk`@9_DZEl1@*_ajE_J04C>R_AArB3#d7I^^>`P`{d5cczaQQ#@L^mv;gZchJ*8y}e^R5ej z{#c(;8t}ohz9|3Z# zUcV$gRJt)d>{X0?g)Uan!y1IPMa6BX19P_q`n_49s8iD%n-Ubi}cRR9wjh95&QDP zC;hxC zZiPEVbh!W7(%+QXQJCsK^$400Z(aq2AXtVmLz2p%l#LR7$ct8;8ZsJW;EX5lW13iJ zxgPDF6ES%xZqA>n%gRBQ|BR)-@SuOOP`S{jzGkDW@~ffvXIoplv62>J=KofnfS z>{fgz+HbOMTqSBa7;IeXEUeJ-D{%c`0BfrpiERTX6wt*!5FH$B(mYV)B)%s^te^KD zo~%=ZM9wTXJ<(N>k;mugZ3k-nBb 
zl%b=viu;nPExfYQ8VpKrY+_O7rw4AW7}U=1MyfcE$wXQ^hNzrAOh0Tx5(1nN*AZ*Z zg+ey!&7f>@@aswazsMv1aby5oBQe|@P4d(;AHIgj-&YkC{UjWS98i>;VqNF zmEE#CPCJq0x3d&S$g`V{XO)*+RY+#IlLICOSOzmKU#mn_m6pYh_dVw99nD~pzx^TH zm3*bUC0)BKjeRMD3Z(+$wZxn;Y60PziY4>WD4e0ZuB_kA4YV1b_;|H76NhZ0VSCyn zTzNcBWg7y!jG*2ETI@$w3lTpRuCq*nZZP4@Ly!6lU~>XTa3y12WsO3XQf6E{(D%=( ziQ+z+6KS2D(TeY3ED>y}Bx&*SJUdfhtH$hOS@Rn}ak;D^G+YfX8$fk)tV+{@u+$rD zpDSC$ivyyL-J{Caqn}qwa^d3-@HuFRE4>`yJWj4#cYb;~Y|JM6zUA{|aW(@+y$Q<&q{OP@V zgCm@Opy2q)^2Tmh40To+LYIR~tqO1)LT0le8-=9h*|0QMSluU*TRKsMVn{#Oid(PL z&|&8R{(6hX8D{ZHW5O_oBa;-Sr&@6j;+U6{Z;!AX&F*pqEtap*)boK8Sjr3Tu_f+q zWQHh8UwUO6npvmX~;}V)n}KqsDD%>CK-TEA~DZ>n7kC@ka^wPl1!K`73Yn_NPpmJ$n^VxK=+fiuDGTvP2{Y}^S{o$68t zw^?@QQ8Vfrs6=lkH3ZyhM2w#;^_Ysd`c&+rb8pv{HCC7Rd(@UZrZ+z$ky0{DEO;H{ ztT1EaQxxLAb_mNfAK8%-Xdq_pTeTgU!nR*XpzVsfygSWz*f%aT=}MujSRgl@3%uWH zo7ZH&=6OlVJmTW!UA}7ooTaOYVEn!lO%_ul+Lt9O!nx(#a~ttP)=7JnY)bx# z#SBSu13F`VNqQo|TKoI-=5~4E=g0Nbs{5T++fNH{C0RroMiU(>?&63zNA(h&ZL$!tO72Uqr4r$!8J(jO&^!IXzij`xVpu-rCaGJhy zv{a^|5?R3(&k` zK|Qb=mZD!NW)n??pA%VGa%J~NFHE&x#N0*JPH9X&T6Oa_PpH4cbCe7+kh4&kG5h)c zn>XvnR(7w~k5x|qbG2t9_6BP2N6G9&fOAfzOu_A3Yl%SP)+5S?ZMC-ps%;Nf?Mn`G z3047s<^xN~O4xM7c5B~HQ$+#YUHrMVz6#fR)Fi&?r*tr|=l1wCCC?05OP! 
zY@g2|ukcrIAG(wK1x9hP3=Ltf;k`-v*8<*?WP;rg&qc8m-$fiQ={Mg zOP*UE^qqqC66iR1m& z3;?1Sk9kf48gDScpo9#(+3BMx&3K71#Y1kmg`1(5}IzLLQgtf*K!!~yo zhpU^I44btMed~`-$Qjk8_qT;*`a;H;z|Tqpt|S6WjF`=y5!Idn`+sb`byVAJ(=A-2 zrG;X}T3kwTXp6f;aVNM_+zIX!FH$J(?!nz!T#CB|w?Kls)fuz#F?NX9IQaH0>|I+jO~uKYmc%cC|u2 zx`Im@%DgYQOW{n#FN+x?$*AX;z{H|5f)Wleirf>AmdyuuABURHl%8*z509QM*4~Lj z-XC4WE?P=EjTD#fcl3GJlQc{9?!m8r8(s_nw_KYJHOPlpCd)N5^r?(yC8sSDsgK(C z4uNA&?xz^9qTI|T?)_o8P8|vRhv|xiSj^1!;U@!)rmd^tBCFlka0droeRpqhb*?>5 zx|q6bmSySG5Oe$i5UfZ0HKToxig8hBicZ2M)tbe_*3T?goout#he}$6=(CdG?_-8R z2L36HXMZSucCDs{R=x=PV9<9?@*})YSg#gvt!_pHn5YL3PG#`-Pxl5gwA$%0=?-7o z)GW!sJ~2(#mV`f_GrfHR{tjmasfl*JdgUS1b(*`y!@5q7jiLMhUvcK+PBZ~*>U(lc z|0AWGcq7|qM}srIJo*t{k{VTWBbCRX9Nr1m)~0Y3xH-FP>Nf&o8gb_ zq*VZ?WwvX+W`gF`;c6?ohUOc)yS^iO?k*!u5tUCWdu@ePhmin%cbw~~RR2zhPT|mq zN7fz0(0)^6UP{v3^Wtb8F5PE-3z}`ct&KF<@d%4Oo?8V07`pZ5Kkijsb0B^X8qda^ zJaOM8DCY9ywdR6{p-+-jxBE;%k4m^*sy~%pi1zy%zEBmDVn+nSml59@s@6u1D{H&P zAFp~Rp%T_f%4;{xEAOOtDcR0AjSC=+UDvW$$bTb=K&l^TPsk}0oD86gtU6A9-#ljd z;hX4DQtgbD@P#bVVDb!cUdbJ>`z{L}TI2jGD|M0Stq!-_YJZ6>0cuci@IDQpq#;li z+q`pERr`Y%P2#rZP2U6iHU4DrJm`Z)`%;#E_}(i2kE||N50P*{m&Jg*17hVTYIw89 z?>W~7kM4sLm_psphgaAipSf2YtVD`Kgx7SzMs&Add@5N6FUrMau-+^_V^<-=VJYJa~q1_-khC#OkLonu|vi zk|DjXHtYGb;8llldw0I7&D&^0Mk8)>#Q%}_wWUvSucJP04mz$?calZndaX|$;i&iglV{`*2SLKr0a z%>TvDFw_;)ir=F4lGjN{dQ&b&oT%v@o?hLLW~homznYwSscN9bcKw|(^K5j z*K!8)^|R$#)f*&fe!~j*M6;R>?2qkd;!ExoU#DL=$z%?V)nvugT89#+?r z(N88D4`Q=#b`z7E7KCd>e2A3)$PhG_(ufc<9P-N6+%i|SnTb2Br{N~Yq?mt`wEVhg z3;;a>tKH|!#Ld8tet`FY9Y3F8GuQ2NL{;Vq&g1y=1_j@#(B|Fk9dx`l(fOUV>zaqpdj@Swc1CTi`p@Z3*ThV$*KUmaitjC#1;XkO zg~1oxz_p7Z+3a7*+O`)LO6dn}x~pB{POB}2dR@n?OO@fh`U9-D2>hVFG9h@VV$X-1 zHSXYJR@0X`s=_J&9sR+C`+pFY{{e=bsKm#*{dY01@Biaj5jn?kRI_ZlotWaS&Tnb@rFwHWZALJI?5SwCUaVy`c@$SlO#AvDV=j=o6cM_{hjyE?h5Hk|xr)A*_0UaSQN15TYPUWiBl zK3x?8-F0qB)`#kEO*IirRE^sfcL{#3*50?ho7;4is zCunG*MwPL`Bs1E$e^K8#RKF>#W;uM0VQHxO$^3{>owStA&xxMaHO6+P1$uYR!{W)U z!22R=jkKgo+cN-b?+#`H-)ik~ec$wr@56p}IpXdkOgexwkWu75f=gA{WL-gWzs_%? 
zNro^D!Fio}?r7?NnVkRYM?~HbNa9q^5Bx2M`;JPfP>W+90*k_&P(% zPv}VFu&l|_$L4z;g~IY@YOD0X(#+6ONsBT%QiGaDKw8rkh6M>H4HgK_efcWd;}XFQ zNXC^lCd^oVoMeZmNxB={@U8j}dA|pNK*ollPtHJp=#2u+TRWn)R8~GH7dJrdpd3LZ zl9}xBwh`nWmDe3pzEiS8G^`wyMZk+>dRX(_PBZbPwQ+mgte4AMh2>g6DA17N75S|f z57B(Imw$Hv1e(ySu2#<--DRFd)nz_@DWc+O1HKh`UcRWPjV5cg+)bKxeJ84O@f&w| zg!wch_o>Q0t5O-^C$qFfCTkJDMbfy9Ht4nJS%}8^&5f*Lyfp3s98dIK6{ef<;+(~+ zK0~j@Cwf&AIQ+6SDS(G#jPM69Kds!tllKd|uT)lQVRq-SlWa;32*T%>-{P6C=T<2a zVwFr0+v(>&UR1wFx~5~3VY4=f`9t?@^l{(1(q7#=^)ZxkVla$yc}F7y42zP~)D;no zCBRfoJE`+iPlSQXE$vsn$1a!5O^tV3jrq468>zPso5oNK`ih0j!qlYpb=)r|cDQFC zA6Ld`$i_{24P|~tXR93xg$y-@0hp{ZHjUnM1pNwMaiw(bP{VrZi5;nx&b(sO>8N{|M!)9n*Fj z-ET<=4dIc%0736u$rU?4gQ_j{Wm=mkM4GbTdG-`cjqpt`dRqTUt0pF7C7^iv!j-kA z%m@<)vSgv(?OgWil8mwC_;h0Hz9Te#2yy)|EpPj8_Ih1z!>f73`27WuN?oMcQvb46 zeLDhQ>KYMMZuf*-hPOm$-^Acd#8|D__JbY2*8S8s;Led-+%s0vHAC5Q3!7f91~@YL zFZqw0I0z~|&+{fc-}&5kzg?8WRDJ#{vFwO|tt)1Xk8p*c)>eVocb#NeT-ES$PUM#} z5|kl*<#i6i!D1c^4zDl_z%sW3#7gu#yh(y1G1KF4^9_8%GG{!`EsFg8)Tpo;bOYHy zyhgI2{OGVV4!k$gZS_P&{F1#O%uYR(x-TkrB&0?aGoB5-9G_?Iqh3OMz;~v}X@a@x z62Mx$mL?-(qAhARtVr-xsm&uINYHAd7z7Qy{c)2=G^})-%J!O{7GPRna09qIZ(Rh# z&a~TdGC08svW_)86IsiMkFcsMN(L)6#IL#CUY6Rak1-j2`E{O-QnOMAUn=kTP$g7J zr|>pT%;C|6BO+y%{m-h;Ox>`yJ;Qv}%k?neY-a#hNy61V!QOV9mGj>`ClCPOakD!M zTo3q91pEu05HFFGa)4#d$WQeqLH~SsNr{LK4{GEdFVbZPH%{Wq%$ecQSz%N!7sKc% z>+-`gMqwS#7r`;p?8er@*|pWZKz<(xjoC-Ux4u;f=c z7)qOmVBTAdEv1J2rN(yNdia%h`s~^{!NDS{&c=!SR&5K8d<{zc4NIvY;kX{AuKn!% zB-ckp@|4YtgtSSQdq5MF9XT=x5d=YRW3nA*;Q2Y8q2c-eZ?-{whV+p8?in4f+CO01 zQu%MO#!#w);Ofh*CxUNnEV4|#05b@ZtdS0MS2}`dCUr+byR0DvdD6oo zg<$LHC&z6Dqr^~^jT%=3*4ZyD?^5e6r#5Lg_XlF?Uc(C)3;mNP)A(?+yh4Lh`31|Z zndkdk`ZG}+^T%33QEZ-NS;!Any}H<(6r`cS=PpyKq8QkGoV$Lfzs9bffKnc9$mQu0 zcs-8pkcV7XxgoT5$S-_!yoL%(UCLI3?Rfa{w=-nVRhN{067L6z-68m9vpOCF9*~-8T>d%2pkO6+MJ?U&1p@%`2MXbP>(=af z*FVSeqy_mx;b|^WB!FLDdGi?pBn6E`(TU#8F0#S~Wa5Wk55SR@de;0)j}IJao+Aj_ znHOb$H^O%xL3(*|VWc?I3#&bP45V@tSsz>-J9FTf*clPnW$33_Sc? 
zx@wEy9gMjycigiTk8en*zhR^+1jh-A$-~P;ndVVO%P9`=>M|2tTqzHA`la|Wmu-#Ke z%bu+W8Iss~Y*O+Z<|>#mxa(zK*Lqy7?V*8e>IBxFTGZ|{EMKDMsi~5I5`5cNxcf${ z8rQM!G+M$q%&WP6)t(X%l8}utz*XdT`%+A&E{;Vhe;4-hG*?@-AZ=c;wD%oU2p)&99sUsPs0%C@s0caI>U>;?5le%|UlZv-K1YXC9e+q)bfV}#Pv>vhX=^~9 z6U}_%k(g?odl?7PK`OtfmDuyEx-v*UY$`o2hrB&Z=j!j!=vTLIWVjCf0l!V{7Zy|+ z@^)#fJSNqddP1a_nSJq~e`f&*TuJlJ*;}o~e~760U-t|uUtl61-c0$lRp>uWjDbaz z>*=j|3{pX+|G~Y8uWHbpOv>J#wXeTl)#qNBw$)zkT+@a2Z}&8UQr%VMFXsB3yf~=( zmCl8?_#wX~H9zEh4qdT4Z`<7Pwq|iH2*D^pezls7#I}knBGhg$Ss28=ZuzqBV{5cc zNxK^);XSO7Gi1dCdR8jPuZ}_Jr+dx-6>J@d;c`o?^%M{c@X z8ikPUyj%Fa*Nk5fzx;!Snilp8Rp_Y2*;3m}b7cZY@whPl7e>60cAC_DtkNv$ zuW}R{mY^A>Bv@hbu5Iq4w&Z(|v6Lol#4}HiCJC>Lpo9{eW%|*Y0x?v?4kEuzWw-X~ zj9y9AUbZtDFFWG{kbLXIDDC@^d^sGnI$rlI88WY$%};;SBVS%VH54@m!@jWPSQ0|9Ar-!wS6tY*k{wU? zie3(r_Dkv71uNOxV3*N@4#d6_9)~t7$PW4Y>nch97!}XNL19|fj4_hA)U2(0EngU| zGd8}Hg`JlHFDy*%YD!QI&9Z;5(qtCQTs0Pxb-xV>cgBz%b%kO0PfRfyXFc@2&t9z3 zZx^) zysLDGJ&mBu2K(dgi+XEcwv%P9@K-nJIQ^jFxCTdXT&xMJc=^4vd*aRHyNJ#Iz2`R3 zbD*{w$jMvp-qOQ>Kkfhg2j zfg97s;m%*Vnl0XuVEWVIPhjRIaKJ#<+!t2q?4P{H=UH@=f+I?{NwWfzDMTE-R5^^7Y2B^MIphm z|D@o9rJLd9mAI)mPqiY#w3W$anFg!TXF886_jgo2q!A+({?58~#qN7|uV&p4@1_8g zF2)jAt5js!COAN}UOSj)BZ*6VF1oDM{Qfko z0qx*cE^HdeEOq{x+rKul^TDR8?k;4lQB6^>E7B12UsARS>aR#08y&Ls3)J zTqi0GRQT(U%9nFj)kdT2%%)Uxbj$`RJUN1+rcSX<7G&d(%2$f(jYGZ%P+_;p&I*&I zi*prXFHt({c{#&^tq!7N-6Y@o*sDs{@XwNz*H2xFS708IMrLp>&@i#IrS&?3UYG5m zlQT1Nm>Hi8QSb*G;pttTC4ry75Msz$Qh3;VS*<_RE)mDfD+(iU_K>A&wIUmwfyrfI zL-DoRg;~9w9IxA<)qOm-r%Htf*1X$SO5??sK3&S<#8H2?vu@r|`0wOfB)s&n#=!Qe zL--;+%lpd=B?28zfx-^wDms@7>Z1!bhx5R;Asl%ytOEr7Hoa1+WVr2dTC0`?wz*5s5m$O)KU z5H|T#Wr33m&H63|>C5Be3>Sg5Clgwat+5F70~Vt>vn7hcsevZPOGe^2d(`{@N& z%{!>A0*LJSrS`ho<06O2#?3IAAD%V^IpCcUpq?uR!))(EH5abwd|_L4kjPK)H?`1kqK2^VD~W8I!KVf+PRWxUIh@ zkrp=&Z7PZvtjcK##r9{h;(E*Umj*(mM5m=q*?@f8q#GvRgoA>Y?;Na)jG7}n;P1R<7-KF7@tBRin<6*u#hD`Pt}(-|K4iV zW+ukA(Hb{G2asIvu!qaBRj=#2B&qa^(IkN)sY7OsE%A81JeViZ!21f%UN}k90kK1~ z@+}u{XOs;dG1$BP%_^fW{fx#;Ih_&bJF*y^tS}}9wv!5zWJX_in)y)R#!;)G&0slJ 
zuZGJk<2PLt)IYe0+!ye`!zY5}ds+fQIu4jy2)A)DO58gB`e}S`(fMm7czuYyk-L^Q`MmCpEiaqgF*V{L zmzr=jFdsCPUrlo!S2?0H={0%o`mcG(;})*^!$igv=;wa~=*f-O&_F_}STynNqreLa z$?Zq!!M=83Zw9({n2i!~dSY6HSKRo9&K3g6C5-Zk6rG(Y1mlb0H>&+0SBxeKwH49J zySUa?2!P~lEomh|Ab7^N#)M*(t0miI8fN<3wO{QV|N4sstwpC+O*Sj(Wa1bFc(GxL zg-vcq$?wXY`?Smk#i>~6-rs-1Fg8QsmCBtzlg#~qgXLI6O9u5WAJt@S5zruqwRvWZfXEojT8WEgq?gJv180jB&w_ zYaoE0m4maBR@EUVoo@ury?>W|VWsp1+#r!F%UBS;>_kxQ_dDC)t14a1meFX|#VQfJO{_l@ zGeq>=5pLf-e+W2Uvld?i%-3u|TG*aM3%{n5Jo6+^zdEDtTLjyQV}{(V=tP*DJ!FlHNqe)8w zKC=DWdKH)?@~nIIKSOH*HDyHBgbFqCeii)3X(>XrekBL6w+E?O)u~UNKAP8iOA1v! zp7K224yoKhXM;HoyR10Qe+Y8ywfJ%zM+ev6+X>H{#c|n(5#GO(#+i!n`5luAtcjoa7mO;f8a4&up9y5VTtut+`Ytm$tUJ}1E zctokNxo_Qt0$q(TKZV#}QJKzQ(3&q%+->D2T%@(m&W6-2$5iI5Zx~^q&1pK0eUj{= zq>LQ8H+yefWMqEp(e+jXgZvw~cD5G3!2&0l(J9{<6A6%K5yhD&rTrKPP;XOCt98 zN}ggdL~+Nwg%+`+Q0`#4VcY>I7%89&zgltIv!~GKPYQ9V8FQwz=!L4Yy@tiFS7Z&d zrZBo7LcdqvQ;%gNf4&d1E*EY`+qaJ~$4@84SNvm0PF=|asjN8z62c8%C-{lj&Iwc-*GLwb!c);YvXy+wVUvqjp6djtS?xvT8`#K zwdtU+g&A`BS};COuw3hgKxS1Rvh2m1J>q833e`=v0p-ldkDp;+3BvBO$Ss71`?2bLS0Bx1mngdQ;jo#1Nmm^UMxl8mN7{?JNpqSjfX~ zTDmDYB*81UIBet`b`d=D7Ti!MH+W~VIZ56}Sz$k89(D=UUT65ys2Fofm{tc00lOjYsJ?oCdAjc(l3y}(D@pxif z7ypkbH~&)q88`?ZtI|sn#N3F#r{JX#a;<%Znw95~GGS##s%TxdY`!D~X%S`@d2v1G z?a!LxZ|JcAdd2d zJ?`2T{Oeyj%zgcWrcJ?$BDuF=mi-I-uQzE5eGM251R6!p(w+t8s$ zPICdGif^{=vxGakaYhXa1Tzou8;dl2IVipiJ6L1q_TW+*uFYt8=`N6bnizdsNa%Jo zGQWzQ2&6l?xk$+1FEydF=zC?BGcdbv%}jO!30-^(Z@hr%z95e)KRJ2f<}8%5 z<>>9IxIkMMK=w0gq(gw4D6#IuH9^is`fB6+g^J#{Ht(utwPf$+S7rR*hatu@A8r-Y zc3CVJQG`dxgY^onTzfFBS`W&FYT5j%{* zH~pCH-4UX1ZD7t=R6(U5MO+LEWPB3@S!hpMQRFjCF(ncsdfF-icgLy7Y;6^5Ic(fB-=@;ogS_z6qQI3g#C3y282&+01g&wh0#DzHExc z01&Ap3crSbj5TrN6gq;2l5#X;>I<;?e4IKi>nIWs&wT!dE*c_ldO^d&8R zK_B71SoUnTY4||#XLGiBkSpyjT-hCIHY-1U{|?YUX_~FC2=_8Z6}&sx`B_Ord3iKw zaKL(Wx&qUVc^yTC*b7?Mkl{~1U%Q&#x#;2^8FjVVr&-6&Skk?@+pk`S{;VBQD-lNq zoCTM>w1k=DzCv825+PQ}&j>Bb(BNFk)3*snCUkipI6d&HH9cNxm}gUHsc*j{PvuHs 
zrC82ay~e_w}^wF4sSG*@dDbPH<1=gZPebTaWF!lycxJ>Iy8>`sAC?h;2_{$QgSi4ihndRPAyp7wbRm?Eh^s@S%N%R9;mu@y};DoVbo z`D4ikOAO*?7gN|1!t@ovdb*iVpPC6LE|B2|#eq_FFZu6QO7jYjY>&rF*@#{oYn7g$ zOTz?-y1tEt9j}W8`mmDjhA}foxQf-{9PijD^~|cL2a7Mldt#AkAf@D5Ha}L!O2s%- zDH{V&t^hhLrFRZhX5ufMJm4y31cad+Mljhb`n+7gL)9S(lo2rQ%Tht*1&v@gap7R{ z8VU}uaGmF=-M^n+Fo3b~k(z!wusJXHU*JYXO9)pH2woJQsI~MC{d)Tqmr2k6>!ri; zUz)_ugQyAA8v@+8GPMWNou2L0ek-@QvFb5KxY{e|m`Qqr7P~N^M{}Lk@`t@QsBM8l zUcY7DpZ~-mG|91-fw5J7d3>rck-;Gj|0Sqa-mAbIr}J2-`F4NAm~rvytjR*_=b!nw zZ^_yvMRdq#g;hyQAJX+D1cRH&=LscHZ{D?y>tAmeMq<>0Yfq-at_(Z<#uY9dMku+y zkskTLj_;f+pNs^o-Mw+E{ce-RZTFUZAnP%*lhc`MjDX`2XS=fPE@RvZ3y+}Z)zk|B zaSLp^Y3j`TxTVI%nZK!b>ch$7EW6Li9{Fe8jxK>~NIO|p8~qM{@db^8T=ZU&rR21i z;xBzcJ?fnd&(IR({9TvMMjJ)gWL4cS)vL?bUfCL1cI}Cmcl@-R5!>dmbA$+*Yq90R z3(;|=b2Pl)dHVV5j9Yk@z$${Y`4%?2IBwzc0-K7P5Zd_CLYQ}t+5 zbtS|A%Cxw!QHqr>cQc$%54O=a1?xsJl^IT{#`3o7Qv3F?X*cxyA2XsDs_h|sfO_@r z_pKC5|JSUkzszg^d0<)>?&LBG@E?JY66Xdr1FNEMr&8?~V=+(fJ}hz2=>AGx5`!#9^J zdQ27OW1cc!4+FbahE*+z=4I6Obe!^H>Sa!s{9^aoqC8}WF&G5Bvky0ZK1>)`SVELH zNCFX)QbPLL9)CT*jCa_dKu3j$uX_RayvN_=eEl((5RippBbiuoKxM$X%r3xb!QO(t zb&+vC^Em74zwDzv$~TP&v;tgnL`(X;XNP^)+z-d8U-%Zn*1NHGFfqi4;6b*00`Hh| zg^Yv5qdUku5MN=K?ksJ7+f>&$#gV3ku`w*y_IeN5%CRKWe0Y^l9ucN%nL!DFs^-`;5)`$D~P|YVbzL~ zZtV8GYZ2Dr!?NH{efbf0n`4qKt!nzfQ8~_yoH1X~{taSkEj`ckULlYMaoC)}&?o!l zL)DWHMh!JQ>JFv%f4ZITj7OL&$G9(o&D&;^;i|BxH_kvmj<0X#G7}O z$G(?pehl60gBS#OFL(?}I?PaJ+gjDT^y={p6hSrPu2h_}FFG1YoGa7h~h>jke zTh)ovkAPLwe^jggJ(BV-c0T#Lr&uUoO#B~3OH<-kX~rPi+*8V;4$rp~Yj@i>gvFR? 
zQlD^PBVv$GZvX&=iTg{{=@J+LN?WG9s3?Kkl zdX^5BK-s$5=+9}#)PoijO!FP#{TKYp9YJykGtFqF z{V3>pJ991nN)|(KiLV%-$&G$Sp7QgU#A;~&6a&C5r_|k*hP@>srM|I=(><&baoT`1 zaf749!>sw*BJAHK8D3(Fi;M?0bZ=_C@j&Ot%G#{y1tRgI3Y|Sl?ft$)t;j-91e;Ur zi%ui#u8K3Q>Gqq2If6afd?VYD${HCo`aKm5XhYKrN*pR)CjIGJT30Kyh?U~9%%kgt z1;Wi=D{KH4hy64h>wy#pUkplhJMcd*rV{3EybS*W4LvQD4bx=jUu64Jp|-c#+4?{; z5XHaQdIL$MIjr}mTPFEq?%h*CjHon5sqK`@GM>Azpc{=x zsm-ge>G!@;5CO>}n`?8Mj8sVvwh_N_W1tQpbd*^&N_MG@99gr&yT(;#I3@^?IW-OC zvEHZ@b}Q9xy4dktULCgHh^Zs>M{9_hB?^Y{RzqfKy_+p{wUnXnT%T9)#jIuB%V@78 z?L7ZJ8=EbTL7pOSQHxQzXAubY14ATGzbh+H(h7-fFmdjFCaDDd4r!?PCGQR3d6Mrw z*X0x+&*0HG)c<WkvW5T z0LD;H$x5wX)+PGCtT|HlS0?9gT<0!G_3JP)OKhN9T4PT-%O#kbc}uE@mfe*MmHA_R zd4x$7A9wd-NcFPaMSsTq{+Bs&AU7=wqYKg4cQQxTQ5^#o=R_&1gY?A4&5Sc>b`_)N zJY1jifGYme%-i+{N}Ia*BR;O8WBfZ0a6&t(Wy69IVm`q^E&H+Yyxst>ZuJw>Xs*&b zkE|lasyx<=%PR*fU@q>9$Z$>-iHsk4n+o`emU;VJRQSoHvbjHI6+1px{gLMujn>UA z93|O$9!Mx60!CH_^WEI@xEKE+{OHz2Zt^px{AX&P#91m~Z?*!Pfl!c?&{rJc%!ATWXo;y<0iw;p40J6|mLt)iIHf;;zOU$3`T$Irlhm|J)=o46TvS}nB@ln87f9d_UbRO?n7d?Y3X zCD~kQbomq_uojGxKk=z^=dFT;(>1)#ojgEA{gwj$BP$Mvvi=*`TUw-mK|xtr{YZZt ztkj`)e6(7(a^RWN^_&0$o!$iL`N5QT6=HNC$CUR5{9fU60=EO$)-7SRy6yH6Hs^W1 zJDPqi#D9PF<8kczz?dC4i%p`dTG3tr5`yoZ}EmPuU+tQfnk) zacvExjDhua*BN?5(1b%ryZk>Pr^Su)Ck^MYtOdNM%X8M8r`_~)*J23)4JGd>3^KjZ^f(|~JQ?kH zUoKMPt@Nc_Y~iUzkfnzGNY;vuKv2~{sBhQrjKZqln)rPmiYTV)8lctWct%qb}2YhFE(<2IBJf|RDZpW?LU3UBlS3&W?JuOfCu;vaP{Oe z238sbkQ!@@Rl?RQ40}i#k#2g_9JQW(`-Ub)-zlb?LHq{y*?ZMl1NtwRQLlq021Q;D z*lDR#PVkO}zBdv)IP%nsdG#?+2e&zJ4E@#X04B#&6|8IAaC9~Jy8yj=%OUkQLi+4w zQM(f*hKap6{*TTy4`+k#p@Ng=mzDd!bhK1o)Q(U^*R$kzUu^bssZJ6)`y|r57$=)+ z2{FI5bM~5jUh&>XzjL0?p^f-{Q{F5;Wqa}Y@&KD{U3>B3&tJI5V+_`-#|x9yu0Q5S@+ z6gBkjleDgqZzH3lo|n^x;&h>Rp2ux&sjtyk8jT``mXevaeo~vM;QQV}o+fva8`49( zEz+UVJBg&(M9*)OOYrgdj%rx-VmmxUJD7@EU64F#zE%zmEftVHF*2#8=(LprIvJc# zzqB>IhaKN+=!UaF9@pxPq_*hcC>lDm$kAJ$d8_EC;YYd6ugZt8FA=&loX9qQ!;!qT2aY-{AE zp&aa-oz^w_7^05>6|KeQhaNXMnv5TtJ1a*X4`ZAXQ6UVxbDt1@KW1O2E}#o>7_c4Q 
z6`GZ@?^FsRIX#6I_HUNsb#9*a{r>+s2lBsYks)1ft?JK-zW&ciAPB6g;Z)jWs)R^d z4R0ACgK$2gR`5PAp6KXfEO0FRY{;Dc!{yCXEgl(HhqkpSmp5@509! zSCExAXR4+>&$c7ns?Be$s>oNGpT;X2dop^|WZrjx`#00t%8Rv1A6*6mGVb*^`^58W zsw_KEDo(AWetOsbgr|)9$8)9Qk&eX-UN{ci-Ev=WHo}MNu<+8E@!&Zn30wR{fpXEcS;rKhp zZ9<@SO23tr;H+g9RU%gf$SPv5(tvUkm5hy~ttZ;uZ8({zPWzcrid$W9s$gf3Av>ve zHbmKrcWIZgx2#Nn(fcWYXDKBJwZiM6`nGu&TzWj6gvycp3M+o|(mN%`{UmPer7YVRZFc_6WP5Hgk2CryY+Hv`I2w;DSd-)e$! zrm2VlxP{9;CF{W!D&AzaXnY}aWNIYxtX`v;Rc%kkh;1N3zI>uAY<<&vx*2xT6TFAK zn$7;ADa?Y640YU)Ye8M1Rt$9+H95cN1~N`_vQ9*zuoLEL3!1eUc6AEt=hndg`O{`` zK5Lo)nZRL*h`yh8l0j0@_?1ulR6FtWo#|jKf8HiE+6GH9yGQOQKY4QO9bZN|e}`?1 zHEu6K#!ppww_c~jP38o%YjT`~N$5O>Mhuy|*KO_!N6#zMO8Wjd?|D9b!25JOAQ57X zO_t(1aTY&ZGg$@miJ<@Eo{ErhLE9 zLQ%cnzOZYJmb7|$m}7cq;5EB6r^w+$>BJ=e&aJYvVpS29FA>alY31?Q=6w{Ia&`~o zBYFC)z#^0>aAd93aEQhe-#+3R(~K7$({MfN(_}!@w7$mNeXPIDZIC5O%D=%NiwngicIM7I$ zW;BuWbx76wfaq|npuw-dao*v*o~#t3%Cp*W)pj;4B(Dv)eBD0oGxS3IS9NZu*U!7Q z{NHgWBGMY5V%JnXe2#`d*&nLjdVF_P(|6tH!bcw)0`#;rMwXL)YM>XDd}Lu`mZpz9 zRR2+mV=W>5tt8Ugq}8H}yUF5w@-?NqG@k{yA9ci24Xsh~w4&n~V&0aew!^II)~b&5 z?~!Ee5Xt43p5k(>bVyO5|A-2yq!=$EyEJ=PqJ_VF^^Fk;tWt4nm+SUMvLxsO@9M!i z9(x5gRjK01ynu6)TypR1>49f|$K3^OaE{WlW%F%ML!9BxRB2uFGhrt8Jf7$zl@BT^ zRn3+okI+bxuf9HwW`XL=k<4rqlQM^@F8HJ6oAupR>a=lr<6UprLtUSFElSpQ2Xa0m z3(SW5@e4$;oz}j`dVIF}e+BpNM#JtM{9ThJQQaDP`V-)}FHc8=TLcIIa!q{=&kAfLkXAjZz3uvPh`$#O0R45&0SV9Xf z#KPidH`UH-G7H|_cX7wbHj{RDp}1+mod(kXoQ2XzwA2#2GoCk<9wJ|kgF7!T77$y7 zZ9H~W&rx38HFzEhAXz<0!DGB9Z|hp#|KC99f5KlD`N0Nb?bW~MqXJbiTsyU;bkjcg zi*%@TPs$U~;?-qqgnw8kne+lVfNX!F3@z@XW=zng9xuii8y zH!IFh({5o64<~W`AAPqgPxc9p^Z7Go&7;wO@LcA~t%Wvz(*Fs3juz70QwIj7$}FdV zhVKv_?}9%>>@q*S4&DxIt_YEZ_G|(C^BN2V07`99bq3jN_Cq0@!sYm%V~E5yVR}4> zc{+^`Khc?l<$@^SKvX}EgO?At3ZIYkh}hg?pI3aw1zRA)XdHz!Bu4_3x_4A~SL6aD z@M_x_^#f5WOa!<(gk2@5%HMh`3pjo3yX(+r4)^^MKrdZG=|~W3o`<6z!-KD1Y9i4@ zNZFPfuWbE1lJ83|V`~j$&yZdAv7?fLZtruV$E6~^8`SgGq<`$2Lq{w<3> z)y(YSjve&nw=mS-9(te-#xE>4uR8aw$h>~^wLhfAdZyVeee0~jobx4kXE`Z)*RJ{@ 
zBJldXMpPCn#pJtz*@a=eZ*WENCL3q@jFm2y3%mJ0)CPrltzrs1qg)5D@g(|Z4Q>0Q zTdnUD#~D8rK-xE_oGFotNMpVoJ8LfiT4Jys2dHoM+U+P?`K0rnoy<#r_^s^PTxl~1 zD)RDu9>jAig`Zcp=`VMU!b0$L74MtWb2E!^twcp;0-O*>&TU=_WZdp7?=(a&*E7C6 zSK85~a8>TG0Ij%qt==`-V^uI4&GHWGzTr-o=wRHJ=}JEZs?9PboOwP;;akTLqR-o= z&2KZ1Ms7{ZGX=g)Z2FGQ9sxKAsJreg?$;pRQ^#c;#f~l9E00V)+dd7!YIU(KL=)zL zD2k+YO;Vtz&vj^m0{;uG{~MUZ#7LKh6ePsYVQl}#F*{O1z@@rD4_@_~7~Ntalt=#9 z&p)hY=%0c3uB$!*Z!U{`EZn}dd)+qQD@blBErb#v+zo}^aZjY9Kr}yky?}GT@_({WY^qaJ! zm7I4NSL=(G6Cw94y=K>oRS!Si(cB~m@uUrUyYnIT$L8hcv@{c5zsG`spLyU{;-LDU z8s>k9z1aj@dH^~k2-Gqk`R#@DvlbATbBn-ZGfgQV!xGw^nE?fD9NtRanmgKBuvTPr z0glia$ZC6EE3zYfSm#1E)|%ETvomW-`hdpMq#wu>R8m%0DT%**NBe@evLhH|F!pU9 zM8}3Jw-+_gcNb%!)raB4B{v*@;%2hObLz^X&^=4hL>~6WRgEBDM(d;ez)mN0Gn3YP zV?;PM4J%j?zVs&|ML6r9SOU~^%4W6-jtY*=}5=?nnLq&P# zm+ZjT*H+k!+)HEw>>D*L=^Kt^mx14+CafCenCehiW@m}X>@}#ORLp-6XVNrATxnoM zv*5qZ?AR+&Q5>3 zuGr#=9&|8t$SiI-e5!;t%Z}BSy^Pm8#LoDmLcqS08BsGk^9<~i77%sLU3|C`^lx1L zSA99*_)-f_z3g~5Cip+4C8FdslBOg`wiQ2*nqFedPHn-Qw*2&-=QVj3#8j)s6vwq+SfeS&?GLeg_7x)&o2Yx4(a4z}NG}kz0N%dKe&aGe?kE6#<{AM- zLurc%0h5LD>SK0L+BCWZMo|T%1F~LB zsDDtUzS`KPb|iLlwCW>;?B6N0?DT=MNs zX?y?Lw^qTF71JYp&OK>CBgXzspG4Wp?2_}|vWaeN0uN4Na6Yv79?%gQduJI`>#uP( z!aMDo8^5<^oj!iJ_sbq+3j_?ZLw|ELTj{xD{=Vxu5Rn^<5~VIgf~H4k57)Dgw0J}6B;~C6 zBZqE$Cu<}x#dQM}Bkio@t;xt<(7hM+N^m^)@p{bVczgS2TnF#Vi5FI+a#yp<>-s9^ z4j_m9q)dlN`meC2??ID-y8Mt(xdGef6@9a&4quch^?+^`H^;~I!)?3}CzPMx&Km>x_ST43}pjyKXaYTe%`Tl6C_t?rXq@y+bMypF_R3)+1 z)7JgQ@7(EeVl>6$t)YPbbOcX}oz3|#tkHob&MLQGbdwM6B%3S^@gkYKe9k=Ha4%~; zyU=qHjf|wu%qu!TTX6ZX8aVZ>E_A8;Hs)3HM}HSR+<*9u|IK^+Pr8307x26S^Qv+$ z263Yx*pUDfWP-rJzyrjFnBJZxL@Xxw$0+TJ-+%M~WM_?js7Wtq}4U4KR z>y{jmOKf(~+;k=MSxz#4x5jCihl0ZnbK|LeQQg_p|Hsx_2UN8$@58XkO*cv_-6AM# z8j(gyxPf{m(tvb?|`PfKn{6VxN$L&1Dv) zG+r@6N>O)RZ5r6l zFNGuq=$4;iN!czpzAsocTR|4>L->bh47dF=Tfd8EB(spv7(o!u*fxd~4D8Es>FI~g z=8AsOI1r3h!xafe6uf9T6a`_?jW(u^S)G5_3>hu2&5?N#qbddYoL0Oyv}0rx#1P9) z9Bnt7p7jF|4ic+x74hgM3$6VYx6ys*u~*Z%Jjkkc;flZwF_8X6gvcJe6Nugn_rkWZ zrASW3>0!I1mh)_(vsVUEQr8Dvh)HqmChN{8W 
z=iHX~@}Mb>ojXq{i_KJ41@&q{bV-g_?!uvhA#z)8KOWC~n7I#xIBrUiXsr-Ru~wtt zX>RvQ18S0l5q7+06;kG}-@e^8o@0~HU4PZ!u79C~Jbr;qQkR~V%4WJ?`!r>zPPxL( zLqj=CXib^xZa}0w+&N?9)yE};ZK3nHX_KNJ{%@%l4@kXSgm^HAjQ^xwWa#?{_Ql>{ zF~5v)HNm@PS&ZLA#Ox_H=PW~8u)r_zN|Q0pQCxHEo)NF2y@5WWd=s50EWwC=fV0+A ze0~xeQN|w%(0dg#^1kY=Bx$2sOlGspSo4ZI{BeqtQvObDz5lN2aW@^#I*2D4IfCnX>u$949VXTl4L&Zsyj{IDR9220wwE%lp{+a_{P zBjh9k>b)DK;O!sOg!h+La6z4eAmut{xYH3w1$?Q%^xnG31dDFe@y}_DBDx<5hM9zO z4OxudnBWG{5kG#x+#Xw)LLg|UDk9mMea{|rwgX}hJAg;fOailK-4)YlK73t}tzC<< zoi)a(uOGXUODeP0rtC3h@wNR1?Fx7G`6NEs?bxOIjxNSnzjFSDwy!A(w#dr=L-lDO z(v01Hv^uQlJ%u0ZRk91(fl{p%3=JJ3l1&_}jBFk07dSY0|B~tQ8OlV8|DPTtFzWQYRj1+udah!*z}{veV|l)+-F8-GYd;_T3~| z$5_(-!+QeHzk_cpoH8S0>4(y9sdcc^{eM`YYRXp7)NJJ1*`Hk2S_+Piiz6N3df~p0 zXhVpJnpG0$gG{oE62~)%n!_{lvU_O)O;~*v9$wN6nn(DZcPLB}f09{9hNJomHZwew zHXTn}RZYznUh@PfY>qQnIo1&UxA`o7!!AL!aFW0>?r_~Nh!-XpkH-gr-lNN!Lf{x^c-lgBnzw4sWw^8jsjX1tTJ8C9K2t7jpt2pJ%z2zy_MC zZ|6oIq9QeDCXvYMdt+miS_gvc2=wcJ2EHA0((SP2OdP4^oj%dpG(SJev6gvHWJ>!rAG9=YoAGs!0krIRCxl>IV;3W@m4@>503 zxg()f;1ov>wa>OR3qO3IQ!;MQnmap3QZU)e3z#gtOgf;jVZ`_9)=O;x^Cyc%8ej7J zj6{*7kFMD#YZb?-SMSVZy+16p8T@@W9ksUmo%e_F`yTiVQt8F^ zuNJXUu*u0S5EVjvz5g=f9&_N}E&}(?>4}2r*YnS0_+PZS0~i{w%&CvC?bqKddMUX! z;shG$`$$!Cn2`v|0--lmhXl|m@a&$8dPyD2-xzh)i{8IE?Ed*?rv%MnMT@J0@@X5X zGW7tBHElcRK)SAhJlR>o`GvKq+6zaOqRm}S3oD6jR$h3gREBJI*Hz1|rBFRJ*pC=7o>zA-phOMyDoe9OCpkB5W9gzL!Z4%~6s1 zy068LNmtErD^s%xdd}&#aZcL?UYjh zAj$hE%2rX8eyUB&o4hRf-A`GgKTLm}%i74BR`9BUY&O-Ccj!4Z(T+erQ+}u z_$>t>I9q$AZ)pm|<6=0jypwn9lY0R8!{a$tfyEJHY~|0=jc`|ajR-hXHCZ|%pTrjG z&USKo0p7IXac*Oc4W9{TqRvWLA-u@wBCtjR9-le<6M6eEIVN(88wMG)4}V~IVoou#H`o|3xlO3Q;a(`&2Lr3vPe z+p%aE3q6(#5DvI6erKKt;4;c6yK8DyT158;g!ZpGQjlmgvVwDvGRqffl8J)#-~(JP zl*@Kr+#l?HIv)Q?F~bO+Wd5)+LyTRppmbr~dvjxt^K&mkO&*?gQ5oJnZ4tfK_S_n$ zk5s%5XhhBt3fsRM8=|T>mC%0d*6UyOJ0YNGG^Lv-?1NCti+E_G;gPg z?g}U^eNDcs6l)qnLQ`x=*}$bXhNhUf@Z={A%hD?u@N`@&*_3Zk7F6PnSbybL8+}DP zNLMnL<+L77kjk5Vr9<+#^g3M4`3-J~JYwtyXCiyaeo%bN!wfAMrj@qGttQUE`|oU! 
zQ?#Sm5AJCq+pe28NmA`~2aIbC9eXYNzPdyrUA4}aPK|!kYKJ=YrF^wFCVot3LyIk` zL`r=xQ@Rag)3rR%Swh?n@wdZ3^GkLLVKKTbw2TeJ4aO#nOqFhaqcg&xbH!POq@v+- z9d|?67P^m{F8b<`{{yHFsWGzhf#^k20jN%B`xT(KiHteqIiOMGFX_!dFy+ zEyCT%&EP1V$A~lX`izyjTKGf8J%hix3?v@d@A*AHyOvP|n*KRRFA9+{a$yRwa8r62JUG;aRdJ;UUqFpNUNkte;UM9~ z#%ny0B+lYDtKg&Sdttu@cXo3>6&d^UQOUNP#(a2JHYrHkR{W?XvP z9)qAmumYQiXSBG?cJH^(b|~3S0ER||DDEKQc7@6XFQL{Vx4ZQL z@zEF^5=rOq5ODJ1P-pvjU*7qmSW}3fPp~BIpST&5JW852N=kBT6_szx#+y1=f>-K` zfbb#X+@e$1rj<&kl;n^=jaBxV8Q}r>sR0#i!5*qYWJ|r=d0%{pze4aoe8fM^?^bj0 zvEG{7-IP_&pLf$kVI@J;f_{=tiodYvn6q#qFW1|e6%xuEb?A!j8NhfgDNx9o94Je? zwitNLcCf%%Nl^4(soU$1B6aqL^UDkz;fYk!;Ck%V+xxt4!XuOs>vkw*qLsQ=CpnvS zTsX0haXh03*K{YlYNIH=S|kFGKn>=0-t&Jj3+S@S_u-5HQV-Y) z0?@L`uk%(#LwreL__TXy`BTbuo&UClaB@{KgMeIYwdKm(s=3kfHPvmpaZ%PxQnV?= zZWvFIW84#DDXPBg8U~$)iQN zLySLQuQq^RRYpd}QxhMo+^6u)O%1K+aTRvZ{So8-d{b8WVFNtV%z=t#+UvCra}ypT zR|F~A-8zW{%)3NQCYb{@`Ohu@;NxEXjon#C_7Tplu#G-Y4; zTO}-LaVL`lr&G2dFTvOOG4u+ZlauE;U!-05I`zj2(X9g!F*+unl9943Xbm8SeV4|= zJFgA6=J}En3%T8BD0`xh0)`c-340`CL?Z|UOL_-L(m;IN4or>ydW-jWJ%Y0hO6~ae zp*}J=yaNJj!;N>{fFiJfto$4uJrx0{Q)P+YpE8k5K?@a;oA%(*&A`rGZo|2*^!QN$ zhlL+JGC23C5bMabD6iF9Wx7g!PlkgbMil>SU7(@INUaMG>00#p@VTK^2!Cq4y?$w; z6k;FZV@SBA-jvG35!+&OBLiek+so9-WdD~>7_t2B=G zog`iIkB*<6ss_+I8XJqh?w=d3#P6@Y2t+M=Yc*fLxj&T1p)D;J`xDkdJ4-kbc}i>O zU?p4>O7(@*%Mm;vnIYtyhrw8>xSBY**BWO&j1^K`C(bjYS;;I%rz)Y`)c&xhK=OvS zHwtafesxS&pIpBR5<{hsonYS!0|gMBlL_p!`we+3j;=Vxb}%x|%~e?alJ+hhLh!>f$+Ms|9yA z;%HsXj+AS0;!L;7sQCxlmpF3hKdx=1cG|ym6rveCL?vB(+pm9O>tu#m1d0DzuRQLr zeLZYTo*uG%z6*FC!&J+TAv}>JOOPRR*YHl2r_r?j?cmr|Uwmq@Z2W9hx25lkh&*=K zt~l_H<7~VT==YBITu04SsCUG|7vgHLPdO`!2Cdjgn$LiO!7FR5{+@S19{C)0j}PsS z$8C%T9dO>W4HUENbof|G!TYf1sU`u#P_7gdala8PAg6o6_PFATRCm?J@MrAAFq3a* zm2@>&iTt0$XxspmgoV_Vq6IfO>FW4d$nBiY65fL?Xc6Y?sL248&KqpFroyY-<;WF9 zPlkZmZSGYw{Gmnbf-?kr6STBa2q=4Fb8BduOTvCiF-63YqLp&=bT#F+3}zMv-`ohcBhe_-I(sL{UGTT#W0Dcp>$<>cu>Ih136ruXx)3CT6hb8megB{=`oO)U z8k`9o1Cv?U#1o+cRS58v4h8(!Ji$A^#c3V#@)tX{N$On80~Jje>f26Fd){Fdrmb1a 
zIM^LH7oG==y0~ag4!xN-`*B_whvwz3xj(Z~vrDvC8_?=3an*r^bCJD{V+6WX>#_Co zq`{H*Y2Z^^KfXx=nnWFZXhyHau`HUMHr{;lFR6FaI1}&h_xxD4czEi3`~__!a*nzZ zY2_`B4f;_5k9+>Un~~ism)d)>`zl6RY|9JNX?g6N$h?G3jUjkf^Zif#zMC{9dw#YP z>*=5L-UW2a9pCbdKoVjlNcTG~4za2$5i9?vGxdenlR96!DXz5;-~`ts&{&Nju8&GhE9knb#RbH- zB#&NW)<-W9Dn}+mvQ{w3Mq_G_C7K>IET=nQ7Z;_?(hR_o?hp3LRg7W{h{H?V1GD4R zXSKWE+EKII5YB3Rp*@`cTTnZ&J~KFH4IQR`u{~s-WI6wh*E3i%8KjU9WLByi;_oGv zFy{=5Z-Gvu0qis1@|yDn0Xwp9Z@jj@88Jv-;CJ|g*DK9z67#D{Gi&>Jj~WdBF~TvF zINL4YhNSCY8-CSPRIlO<9_<@B?IA*qf(laFdu(p#05=>KFB7<=cx>zTov|;uu{)|? z99#{nsUB%RZZdvaxBGv46o2{>aah{Wg>lU+^9;cK-2J$W$$h8klF~grcYTZB{4CZ% z+{cj?7yaD>oO+6wo8WBe++SkWC+{b4HT3yd(`MCW~==(9>XIs2ReGqC4|Hr&j3 zO%@72AZvBla5pz&pK|oUCR19lanRKMs49}L^pX(OUr4N6t|h{@VD}V;4)Gw86Eh^h z3-B7x>o;|XB17zXI5u?2z=-)?AV8yO`eib6sz7Y6CJ9B!UXN10y1-vr7cc@9$d~`q z?Mh|N*)<1n$!?-ffV^%_Ua^_eGbDjycH^|n%kXK#=;BCrVK*c zG)IjE+b91|x%$t4l}z3WQrzPBi%E9zYh@GqObduCklrLKkb91=tVH-pShG5R?$cw? zF2;O;uMJ9lZBf6BOS0VVh3=0oLR8D9utbAPjm7!%B*3Ts|7iF4Er>z;qJsTkEo4N-uqTvQSBQ#B@dPRtxS%{%Xu(!G$kjxhZpkwYS|TS4f$6^X;9-F++Hc); zKBBLQ2VQ=9jX}d%=6gJh@_2D*WghRb_k7jrrNP60XN&*6c29UAkEe=>^hL8nhCkV) zRy;j3vt6ci1Al|xXv)%XvZ|?Y(!vq=P-8s5m<}X4b2{L)e39aNwjxW6DpDdO=c_|p zHNfr>@Xr_d*LOJhL-9tn|HGsG1=*GbVI3Yf1Q@7JPHhUmr>34jz)9VB<#Y7ohas0y zHX=X>K7nRg-=(oalDMFFZV&+1w2Xp5!(95Nr2mW^297$O-Dx4@|FkW7C> zd;zL%k379?FoJ28!?Y^NC3OZbn;1;T`7R; zxrf?%0E^_&Xm9s{AMVX5jDTUjr7If8G!9!g=&~!G(d%bo-JCdy+9}>qG znTQOeb3tVbGL)SOBF-xRq7x!_TAA0JWw(9zAzR|jj`p0ahZ8>yg6t{U+8KLRw8hbE z%wEE=`q_EV!mL7(+LfBd^q-Bm-y37W+Z~7w*5Kb4t|^(>uWquF1{OVw*rE-t*r6OeFeXjnwC$#W#Rl;Jqmq^!iy`+c~J@3yLTtI<+dJcvC& zKVwg&zd2;wHJpEy`?S@(Vikhyxjb5pfJhhj`-ia10W-p`!s2sarA0t4bg9HZpmpqs9E0sv`Dnrhd zXxblKufWPz=WtSxS_0}@?bm8A3SC_vyR9jQ7d%o)8jJ|+OI!@#it@4gJ~KzXMP`G3 z@VmZoGv9$$zMGIQ!!CEL-wnq?2{<%=XsJHt2~=Ldq^MAUbLG2@^q6eB|1`-n^gsQ> zzq8A;=IPm9&j9JK+|mkb0xrT1GnuU?$v3@pDOJAv>Jf5q2nyEOB++ahWm z4r1{7Nxvul7@0yX37T@TaZB|aDjK*Psp{e#vkKDLHE!uhlm6tr8If@b=X&2eTE(4( z(9G?=Ru_5lzMe;xR8Q&CnpB=Qi@voByh#Wj9C>}U{AEgKxm`Kj@P!S^9@g59L~S*I 
z`PpkU{pkPA8c)yI?a5iXPUP>P=LYhFfq{{ZiHkGdQz}uZ6n69p_lH6ZXeG^^r9oGF zOW!XosAKnXJMmSf-fV{L8#SpyE^6BVkpSGG#NmD1584x#2 z6r{4+Vlk4^2P$3I-Ctd~0;*D-2_5J8{f|_1tCu^Iho4T(q3IbLG#NFE_ibO-cWZ>T z?`%jXm+Bn{nZDvwEzpH!IvSJ}YsK$Q1{AOkIxA=cYCs1j47^19lIE}~=n&{_Dydg3 z=*->A*f$DQ-2aeOPMT+ZBOmA|5)+B)?eg{I4WZO?KU=x5fL@Nx6%d8BSA z%>fDxj%eK?W#~WF*fRSh9i}l9%w>H@RLFy2#Zi6hHt8(6Hoh!1n|6E7F#cY`;w2vX z_vOZmVq?D`6q1$YN8Ca(G~)8M$hyQQ%Q68y5*jN`62^4v1{ z-}hfJU-i2T*@Spde8++-`f?=_T6A!k+G|xTM@s*xq_d!`;YwcLmFI{72hK_!tIT<+ z>D%Bd=$7-}a9ECvwNL6m=$~CUj*8i(tjo^@HsX~`sx5Mr(0ZjIY zFS25qeJyVv5#3doAprg6?sC+kt9R%PDzTFLZ-L=e`JTWgS{m)*#jD(j(FyKxn77TG z&yRq?k@%mRZhCxn|BGi$dnPJ~A%%FB7XE|DeDkSQpmU^?G6Yh{bM?YPnH?1=97MnDO^^4&G$Cqq>jcAu*KWCMs6ce zpucqbxE>Lc(qVo$u`-V>vGgPt&~tD&-}@E2dk~vmjkAv82OF9lC8dB|VI)~BzL(S# zZ&qva&}4JtFV#Ft88^335pe4%5J!LNjos)v*hkCYiI0C+74CL& z`sz)-dvZjYmOK)G6^!-=fU^bRtYSakLFYX>IjCwmIJMb*b{zX&-E~yKRGfK?3)ZMM zji#z{MIr9rNIwM>njG_x4C?Kn<#o2(k>rr$U$+jQE$Qw6{!CkO(%@9Sc~ZsBz2b>e z=!i$>Fsbwz6@RHOUT9wHDBsplPmh(XX3|A$UCMT*>$Hhl;G!4uD{X;RGkdRa z^KC|(r`@Ns*?WRbbImz3vlo9V6~JC2WQbqrobvlW;cuUILLQY&0-oo4>k&AMg>iSrdacP6%rlu{U}NUZ1UoEIKR; z=y9aGvLZ_iS)@QQHfi93Yhy0Re+;I|`wuuCNez0rx%JNn+MEWzh9?A$j` z@1i-~#h=YeH@t!&<;To&JwvQ@)@5dq!1m;{5?hFpcQ)9_WqJwwi@>iP-x#d}gD6dG z-&HD1ku0cHe1J}S1RMF&!I-@n$u#O=%Dmc=Pn3$!o3mp5vO|mdh+OK_w0AQYrDOJo zn^~JjFm&v_VOwtr!;}-QXP*J}?ZSRtF?A1&wZ?%5ySkUCY&}jn{;ppn5Mdp} zxN2YiljIvyTBU40ijET2t=CX8Xin+?4F2^?Pg6fhkm4Svx7(D~AsY7aYml$$ES?z7 zwvg$0Le2>KX*N;>4P(&#DsVO@AKgfs9PS;3!8?V7`t_T782Ss>z-79;O2oE}ACO&P zvYIj+wCwfWnEes#_NnX4N7HHWtGSlQ0_S{Wq}+y@Z>w6wwj+e@nCBeZnG$}Dwu52< zC!0wvhvnxR`3c@+44wOA*Q0^nu~xdmuj}2%U&zeWnv0vy9Pj%aFa9#Vp1 z2Xa9YI9u03im*)X^Wh(Wow|a%E%Qp|#MY@AWeXJy_R=`2OB_E7^O2oT+5)R1?=tfF zCx;fXjaU*2iV7VAa!*%Ynv~LMHtQmGe)LH|JNVT9{$Lyptiji!7hJw>ui^;T*z~0| zn@C@haN8|cc*AEG!rG7y`(D0btYn(`$oRPk**Qu!s7Za%kW6gfnHpi;O!t%SaUmjE z0qQOimEB>jO7xzDWOCu5{TzzVg2-O?@vFu9vl#UsA05K*JUXslE7XkM{QE`b*8nnvgdfUQ&yfv${ z9@f)WtV@lZ)ddV&DJr1I@J@`M$i=UPehbfP)$s|b-70Z(i!r~F(Mmgkq4~*7!(?4I 
zT}_Nlhr^V_q;-#yvK(-=ZXu2a@ai7fBG27ZwF))v=YVWE{k}=$6+t!Q{;H?nrb(5* zX+9FxkA=4-4cP$YeB~9+f0j1<rLleG22D z7I47WK-iC#9$MOUF8OHQOrIvG)L*~WU2ssl9ctVl=SRO5s$AOd851gThIW8c-=o0N zY)^;?e#<2hw6V?g8tiilY__z1zxvAW8{bH2Kas*2yC2H+^R4{H4u3+32bj$SggQ{4 zq0=%w@BDXg=ki?YqWejtinfNJN%z(ASvkN=!GEanJjgrCfuSXUf%g(|MOGlDbeqg@ zp6$i!Bm_ZVgnh8dDrLqy?Qig73Hm{@6j*9qLplI^3q3~=L49t%G$1towU8!{-e@*2V!nf~Jy z9JriM#SszD^walHMuHBl?kf*j9@V;FD5ZFhm^(SPQ845)RVtkag?f9FaUl(dJHJ{j z2Aso?i|{$}w7%rXo`qc!z}K)hwh*{2rowu1OgEU>;G5oD0S!&G!n}j{@Yv6==CNg&QM$X8Och`$sx)oVLu@Ufp79nx#}ly-pQ?lA5I z-Ki5onbVS5CVSKa_$Td5&a>;VF2+;z@FxxMOME%i!P(O~9)|)-BAw_A zp74?$z<|W{udR~52P6hyx0ML2^!6nT%($wacePo(TtuAR0NlA%5Z3Dhh~L=qIdsAu6MvXxjt&XYqwc2N z0`5#}_b;dSLi;s5@R><<>kN8XfOUQsQC={FDzIg#7-f-!t4(R9Bh1FLkpl(fEaoQT zhz?METIZ2#z<69MiH{y6VE*0~O|T^WjQ!<@%^sIw-f3RG3Z?6y zK{i;L`L?mqZt_hnF{x8o5ERbXof8M}w=c}B?*4_$1YB>T#n5qESN%UD8GK~NS&M#= z6|ZNkw$WYV{^H;hpaSHRs!%+0Oh06(sMB;-p?b%o>5Nq9aRBmawBiO~ev`27k)KKw zRY!zyDe%Gqe$cWbInJ=1Rmk2z+wYg-jN$4!pU9qttPQEP7 z=0EfyY3(WD6)ccXDgH^HSF`4m7|w@r%P_3Aj|F;b~L+)RT;0x)v-qpzR}RssyDRt6p0UM z4aIIQUCkkpHLsB0;@+ZKF_Fm5Dn)1OvH!*V7R>H!tbRO)fb*3v&L}yZwRj7kjY^np& zDq^fR2QklD+EaWpd@-GWeWSA21Q)eZ;mEP#9kDS=J8!CO>M?GI4E!k2eP550+Dh5p zRqH0nKJ#!YbUamU&=yn)-*KDE>~V&m3MYCCDb2z#%yLYye+Vz9%orIzn_QI<3DvD} zy{BfF-Bez`=zon<6FZAON+|o|ZV+@)9RjaN$;*$>6}8<6Pwd z`rb!#NF8UO(AY4c>2fs0;ztDi6pA+SPUzdsHwTig*6M(bZMVC^o3jFiGoWYlT3M5inzy;F?laE%?Ep{F?NPX;xfFN zjxt})EGI(&tj7&edd+0}lF(JBw zeZ3-f+@CgAzc@V^a~7;z7#{!qPokL+(3I^GRmZA5mNqYvx{_P$53s5}i?NdvCxgm| zLQKLFpNQ3GWPc&%!O$cMo52Xj$ZW5bhI(>gJc^Y|*M?gsxp3WPM8iPKYFlnyNK@-B zPU?V?qU4uBU#KxMHZz>wkWci~Xb8m?@+Qcmncp+F|K+$;bU6r4mIASWR+Q*+jwxzn z)TW=OavG*&-UvxG=QdJ@SepRJnh-2vxMPV`($o8J+pWcRqxq&|)GG4ioaH-xl?$j{xSVbTf8dKS=M6$;z=0^=79Jz`{y z^eh9Br3STn0CtP4!5u@mQLw3ZzNWW*0_I`>1>>7^LF+mKi=KE4+^-}{+pj$h6WBA% zi!7VCMs=huFy5bDbRKORCi$((Db!jIIzGhlgh%Red@+hkf>gmYx-M7@WaF>8$|lKx zzvKd7AK3UR+oB5$Q*W%s{Lye@lPwF2+g!c(3Cr3==x zQ%HDp7O$_1drfIQ5~Jrv7P!wmUCx_-~xDrVo 
zp(_ZIOKSFoM`%yMcA}Hc)^F>h4F4!p(mj815=*)#c7Dmg6vxn@8X>U*uy?T%^69-& zXjlb2YzA4$sKlcS;hp7~+tEmjm6-S@A_&E(k!0`KXrO*_vlDMFxHH|KAAjT!!BRF= zT#MIof(kKn7ov6aQe&Fl&e`&Tn35t zywr1>nL}~bN$%JQl}5{!p_GqX9W8zzOWo#oPig3R$!3IaW6r@Qe)ltkNXWP}X;+>k zXUPsZTHS4Tso(D7}+yulR;d+Ji<=US?MSLcBj|j_T58Q6( zdAg%h9|xL5y!5Q-vR^D_XK%3SFq9ElQ?41{sJXTLrW?yX^07C2u_CUJQ$fnehuHJB z2)*JFl`pFN6S<`R!#BUf&sZx8K8MW(M|bo0zdl=2g5;*lNAWksC#3B|HRRJYYB>*Q zCdZMLXn;jARYZ#fS`R$AR@&JJctd-iA6h}bR5sU?X%Jj0zM?hT9l-R34$9}cuknL|flnl`=~YALs}x*nhx6@@1iXMY|Hj#Y4$bb)Jl&Ypp{x&? zBJ+AEi$JNw;x4Epcgx(H+GJi&5Co&W=(^Po=S|o1st(OQ7;Ket>qeIUo<-E-Nr`Yv zt*NM)Cmo{6dRN-J-&kBDM!7m2*7w|GaUjw_*r?sz9}>S(qZ1jb zv{GmL;l0@`enZ}%qS3PON}Cel_>Px4O2(MmNq}z~82`NRz=m`#h1(`zDEcQ@k^)L@%pmI3_Rbkfk{grq3!NsVt7W16~#LxS}9V`HTm)G7<2}iHojwlg4ZtmiYQi5&1;RR!gw)hkfN+| zX)u;Y;bz&Xw`xvQqf2h=89Vb4E_>L&bQN}!pt#DW*e#o%Ls%-+F{Wm^axLOtbY5*# zt7_5UHLlq}+eUao((cf09TPx(?0WJ$0(myo8f0g-0AoBo)}x9%<{Cdyq;S)y1IW4_ z5s5k_z6D05)ljz4;=H$-2;~RlwZ{vtYo5u6@)3Tz(LJ&_Q)>}$iOUC7?JpI$kCdHA ziUd{)iL#u3j#bF#QvXmR-nEfUR1a%>x=GQKk;Ko|N001;=L*&T;AB=E_0#PTsEl^S zOeL7qt|WK$`=f|e^)bf{3X50yRI%)ITg11D=`HgwHog4Y(&dVAwgKTNAF~}TfQ3|WdeQ*T1mCJKN`Qv!;WoAW%`f3bamoc2DtRs0 zluYA&@uI#rwWsrINYhf2cM5YX)FeGSx-O($G(Bw)NFm04_oBjG@mjq@bNr*thR#f! z$z$@U{U9sNcSShY();r1>)cA&^{JDEkaPbM#*nI14z~iR(9G~6rj!266bofp5v6-g zN0)97Ri%fAoVD=UM%j?U~IZE%vM(9#*n$-{~o_Z|80fWLjQ4-eqqfHARGTd zPeJvmO#N|ye*lBs;jL&zNmHKAHGl8!JEY)mK^z*VFXGTZb2rId8iG6rxOX6+-+sKx zYVJWctLo*-v1$CUeEAldkK*{{MgEV*_Sd|p9nDTv5^E#*Cw$YR(KEcF$#x6Qg*v@_ zZm^SKySEAJUqo9Z2P_ri#IG77Ppxjk$GXHf=%~^hfMs`4S-SK%Yk>j>WSE67l0tuY z@8dw~+=;&X$s-58CzA3Or2Q%@kKX5FelmouTdkT1E1fB^5&dY&sLzpg2`iOZdsvg= z+iOZWg>&@9)MlJVM1MeL|9bxF@kyEw%QVm|tBRvXF)7?XBrzUWwC_arM^0^3atW)=A9Om+ zzoYWE?f+&4PvNHw*{>2kU-lpZYPb&+C(&-jrrQ>un0}|IsS?}iD;YfyIZA})6f{Q? 
zT1-zFv41AW6?;J?42*KttJJVmh)N1e)mu$;`NXK>XbD3X(k(W-afHuVn69NPU8+hQ zc&VEc%>9Z4PTaT=%033>)y@w`;Q zN%)s=#4Xx-J1u~&8C(cpZ9sF54P9YS*RY%Y(Dxe7I z6IAvQM|n2$>4W@X&^wVT%8(QeU`i0>XcqL^XE@Qte3v*%<1=9f$0nxwd)F&^{Em6& z9zv;zyQs<)FZTyD{KjS+`GL95t`$(SExrxiuuZ&<4HkQr5Q#bJ1az=PDQ82_X>ICG z35(}0Hi_L>;q=d?q96n;lpui*I+icLohUbdXx$xR$<;XP9$|R>vGGSNm8JblUYyet zts!VEfjt(`5w%r}9hGP;EpqY>*K!*ETtfoWf*!78gaZRbo+>{BFFXnHpwU(A6{2tGm@-#){zW$-R z{rg`U)|Bnbq2=Yb^yOGZyDbI)8*pH`HIuzdx-t26zPwOkSSf_v#zdk~wN+eiS4Cqm zQ%PZI=T0RwC-6kM+RGzsS1BVDV6pK34H5CfP%XE(s~ESM)7hu|aj0FNR>j7Cya#b$ zLySfJoaPJ5Z-m=?8E`hlc&L^<N1x*ZW=n4+xNw!Q)=u5IZW2uUCWm*8%}-95Ml zcMA@|g1fsD0>Rzg-Q696ySs;hf&Xyr-S_^l>eSQ-L(SQ{W%cUb-In_EQXe`|nL(eX zVTvb|$}=l!aE)7AaaR~h>2fHl!Y>r)5mqK!GN^OnIdy0D&{fhI7#($M2742SYr$J3{<(IWHFbQ0 z3sQsp$xDhA{4rJfyxpM^!!`6+ohwMV;!c9t=)=(^XfwMhnP1T|Yjo4R^;Jj%baVCq z9i>nE*a8fA(&1+BWG*+l*KTJ#$#Vp7Upnd4OHS6fuB!JW(jJOGogzr>dg<$i7pI>?y}Jqty5qfSLa%oqSV6;1Q2+akS_DnY2b z6!H4y{@znzc{(Oj5a2<&cQ)xx6ur(Z>w5rlUj6csYzw-Hsr}q<1JHDEm7Q)b-NW_c zD4rdeFCa*hvMh2piYbGQDB5eN0LxA-YT1uhiJ+X6GE9O-H?mg3{cey_A;CasO|3g1 zJ#vzRwApN0x*>N{zj&bHP#NOdwHi(e0Jb+GZ&lbw*216ceE$zi^waxBcVfiv|7}VKDZ4LdLsx9n+^(mfe zRKo4Jlt_m2-o5&#^|R@R?tE&*&KDXTpcv!FrUZTiK`BF}ZV8cWt`G%NDgWV}KxI5W zd&Cg}qkJZV^%mp65WSE7zRJyfp~p?PEMUhtClpk6wp}GrD#)p!>twp9dz*AKyZ8Kb zzp~77=NQ;Ec-pHf;)lp8ZIY^Ih>Vruo)R&5DMH~EIacJ*8>5IfVsC@)2-^a|RV1)& zPgj&$E_HTgv1K~@(h1h{%kF1`fZTcX%k3OV3_IQyeLANM#Zt5jrRf)6PgB;zm>)=I z1?PaUQLm3CZzGC>AUVmnP){*YakWhd-cj#dY)as^L%o8^pykRe62i~};P-8G0G*5=`Zo>%Paid^bku6fC{qib>E`)aZ0YRFFr` z?hu=`=}yUGasvZFY))Q&i0Ddd`TSow5w6boKR`i$P`@DEeCYUq(?dfdvqpPjFmo0r zUl1s)VdbECo#pczNleJ`<^MxTuPA`dHg7y%+1&lfp=ZcKxAjy%2hfk+?(e001!jvy3GIy? 
z5)AtZz4F${jCe)F-YSe*5vy0rkS~o|g0UMfh}?QPl!@zgy!DdssfMn@(}AU6M!gQ3 zK&(tZ;s(x(NOw@f&~&PN?VitY!9iUYH>a~VL*GG%zq%2fxZJvvK83H(e6==xRZp#b z!`5$-fy4?atjhTVvqaP4%8kiD{B1@;b_FW_Bq!?Rj(J>H2t8$VOX&zvSYa$W(7vQ} z56J0^Z%V~|6Uii*ds`pQ1Z!XD_#-3gQj--nI|6wULF;RTP>M}xhXp@Y7Hbm3^l}rM z;NH_~-5do3V#uX9NZW~$%IA=w?VSzHC60=sQzH$bCr|qVkNU-nIX#*5odkMs1_HduMY1qegNxP%cVUW?xLrzP zg@6{ChC`36t3#&UYpW$R{B%Z$H1&#=DS!TnS^INT{;pD>zXH%O*=JW4hW^0A)8YbO zN;@jcd3Ms|>-403m=uvRQ-h-2uRx*2)P82*j2F38wcgu2Wuj2k0}faZgQ@UWYt3XyRo0^G&KFrabA3aw^6VN(U{I7bKzj60p%mGlecTfH|J@wfQO5OR8$6Z@WS!q40iN5t3 z#r?eVZH==RJ(S(BFVS9lhcBxkf+4hr4PJSz1=`LZRHpbk zddr+DDS``L>cUglu&5n}7$L#R)Et3qkg&b}@eC7+sf5|L6ciF@udiOGRe}`|cno)B zd%;09*B=@TtLgM;>1gX4&g6;2Hp!#{l(8Z@s6=OnW8E^TB;}nc#2Rz(6&rys ztEdUTE{~FiLZKfJ><1ch(z3cq6Gbbe_Pa#FPM~bm)VL_SM=gOtvb1%i&R(dxQPKM+ zG39KX&nvF7v_%)EiUU6s-rujzM8$|}M!wY5>Sl45;yPtzAGF(=IiQ5DT`h3kSCBu+ zigiX(#yjl&)N9NjCLv{j;A+@U1N@_)btt9x{MGi!OBYiF_Xx{AlzUvc2u?X&CK$={ z%z*??($P%LeeN4DKY}UZM;hmKEzFhb+D7l1JDPX;aFQd`el?=<&*0yL{@H3y1IJbc z2`8Wl_YAffwtYyrAKo_*T$}HFsj^)`eJ&sN5CxqPXIVkD)^Fst?q;TAIew}=XhutI zuk5#Jpo2a^8M%tWYo`BfZv^Tcq?cLQT(4eIDuqBwC9mM5LD9u=^cin)kn62d>MW`% z_Wm#8DSJwxB-E+SO{Oc%!u046MDGFQc^0j)Zw$!-Pyt|Qqm??LEftBZPdn9-}#($fwb0nX;URRUxH2D4~g83?xy1lM)Yl@E8XY!6) zGPCF!`l#MwVj5XyA7*jyWmQB{t4U3Pu34|YOi(e5dsr!_5G}}gM^Ie6p3dn^x%9@j z4-GA$vs>57zwXWh4pm17iru~9OIGaDyK#>Av zfH5JG393}4d!m@6tRQdS+@%q<>c4vd90;{+q4`kk#q7?&pzzGHruDsJ3D^~*M(A|6 zN4~A+APgDZaZ@}ir=jx#KwFqID-2%fTC2W4`6(R2yNUP&{zOY{;?%>O@mkDNT+|-d zlPNOFFYTwTvooS$uuAorEwAw_1(##n1XIqTN%hv<9?YRGRGq zQo#^Sa@!ea9L;AKjU~(va+w^zrEl-G z3R5BKL+_9b^KIA!q)H??ko#8IFeOWyrmlnKDwnmVG&DD^Kd#>uRM%eev7A^F1^#Gwacy?6I`cq9r?2Y|WwXE-I|P1c z+OXN{(E)vYInO1kA)C@+wA~AE*o1SMimt$6s3gL*u@yz0HGmh_Sv$bS{bUwmHknHw zjaDk_pWl!BZF2QyN`%2dK-0)#K>Y?dYaZ86c}1{h8Nf7n(xe#HXS3uo`$fC?d#X#+ z@tE??cepc4N|dx;^Hh}C7lwmnsPWTD8wxAs+N>k7y7S>UefECepIf0~pP1p;crC$5&lC`eh8g z9vkm|MV|aRXuVq%hsh+(Ovpy%`29+Z*R?gfTfWQodd&7(wjeXWKN?BMm7?`-mo(Dt zp`D_Lz4;W@7TLow`rG 
zEr(FIhN_x}I+ylSoXc!Y`I?OKXnoV5NFqdQd!wnp&FjC17-smOG^F%9e5APVZz3I( z!g~M52OVV4gnABx!X5B`j@ybR*9*nC0Uec3hq~YhxeD0eLt``fxD91C!Td2A|US%BU&u^iwvrw=(s{8n))VI{ih*|t}jF* z-BM(`?LCaf(=W5U(kk7=x+Kz_56Y*(xOi9_cA8oXk_)CP$e9<|m#nCio28D{&o*s3 z@;!O=hZJnMBJUrfYwQ{_evY~Ceu%Zq!U-*1Fv8ddffk}Gv9q^4`eID)>}q7PQzwdF zYa1YyV_aeEw-H+&-hE|)S-fFtPJ?xId%|b0pwGq!MdIIoE0pVuUX8>Xhi)xRibNH2 zWKq6gPtwUq;}j)Q2Brv;U0ySPfG5&zS}ez~V!ba*r&+E^=i6OtR&nzti4I+xU9gJu zY+VrB?mRq2o^;5U_F(THO1d1}%zI7|33yHxFV|vA+2o9^n^dvl7iVuf(tLi}Z+u1B zjrwucIZE3lBnNYsgsV~eYFtEtwegH{(M zMN*ztD+3-KhOAfGY)y(kJ|z^Gt6ZV4C>;iy4q&DZC1BEw6F$YcdFoUuB9I?N$Cz@} zhw9vf&01cx=ttE%=4BfV@5ak)-Q>;4tCb|$ztMKIH$kcNwIZ-JY%E?)$<9Sal+Gl1 z<6}TTu%t*ZKs#kU8%bRctZ<5>0%aK>TfGFsr-hYim@xARsBq zHw%mX<25>}S-znhp2ykz&LS>zN55{Qqi=_9L_Kg()jzv{I@t24S)sqO&ek3!RvWD$ zb3IiOd;}jNe%zgMDwt2zt%6V%napXAI+A4%Khi(cl&vEe$c|N*j7dHji^6b@XnyM@ zu}K`JHt(+5tL`wVs<@eLs+3@CKU%4MklT)sjrEKv=Ejk9@RTr~&v}=+kG}Bv*K@@T zcT-Ubm|G2I?U_J#nyq`>|8#sJ^1#0LCmpD8UVfF#Q>`hL&iz8Q>0v9N#`y}eoJ$#f z`S+L>w)+`7i~f|H{nFbj31B)X>c+*wI9RAR*SuG^e@jEHzq0WbOdEYB*)w>gJ)ATZb3n;DuoyZa*N$=2T}qvst%F zE_;GpSGrvv&NUPi;b%E0#rvo7T=<31tNZksaa-PG%b)e_W0Rr;Pf}i?*U0A9t9{_> z*)G~VpYqLtlC}~&IJpRd{_2AD~8$RVa~ESmPF+o zu|u1cQ{<48XiqJeCZ^g@^8iK)hI(O&wXbTlSeV@XCkgvuowF1h*lp+?I4<&JQfudC zWclXdn#R&NbE<^3d6tu@mZZY-PTy6pYN{B2xB0A48_shUgLPFTg-zlFaqnx^lDgsC z=Ml_XI9F!Var>Br!pps1v_y z();+k`+Q`E-iWM!Dn9>NXrWw7pna9uM@R4CP?_cn-U_Xhh=k16+N%%YrsZo{k>%j3 zzN2AkRNXWQIrnDYQ1@W#oxY7!ru`H3qXVz@vMWh?8Ep~p^F3=sI?In$6RWD+78nHz zk7lhKb!E}E^Yf7?uPOH@4-hY$Mf&y{!Kbt5__@>Ae)avN;FCW73)B;i0?+dH&g>W^ z0=tt3&5YF_(D8WVq9MmeFZMEDdz{Wps-$f^HB{yC*(-f+R!k31b#x4358=jB7OS0s zP{+Me5AGuSmmzVNL|)9~$)Pyl$4jr|(EKMCaIptKJo(ON1Sxb?OTg&oBnMuYJy4de&*W>UuM#;@k)bzm(0OJN5VmoM$ ze|$9>MxMu*FegBc&^mBHNI- zVXVOGXZ1KuwXFHZbY{0})Z{1>*$W!k#;H?Mj6~kY0F*IF`GH01>ABDN%yH?>^Lgbr z*!{Ngtj8A3n@(Pu<)?1`uUDkh21=KzAML1#ZL>ov#Ouu2lTyc_4VqnbdiJ7Hxuf&e zcq11NcK6Py zo(kjH)N5-{H)W(H$F6Km_u3MDX7klji&a~VBquX{k!TpInqYr(=#stnmL`k20@$u5 
zb8n~^+_Bbo%vR6lQHz81PHq8v+up9sP2T|Xmnr>^$WAc8{(LT4VCWB)|7)BuV0eYG zh`uz;-;Bju{S>)Uc^39-#xMV{0Rt5g>Fv&diHt59Bfo)$JSv8fA<3t3$+&^mD?vSd zm6jbR)*Gm3ujQ{oK^1vbiHxQaRj#}8ZJ5@6-P=^1<>AM(?pm-=TAmFs@6{&QyP)QD z{aV`7wh%-4wBC*4b54|a8p8b+$V&EjKJ;m5$^Y3pPyPCeLQDeBew>}fo1|z=t#%mG za+D=6L@ZH6eMNm@F+Dqtj_GSPkINH=CET9%2kZGPFcuDnrGN!(Q(C3;c-X)p1DvtF zMUvW0nYQ(an#J;%TC)hWYTL0p#)2fY(8{-WlKV6Hd6TL6*I6z&x6|A-i`AlbKbjJ7 zl^at*=VMrLV^5eOr7kq$9EX`_1?<+tr_@xLs{}`|`m;zsdnQp#%9Xp=?zJDKS!Yig zYNByCBc|(9zMSgEIDBs~wWN07#zr*6nH71|YNrT#vwFo!Rv9guMWuOyHhkNoT@_oZ zYzj(m-d~mbFo}%O#n(Sc>hL|C*2QsPZbDSPc%P8~Y>w4xcSG4qwQw)}IRZ2w?mw0r zl_ojg&O0yFV_MS2o*zx;yrEiZr6~uk!JP&>6;v)BtUxy&TFE=w9h!*-(;%RT2pi|(`s zPT0YzqFeG z`bb6{sG!-W;JU;ybc`f7+D!U6Sw~`~Oc&LBmRUZmB59F05M?@xlDy4xb2i=UDRI?h z4==UJ9n z zcTprfh$a|6`KC^eInCS!Ift(&M)mm$EJ=HrgC;8CKgq4C45JivxY;_}BK$M%WIDpQ zpSg1}Z}n-l`v-^<8NdCeK6`)bC^9b(c6Wc+)q^vX!Lywxs|pCY%;qsOmBt)UFFb@B z?$TAmihs@NoHBzb_8j@_tauAQqz=bGMmT!c2cFs8sm_2S2W#w$Ln{iEW5ki3z^{Ph`ErV4R-=W<|z2 z(fKn;As=Z))9>&XMbp8cmP!~GBvU15NvWa2b#d68W9lsM(xut_qSj&+P0$?J zHd;k`O)ji$<&z!Xf&}yGq9}h`Lt>k|JFmyLH4V^ZCH(akoTlkqGq>pmyPMhQ*;}(c zU?LoMj+vZ!Z$(CicUc?Ae2W+Hd=UH37?0?a02*V$w<$D^s*@WEngU;C8A4W~!1^_O z-ILp;xn>bkipgyE1=fCtTtU7bo4zJu--q?ZY>6yL6(cN}5rTE=;jwIymfAXcoYjxr z#m-8qYgw<58(XjEivp_W0u76AROxQYyhd$TOydwC(>=ZBOUUiPuq)S!4v(&rDwdi~ z%HH9kdT+zWvn1u$;NA7V`#HC5wy|`U@CcXfoOGXoOnr*?4u=CYThGOnW~ieM zHkHdhuxtdEwD|*L?ntLAB7YEDwzl1H96SoLOjVyo9Uagj#1(wMnA{yrId&#L{-U2n z{}7PfqjB;T!-UT5nuV)P)$WMwq=nX6o_;ddq53E>x(jM-QO)AEdq~S(_CutpoC%`Y z-fUdzc*?U5V=U-g3rmJLAq-~Cd>(%-BC;ArFk({w29*Cz*wTw6?IBbcuUq$h@C6ZQ zeyJY+!B*)P%<5pL`5NtttOYNWV#uZCWTblW{J}DI%$zF8C9_d&ujcWbyM|lEofh?)ubSd3OYgIcKkkZ)XYzgijR@sK`dL9K&%gX_9lB}P z{HWA7)6wIv8qfViTotX}L+VUvariR&tnS}DW|dVZX9^V^oXF5ZgV$1mf9KtWkN`1W zD>NxfQt-t12HGtE=UCe0q|9Tmt*zJwaJdWV4HNUp@Y4y9ka9mMAy*S#&iPE=Ctp7c#rdmgyuGpwu>1c)trz}!qSZkne zNELHJ++dAdy5jmpE|yYdGmIWpUmhdOB>#M|U`Tl1sk`Cy`p}5PJ+w%;zyU+d4n1=w zof{!B32TzmudVsS85H_DFw6y4G|a$ujTSeCdL-z4NUfZ4hpcPuH-#s02&DbYm+(FtPR#N8BXsRmBFU1>upKCy 
zpKU6ptLU{FgCW_1K-KQ@6bZzjH@m_Q6dKJK)-ToO3mVx-3dJ-0se$#Eb-**MG zGC9I|zTvT5&A?vUyB?TgRyirFdSGlR?u=WCBykZd89IG3B-#Nv>o|=`6YFlS7A;m* zFN**!0RhhPFDwlK!=eLaay8+m5!fC(&R-d~f%XUgN+=HP?zMJco70!2x0qc;}qjojm*C=yKaV-Q+`~emB{E!6@Nip0p*we?c?r!#V)-k*1(ejn4>TkZTMt4hink#c9iXvr%3gy$zf z12%o7*Lj7!QM>lFq#;KdRfvAWq)B+E%&hdtW7w5z(%hpH}Wp zbghgAsgBJsI|O@o8#sN+o`ub}43sizJXi9;a}X+NiXH%D*ulj`+;PwtsDBsOi*>%TBKk|`-P->z+F@T@7Z z&JPG!hCkiQ2PALr+1KA8J^+;TS2Dt)*Ra*>Bhy9h0#uqAJFa845RmyW1&Bu12ygBA z0jKFAOz&CMV?Qv$~K#|PW5zU5!DA;UYG11W|BSu7)cih+8PL8 zFd0IECe!Ygf)PXe+bSUNEefp_Gn|A7#Bw-E{M@XtJ4Ua3i8+pg{VAx`emp-+odYu) z)`Uo)?C5>N6(2JH;bR1rx8w?>fnC@pV-NYQ&mS z{%vTHiwDNvVd|?@&NfU~V!pc2>S9)iYCBXvZzh+(vx8_XJcc$;YV0l8w3&)XYD;s) z=Os>wyFB;9a<3UELe!2bt$#b?Q{+DzV=(=>$7fAEI2UE4+bNZJ3oh5(XDWp4IHi+) z7F&Y)wuR&SPpq{Um*d5(olmFT_lwn_Z%?s;6+Ojto?gYUBxL~Q^miPk45X4uL*EYx z0{Mdf$}26wEDzH1nwy=_@p+9X1S%rIut0`D^Mp+^7>IocZ;g<}^K5**|D=P%%CSh0(=3Y52oV|g=}_DZsaJHaLJRE9nD<_ zuz$xCgqyQcx(^YgLgoF2NBh|vw9{}}HeY-Bf}N#$N=Us3n|FHBG|==Hp77p4%_1W) zT3Cua%+>~35F&{~CEwy_!s&v37*}(Q>(Sdhy^+lgWXi8{_Oc6Q0YUm`Ai>1k)oX;q zD8Jm@pO*l#x3_Ypz?+xX%Muy44^)kIxaaR)UfM$~21S8F_V$ZM6kg{O>TyHfBkg~Y zb?J*=OJ@r)rhAxD^M4_%WuW7q8lr%aARj3xg2<(R>YcK4FYDdId` z6!|3?8+USsR80q56Ccr`7leo);F?(I!Q^Yf+_ySzf z_V=?-DY7h%bv7&ZhtnPq*0W3TL!4abK%%h&A2?;YgNCdsy{GHQHJZk&X>CtyLTpzR&ACfA zDmxx(J;iX>PziWaztYcmo>Dvdf2Rgr*3xb|UD-P4zYh(Iv`-widHb>%=HwN6Tb^7G zjk^1d=_>O25{aJOG-J5q5?}*~b|in2)iqxTJZ)cZdMKjbPrDYFl&}6aK@dKNFe#%S zN3hRIR$9@g-jCQX+A`>#nIW18 zQ`Rcc+uT=jKHTxkE;0+dTP7jNAU@~vYjf7YiTE3!lITMLw*>ShZViIlKaKh@<>jR< z6?is`Q9U1JVznG`0(VJMA;&}L)!xy;icfc&i3s_n9gKXYHYbn=* zsf`&fT0#0QWI%*Op?Yx_&Vba$WVbF&Zk7W2jy1lMEk=GRo8yj>BFc{~30cc)gl1N2 zq55jfr>+=0N-mUStOG3@=ogy-8Itk)`i}ZwCw+cm18TCE>cPj3{adUTLE|iNn{$ai zy72J=uiSxl&~^bn|6sOXLJ%NGO@=Jw7vBSe3kjq4svmW-FU5Y(itCB z?^6{&7|hy4ZnKz8kv0J+ETUHfA81TNN(lj(4xLB)hhX9q77a zb(u3cw^)qw=UHbmiz!uL(Y_9g}df=%AHN$ zDnD}BTn+y!uR!(o%&oIX4upG|0L^Gy|?be<^toi}aUq`FA*7^%zQSttrXvharDb4s#| zNYKa{+MoI*W!7SZk!yOFX2<^KQ$k!7OzOf3635w!D 
ziHW_!EmT>BC{m3nlFoAxDaJOSXhnql&($i&6~as{v5Hxf_iSdc_Iz;aA|s3WGKyPd z+kZHBHYkzDN3iVzf~(i9C`P$eW=oa=fZH3_C1kB_-`#j24ze3=!y(_yk(9{Q22!rg++>%tVv`b(Z4zdkN$4ALa8F2uAVt zBowAVumUuW1s@8jf-SAFPh2pg5~%~u)FAO-0^bC?`I$C9vn|kePTgd({TXa+9px@V2O8ozGsxL- z^>R?U%%$<>;H9#c@V5II627ZLMz|zgC#nF-2|&TGiwQ;-gyGmu>c7SCvxSO3QXjoe z`4=>MMFs&Dc$KR59uBa;8vul^-54PgeaVCi0>Y^Vn<+$?hUbwwecy(QfQU$+&hXSz zp(By6uojt3$wRZO${Ob*&;c9wBSF3~e#pf_@`)!d+rd;UK04mEZ8K>m#=V!dRGKs| z?Pzv3p$UEdep*FN*Fr6$>Fd_lp~2z2lt({%jx&fI4BSgrm3wa$;>ML;v#|1aFP_#k z8rt=|z;ka#STuq4n11GJ>iIKJPe3J70f9m}$S+O3+7jY8darbpEAbT#?D-r*L@~3} z6Qd#rWDqolkPE{xR!IG_P>@BY2Xfl(H%Zh%d|7h)|}%k#d(T^>i% zBM+{Px1wF3E1!G4_?ka2U}-W4Orxhm1z#`$>5BsVmw6S6=r}k&mrAKB`o>c#adqj5 zCl=)Jbv2BO5V9~Iwrs6n`W1d$el$u8$>R~y z>@G{ZqN=QC;-fqr4^4P|7r8Lfb(k8M8rBXOK}^J7UdWylE+m_hxW)F-r6w-j zMNEw*QBw&hi`h|97UUb>^%=MW2o>#f*UbM$NNa1zKIA)8~Qz@Dyb<*23>|qM56e#OPV>~ z6lgj8ARr+f|E@5oWX-fpl7iqGIXhnlRyOmZ$iGP;MY&dzg@lv33q4;3LO%X!;GbwcUMBKH7_9gIi=WpCzxo8u=7r_K2T$!aj4NLI# z1iUDoEY#j*1%F&7MeV7`r?ZDd;w%Fk3fP43rkVopC+Bswp@kx-&JdW33aRC*oxn%< z9K3vv#$WC|oJr2CG{VIFq2utKL&HOAJs|c)PR*x-yao5CXG+=E==nr9Uh0ZY-NC$Yn{dtP}YD*E!RSDeFy%2>#3$TPYkN8XTia8D1| zRn{=fFUATVFYD93&%83ct>vV+dF%$kb{s3WdaiD+mTi-I8TkIo1y{fhwCA2ya9nVt z53kwl{V88`goIwEK`(y=fIbMIDSy&vA2&wGqCHJMQ3@&Xj~shhAGG(9OAcmhuDTV_ zQ7Zh)Haa8H4n(F5j~5`NH1Hn?8_2sxBWBEh8-fW7&x?r~$@AY${p3IcKXkU7YH?8S z6`pa4a#C%E>^0bf`|W@*-K4z?eT@kek^RK(DL`!K%_*1BQWA&3>%n)T{ph!P50?@H zJ(CekRW`l@HnGj6& z7=TL=TBzXrk1b`w1(l*xr(A7*i2&lRLAVnw@X2g_#z6h)@;W-21CIM44LjI~`>N9aCg1!|| z#qD&EX}7A};VDm7qSqk>9ap5DuCeXAFc}LG68tm~dDud_Z{-(_3IH&yes)1aKc45K zj;0sH>os!>j*c#fZ?`!ldlK!VP|UA>610qL#mzE>L=htYa!AYHE_=l1<}Y~}ck9WC zsQ*6yb3|Y04VI8a1(zz$KB&ZM08~rOaaEKej&@YG$17~vuU)F!&Da5rP?jzyrTfS~ zwAL(fcHlOaGE(>wlp-Hd1f$MC%*i1gWLo^Kus#r|1M9cr7uQ6tQ1`@q|3@YA5{y8H zIgTXDKfxzZL`q1w7Op$~k*Lm0itF&=9t|1w>|Ou5@N1u`E}kTZe!^8x$7VEf|FXyw z(pVo&npiybp~7YkmTiKy7~yYzuhRu?4;NVOVqlv{yuRfjQlQ_;w69lX3)iLe6}JB- zo6LB-{K(jA;ux){7K}2m)arx5=YSxgWEMrcxuqui_Qn^9^NUB|fbeYJ=$jfoFxD+) 
z*)?;1K}E3mp;z~V#*Dp}l`!4|3>yAoHrR7)saDqddM%|KV zZ?^1j+6=HRRi(6lF2SF)(%+BElIv`aG=eD>UPGfu@MB-JbVzX4mzs z-ewmK+>=u4XPpRZ-M-G8M59mJ#3bp#GCBFT{B|h^tP%pP&27`qhG+k1Sj*@glxdS zSiZdwZvWiRpHDu2fXYM@$)MZn<^?{Ai0WA2*-nesFWbT_h?!3dyY($4;kV^K>7c)GTJEvP$@+d(wR_eG9(UX-&mHlhc)NgS zZ%u|Y*tHq!i_6tu9I1t(KVl_ox6@<9<^y(#QB<>6;DQJ5-H{^q>A{By2;1Yb;@A+M zJf4tH(GGp@9Hhrrk1{8){Ai!4@6lwVaP0Wee?Fg|vd<>hBVD6%_3VmctoCO?@Q!Sh9AeV;OKfkgL}64({r9Wr2YVhZZ4xmb2F zvCY`o4?&wICHdKwab^xPusne4BE2iUJ0^1r!JS>`w21r6KD6)To9ymK4@pR9M`L%w znCPbd%8Io_u6|6~`|(}n%>iP-bmJ6K;ZN3U8sX);pr3vYq?gw9a8J2+n1fjLyoi;a z*9*2AjL>sYaD|;@|NA>l-2rOd(%mi+`9Cq;p9VyL5~)GJNrBj{OUTdLsB8xJ&EUh) zwWlV5TVh)8WG*y~UC!><>(fxohc_1_oR*Ub#X2fLrOQ#Q;0o zxPEz~n0nOk5A6JB;n$G>stgY*!U_OwxFnx-JF=vj?8YCF|k_@ypoA^$1D3Ne#-JQR-;kgiF~!w9*RkfNfrNl<9AMb zzYqpd;xYMzEBu^+U&8Fap$Tw-rKhz!Wn>q{Lz}PQyTLLy>_<{he;OHL)GCZp8o~ZX zPR8Q2b$b2uK6_*S!1=HgM7XnZ>IxwkKB_Ksx8fpg9kG@Hl)Zaj+vxT#o5f<2$+#NAs3R;Zs zaP{%)w`-uc+^FFKp}cJtQN1CWINT4rSu#2`MDVjDF5;3(5kF;!)(Gcd|MisN6$Co~ zXoH*doG*FJG@P${59s|RG<36DLb;sR1tpQ;gQC^t9sUd)l#elP04tvqZ1~Ox$8Si> z6^iFwI-GpTHMt^14{1&!uBB2f@6LUH|$7iSmdK%(brnjlbHuL*sWzdFU>WR!zW_AVX&LdHG+4Rp7Sq>(8HNCsW9mv z12@}yfTZ?>%G8i)!9VpZh`-)V0Ab_2~hA1#affgX#hYnrv!%8D{0a*UO15&D*o5i3Q;b0#ARx0mm>)wU zMgH0idVl-Ap}^|{U{nuGW9mr0pcLFsFoRnGbR$m4Rrt{eK`G{B2d~@$X{8ecOZ?bm ze^Pa$m_W3S|G!%R;R69=zgPwICTaflYyYxIsW9QY&4$Uwl4l-YQ?WZ_Iw|yiawke9 zDpJUZc#Q6c6(q{~)@RcXrxZ5UWS?l%Aa^@F%N>eQM|hV&s>9)b zuO+e|RRi2R%_ZH*ywTCLu4HRN7RDk+_L_5OuzbuCxRv0>SD0{^hHXwbN6_Z=5e3Y| z$^I1CQGaA+#-E4U{!4yCsD6ipI?eQ_u_1NWULYVjQpf$|E#R8IrDl_o;a0ROPj9nd0O|$V z1~!!i*3+lC7lAU&5>$Yl7H(0emF8J7FOE zPVKE-y2O;V>se*msVxjERAA%x#yC!g#j$zrudKT$ykQ0;6ng4#d#{n-&Y2)>G}l^u zD3$XotX0UwZpgCn!UJ*m1psyOIo`tgj?|a9!uWRr9oK;z}*xvjrsQ(?^<$#8|No5;2ZI{*=PGk85z*iWU# zoCPa>A)KvozO(wuaCcJO#ZiEIp(*jsy`BArH7h9}LvnEDsW>$4;vqO~pCEiVden;Z zE9B>NxW5itK=a7!$B05~UW5(y{gg8G{%su}HqEG7Zst5si3r&;7hSzhY%d~(J-;`j zJU%ocb_Nn^V+4|BQl=6Z4+$Z^0R%1~ChPXPnV90iUtI}=VeWCi(vw47?h<1SEGc!a> 
ze&WLU^p?YN3H$B5W0CHS6#vZ?YfGu%LjwtnTT~lsZ<_)!#h$@_L#rh}TfVPS$leMyQb*jYd-FRqFlYvG z?If~g^&=wG37qw9#WjFPYegVdFjyS8+_taN99#CKaC0V@`tbOb7I-h?iiGoQxE=;R zOS5`Vp@dDST7I?cqC3^A;xZJd?X_y1UvSv%{=$&h9xC)z5@ol~NXKO!a0|f zcC8*J!eX9MZ%ZE609Xw3(W^VpN)rId4S4cy-Mz!9m4WUDk9_YQoZZ&6cc%Qyrn54z z%Gb|5_DlE}eOTZAY0sZSf)j!aoO?ffzxA)^7p#1nCC*X{nZe~j=ym=AvvTEv+s+3zxL^OcsM*$4!{RwQ z$}fXW>wFw@`LNlTQ#{C;?XwPX z$PEQ967^ude}l-+iIU8;fv0STn)tGR-k;ia zd(G{>jlW%5Tz*6TuU!Ctok<%lCTMyxY?>iL*=a>RV8Vq5BUL(8IaZiZOF!HyfwHPi z4Tok+zMrb39{=a(P?vN1&TN>e<;`!pvWLCYx4co0B?O_d&2O|C!bFqSlD`%b2}5~| zK&yxu)`2h26klV%MQ!4km8p|_8rwu|_H5hg@}69oFxZ^ryz#`i!sEnbO@B{8O=I`$ zwmA)wIoLVy#T}D?G`xPs?${VpIaRN?%*lRbBnv(={>Y*6rh^2vdUNJiA zlGt`4@7=^oJae(4Xw9zZC}Ui4M|=8R5&QO)2;#TFx)+)b-5BdSW(JwM{S?pH#-gv_Q|U&4xt2%<2MJs4lr+zIngE1e@Ff6m$d6*aFBfh0A67C<<* ze~XU4!%UkeCQ*rY&O1&2m-2Iz@^8O9GXJQYYD5ylFS!6%Au@HpsomC|=Zf6m}U;?;uOi-pg45bWniT^G|DpUei(Q*{)Lv&#zKkDBp#-_4%!@z z6;>%dc*_ng#3$nZKMHIk%(W-?YMUuhe|OdV{ePmt1fyOqj~QKO%TK}9TysC2^{|{3 z0L%Lv<@ta-?O~ha2#grZiolxd@Ypea_Y-b0d(dNG#ZWIbj;4%z*)K|Gyx1@SlqM)q zo$*u{wlNb~RQ6fYL>Z_l#?Gk|O_p!&4oozfjQyS;W+ zoljn2GSArG#5WiiDiVS(j_e|$)C<-+E}xGo+KHzTUIAP;0m(W|9za~yHfY|S^&-6nRdTI;%c?&nKAgv)XUUcll4C!3e8%4 z*mV6LJ>vKTf$eCgooh_J!-^NZn4L$h`Y~MC`by0lKw@hQA|s024t(8qejs_PlSVNvD>AhD5oU z%P*$gUnZG@o4auSur&ufzL1AzFDUh6ASRy*37wZvC60h_d8@&P&w}+@d~%8U%6->8 zDVz^``0rQ;)YhKNgu}e{k7x61GoL9)?`Aw#oz=ZHL?WSoxyLjsz5-_BX4h4Hb#cZ@ z;Xfrv<PB$y;055YJ(uyrdzUt(r`N= z>so_Vs)Lkiz01U4?xWAYBt0#+iB2VnpA6&=KnGmtz#kX9U3-+O@=QsKa>KNv;VykO z?&hmeee|Tzqxvm=rB{>9)dLB{k2VhUD{>fzO|}_Llh_zf-`mZw2OT!3-yv`U4J=Pm zCVK1k3qILW1U`K*_U5ts(r{`w8*D$V(01;Zpz5-2v4*p` zDD?6tjrM$wwBMx9e%wviPD~`a7Ps-*@x>TGa z0H;z}FV7<2MZj!hJH*uu^bwthP{TsNr+i>*s&sMKWw%N%5O1OV>(v%T7tgwgyo-XA;6?125V7w3%m=~h0i2&!mN#UH3a&NON0s9KI+Voj=s9l z@H|0{uyPjx2mmgM&#Hu;Uk)yk{4ZK72;9mRp3T#Yto=23h zrzUcd%nkznP{2=GRcgG*b8aatWS`8RgHXGXt`T5xMqr*oZ{Bs*c?Rgv-#UeNC%SHphKqhpC$-&f?TKRdkRH>f`8u?>&LwU zbziNlD{l%{=cOlqBa+XF!LPV03}1ni#gGk_L+~U%Y-K@i>JC=ra 
zw-v{rc~4Rfj$Y-xbyy#BTnO}FmuEJ<>RN$CJ!Ttfs-@)&*uOHc63`?r**s1pB^fC> z-pw&{+)%68BbRMl3(PiiT^;33v5&dARAxUnX7kq-S!ZFFrCS?oh8I7Wo2a+Zka0Jz z6~e3W8i}~NObY`R;W+d~5v$~|CnKMylUnw~~OU}3iwj>tWpt#E0l5rHRd z&r{TMF9OnZHG7`!=D|#RiVo9jB(!|YQV$Mzp#~ZH4iY{iI*j$>=0K#F0%U=TzPgKF z5g}faL+^Hqm`O*1(01tT1@W*|dGV><8B0 z4TpdOaAWL9Z+<;_5D}&74oyk(KDRfq^>Cgyj!&E@V9r0LFoF&fiR~Ys#}okG%fiMQ zU3WB;U7d4bzZVw?)CTHvHZAf{IkUXmhGb%H7&~T1HJ+KcbICtF7vt*G%d4&^MDJ)| z1J(F=eAC;pw?s5pS)!A0#w~}xuk9UAx|Q+%pokFvGic_|ueM`YX`w|0y26z~UGAVK zn|CSD6Rr`%+X^n^I{;i@icnK3#Hb5Y;$cmlh4S>07ckwa_JRyS36*6Pe4Tfm=@y7RQk4vjlkh{))Ldg)^EAxWYof0eUCXc^(V18 ziRID^v5eZ{iUSO3p5lh~f`r|zUb{n~={L@UQJWZ1sLe$sH}8P6^~CRDZ3o!cL3{FI zDTKIq_X7h zJyX4sa04`9#g^)>nSg4)(gSIerV_J+m6m+%=)&@T$~B$a_b`|8?B}X?{BJb41|^(w z{F(!x!Z>_;_jq)f0R^IvTyhjZ1v^!j(f;s zsmKk>k#sXHK)X}yp!o<4&%flf*1TxwjU7D@EElY09;TW9K@OXuV@Xnot|}hX4bf@N z*r#b$k#M~H&R89xPi%CY_nM<1HbNayYS(j<-A2Ri3n3>hDUH_!?^L>biGaAdneFNaTe&f79AKJ_?<1Y1N z#I6B6P+rfzk0CWHy<`~BA}svx@F(>+>m5Oq-4@b=%T_#9go~Yv0jc+mzDa3Ey)xn2)d_ijH_DP^xx4b` z9UfEjv&#HQ-Mx9)xgg-hOSDc(s8!_uHAa<4h6z+^lo_aUv8U=k^rtq&p8^VyC7XRE`m z6$t%Bj4I!~w4~660Tbr zY_&<;Qo<_#^5m_gMnej}PWIQ@MREY64udM#aHed){^J=d7F0eB4K^xZ3YqyO;t$+x zb}mQZ46o{NcW|B!Llmt91OF+#~6r zetx-A^mB;|biR)+U}`Wqc~~oasRZ2tb9i$ch9Ojv48El-tP5$!&8gjWV*_*<6Ve;B z{F-lZX&fMtH-7DSC7k*Rj<3<0PHd(8phdugQtD}a_tYohk1#tI_e4R&MRNP_T}CRG zu}qeh-;veeK6wO_8+pg_Ui&una*IF@2E}_K+-`onH^I7*Z4LUAkgy?Nlgl?0FNfeH zO#AjS?$7w`rqoTimo`e2&CVqHOi^iUkv3B93+1S-rjxIMNp*7NQL5iJN?+}$H%R%( z?HCxHIx0b+QepGIHF>}Np!bSWRCyX z&7*#t+)LBA!G|@GbiEX*_L9Rv7NOCGU6w3PP3|ZgVqP{}Vs06rC);h~)3s+G^O>r| ztn8<7iO7S#6yhIVuxE$FMxLe8c)RsoL?lLQ-nzj}9cMhjNw()WQ$yJUQiWPVk?qox zRK^n75x9ZB`GddWXD+ZC$|3jT%e(*bYhzVOa5EJlH4-FWH|X6nC%AwR@|qG+THdKC zeHvcLLU>0Omb4xIyl5)LHl%e?6eKYj!zr(x2VW`Ivj!?q`Fc^rPl4ItB3@3*8NC_* zQEHoHfHf(fR{v&toLKZ_;pGk(8r)T)m!ROzKhv^0ZqR91h3`)&KT;OGVQ-foXVw$W@g# z40q5~6{upE#HB_#Tupyv9_A_3Dq~94XZ{p!F+l*QAElmHY8_8de%kL;ZPo@m74@O5*mVU9PHbE%6z zbtM?jV7|L;L!Dq48Nbe*4RjW<7VCQ1cVmtpF`GJ-X<4^u%FWqGHGaXY54<~1hxhN& 
zM*v`T8^Cf)_x|bNeBzicXCD|FvM5#QZDk*+L<l;lD*S&L2KNLAavkxLjndc9YRuHMu1R9Yw+F z@zw%oYG8j$Aa`%y)Ee=|AV$lOO!?L1+u4}b013DZuli;hjJ={C$HMc6Y|z>!nKt(;x(jLxFzWmVUZm2%4xmPpV)3kf#SbbW zTtCA#+d$6d?@0$at8~?4aHl6hlk2C?N&AkPe%bX=_!bN!Nk&E4xmI75-_@WXOv?+0 z19kc@pUjGOy&mI&hz|N3p)-&CX?+vtCzgL)ZX5RBms==EY{nQ3pq2LTr79WUK_3H7 zU%WAbmdA}*KSMG}C?Q+DKhd9_l9H0kcF7amf1h6NS#&^9cQILNH<1tt!#yC%_Z@vo zHWC;U%E@H5bf3rS$4&xk^VGEc+#Q7MDcZ6U9@>;X^T9xZcS%({W3t7a>GUmf&7QSb zb(mtkqDpSg%NG6B{d!R9A792QH!$_c)Cr9L8--pgZm;E@OBQo?82;Flo8#Z>XPb}z zTI9h$BOW-ve#JY_wLH0zS5UxidWDl>eZ;zO2ISWbfXBnoqWH5uw7cF-4PCc zx^U!L!O*%E3}dAtd76KyK}vQhWx?@M%~2wFnRiaxU(PSW0ihOIQh&5^t_t=7Iz8>( zE|MU0-R_S%uLE~bN5t|2+{ql$?^>P707FH^{2=v1&KUSI(g z+bcEGet|**fQBn_y-Q|*HBLpgiH*?MYgkaC;*NKk#rz8n`n``oRM14wlRXs7X;WvX z>AWkUXwXH!#6j0u>40D~o{TF$fCC=CQGu{6L0smgzr3klFl4hRAI{mJp%_STk1h(^ zEW8Ad(AiKG(JYgL;C#jws`Qq_JgJu6p&;?`YPFq%!twChV+8ZfuCucF5?AMdGvr4i z0TBG|@o}L1)+E1HgHWC28VOZ(xmwMpoL>JE6yjrNA=sh_d_AG6l-5eE> zW#>L$?ypcrOa96N?gc~t#Fd$=uz%E9AY2_-vPHA{-@xe4XRb&0H;E*c{>@5Dt}td{ zmEEd>l-h1*TqX6=&Kxy2R_E@D=9gbCW$J+1Fu7v$fwmza{!XX|&_GA^JS6<} zJf*U&)h3PSHXvG1P!8b7vgowFwT)7rMuZRlkKr@I(6_rM@zwgn1wa0P8@jcR=LRnW z_iR15$PJ}|{xC2b-XwAVr@k}c2fd-K>{5m6aLeN8Lmzk4uBLl*d9nruRmGzmfY}Z221@8<}?%`fM7=0fTuo61G;0~ zL`PfoYE$d6s8$8wwO#ceCpd_B?7=E6!6Qc@r6>I?JFH&oAP}I}h&!nxdAe=tDerI+ zdZ^mVP@?g|QQCUO3hxCu1@J}Rv}VQ4dVDS(m{PQihVPA>?Z2b4{vUTqe}9+WZ0XKF zV{NeH3PYAmG$D^tCiGMS7p5PaykJiVua4{rDd_ICOT@sF>Z|FZpqR`5qD5 z&)W7!;q!m{9AwP9DE{wv{wR(FF!Di3;OhC8ex&{muEmodm9HlrX6t^)LydF5t9x@w zE}KwQREcODxJAFkBeFO`X6d@w0;iG+SHQ~xQE%# zX!RhOU(8b-k8}M^E6O!GCz0Y;Da8?C`nd@M%UR#jk+#D*tP`*~g_tFg5uL>y=44@A zH*n1&WU2eC#@dZmJdJ%_A?G|<73HIP$Ion^$QoaG9_b4x5?rO9)pMG`XP2@fmG%V@ z2Xm;V_jU`G%nmu4^*$?t>Y6o|EBvqh55Lp)Qe6zDCh69@;(5 z?{@}WZs{_-@Sm5-t2uq2VmsWvXYdgv+LdReHBKWx{+guKRyX-s&~1-rhg^M^ZlvJV z77t|s8SA955V-{Y52H?Rs=~0#Uh<5@8188U-6pA9LM|^1p1vjm_dcl<_c8LAuN3zn z;-{qs0c?*J!1lCq4toUc-SciflTjjD=f%nFd(pnA(4s;&Iu;EFO0yV&W)O*@%$E36 zliKasnx>r3U0)AjVH0Tu&vLiyQV)0!>+y!$Pn89%pE(Nf4+}IF&w3!w;`SZW_G6AK 
zeJ)*vF@pe=aAE2$W3=#W%S~jDB!%H``RezBAdw`Np5V|O|9@6aue)Wl4x~39CVAk$ z7K?1vZ~2N-M*odB0{Y{{QOTkXbr5e_kbf_ORhVLLj+zYx{e8XhFsTnYlLzF`QpkCF zi`Bkhhi8eMp&4U6|=UXTQ|y-IVR z#1fYekIzX~sYZs3qU%4>uRv^p;hq^7z#Zwt2N{Su3H-F$a_=_T67SuZ`t^L?Z05)d zU`4daNc$Q15i16GVMLyxG$8%SC0DrDxRM1jFKPJnVZ%2q*d*`B6q5qjo zx8S3GktLS*YkNN^bNjY-gk1Q_fW-B36)Hwx4shKFA4cIyafg*O-!C;Xh;mg*45-nf z!sfE^i_%@w<;nOrkEv2;FJh)Hf264_>(=Y%0Q)xgq^@-iS9{*6R^EAvb+}Z&5Dj8DGq`8YaJMTmc9=Vr5$m zk1SKiB~>bClZEKJeJ~shys=Opd}?HF&}=+JH}fBE^^lNx><1U55fdEFWs_FZ21Dv3Wg8s<}W*ucGJz;iXlY} zig{ZK_l76RJBrV{t`OX*F^W*X(X6>vx_w1K!e5k}^m?jsY%~MyGYHG8*dCPEwW5?b zdhG2n*c)X%=q(Bgve8LPZrNG{UT}GRH^{LVc)^77KDSfS4j3n{KY?ST6A zgyXnyB-8f1I3)8T3!(Gi#La$$NHN_l}<6fBw@AEMv(m zdX-E(s;UNOTeLqL=YVb(J=^_AC58O!@)Cd_I~O;c-tSaEeXnQ>1jc=%^UJbr&iq6T z-_bh8_F0n~(5->O8Ot;&I0JB2b>Lx(miH?ogeq>zo6eo=jO*+Y(%RHKtX%p&}Y9rwgR5;3T z-Fmm}tSs7T>y(<1#3=2Ysu;{aicQRzt~P=O#H}L%7Kv|u3cai-o>|+igzyrZ zC$pp$T(y3W8Cx7>YCb+XiIngD?02bOkga6p8n6(v{gWHBv>0PJTw?U7PVI^t3W%dS zF)V>Wl-|ffs@vXK_Ks8EdV$U#!Hu75`BXD#|E<8$l`RM*FF_ft;9X+z|8Orr2V_cc zh11nJ8(w+J2K9AuN5KPhI#BNau-y1ZG@)A}!aQxDwU%WoCBsI24(j4AxYQQG*Ifrf zE-Qw^<$`Jx{)s1;a_3Eek>SV5VfKjZZ=an``DO=c>5)vdj$W-Nis*?7%MixCqPbjG z!!^G9#Kx@lVVZ`m1|pi9PvrGE0jTaV`C{T71lKUc@4W!WNZ-A(GgXsg=9<%<6qR;u z!aPrr!(DqveC;VKKT94$inYoQU-}K4+1s~mW4bqQ)r5It@_dnsb}oKOTD7SeJ2>8% zM-)9n5ujdS^Nh*LBTrnv`6b6epZ-)#8Fqk}_q;932jLCz{oB&vd#{Xr9bzthTlXT3 z*>bLcf9V`BUT{3#&;$4{XE5 z)f|JS-00pE03I)+C0{7#-y5d3txydq4%qgK~GssAo$dz%HLh;PX{5 zqxrnhs^N78-8xIAH7g)}Jft}_`Ht=-m4Sp56?MaJRn+ai3-C;>@7*9Xp>0D5Nn=2NYI+jU~P0v=N9-Bc&)A( z9>S&70zdp7G$x~{KK`3srK=*=$mw}qG4T2z`b1NSQy%RkM>26Gdf#9D-u=f!F>Fn{ zhw`Om8Lc+jNmHL%%>Tp#-|xK65~m?l&}9~xMDmAP+NW3~4iT1gw9lWfD@PXT05`KP zUu(D7-_3R5bO%`zPlB*v#FKDS3Vukq%7x*e8#lJ9EClw`@yw6)N?s_$j zT%0$S2sAICBEF%^*Mo12)*|KCoQK~yscO&I4={4vr_Sm-I0hH^U%oK{mkEp>K{XND zC&z`;yQgMpou$54)dKTY*S>{M*7CP+y>ILP2Hj^=IMBNFCq3eP8{_yAdzvx$e-+tn zw^=lEWw#&xj44G*Vin>{4eRT+w?DS)#Y-72YYZKmV6V2_DZE$g0B}w-g+p~J0qK2r z3G05e7wl!^HklxG(g8}l7~Lya>K63WH0KtsL8OQ0Dr1u7(64H9+GtIGVvxJ_cQ?Pm 
z{6sZrm2&GVLsnNFlgdwJK)BcG!_=7{GgwOD|D^&r=?bE_B2H$5C{j&niXMm3iG%ek zPsS~Nr#~)1Bp+^Dcll~&8V6#Zg~j0@2ORqQ(plIqYfCcQl5C>NGVD;Z?6>&@2q%`a4#UcS~LT)t6sBLVU@wETU~{okDwJ`_l^&l{D0dt zE*Hk>+wb+Cc!3Vf-SfxvqS)7m|GHZVn3d}mIeaMi&zku2d&!nvCBryB@vPyQ>Zm@$ z&XP14^;uMpO=4p}lAWjlx?%re(sW|`V|rc9%N9-;Fx?4IzVpm#Ibj>KIEj2RtYfoo zepq_fh)Cc|(PFsN4QMs+Hu{1_Ye#|ufsF=0fGDw1%sD3Cxgl>yJbL(jP`UW$lH&?v z(qf%I#ZHE(VY;&W=Up<1;-S)^#8PE?+YdWd9!R%A%EZxiZ?ul65_R3jSC5>YjE848 ziO=-mrc|*RWga;@J*K~JVfys7fh16inMH^3hJy4lllgNIS|)R)>j6sB4+()koI{I-$`jmfq+*9Y4jW-wyBZ4 ztJqLlt+43CN~sFh;9l8*M1)gw?vD0D@Z}~6TM>Oj(87YMfVw&w;%1JX7?%2m{}=uo z`GC9q{fokXCP6;~Yb?sR((Zr$R|i-0hhU<7!%llCSr_N|CeU%_NDkh$a8Kdhld@$x z134P@+RLiWQQCRAmZi;_TVcpo?qZC^7Xi)l+Pc{D4b zqB$O=sSj|eu#LhqNw1;P!~&DaP(Yx1fc89oQXPTEK>st-X|VuE0Mu$N;Y8IUkUo16 z;h*aj*Qm6V$!MSeMI~JYRKRuQQze_v@2g$^nprvh37XOGahd`Gkp&btG5R5z4!zw~ zr2?D*Gt<}*`=URbAfGN;U(9^=~dj0eA}hfzJXW!kD?V})UYzA{wz z^78J)j}p(T%;v-3*qqtR6U9A-5C+NTw?mld-jF<~Iu142R$mQoS+5a8E*@<+TkK`g zvmGr^=HRkWv)+FFV9c-L%8g01=4nluRl=*(`HP`-w^UKD-EV%o#|y8Rww;D*XU~d% z%v1b|+fOK@$se#a7?6x07tu?b=Ouf*(~lnz4d>>jw?>-kMNsjn97^*-QHXFD_ocT5 zT0!u?$2k6@w&4+1zJFU)negZOqW{DH(q-MY&EeCAsbvy0@VX^sUsX=OejxgasLSO6 z)L;SfR1f}kox4E)o7>&V-Pw`}Ft{i`YDQ4w1DJhywp|5uwY(b2@`+wD-FX)s3SZ63 z_9t7t{2aV2r&5q;*Q2FTdFZ9)G55;zS21z72%r6H%yEFM-;tYw3Cu-}ip)>T?m^fV zn+BXM_aPOwQ`L3FB3`*n^t$XDvblr&*2a4#eL}rl27{L`#3xB!X_sp2^s!KQT=37< z_mh+AlnN~WtXC4?V3Ok0ilJ|SN2IRlq&Wzl71qwo+LEOwM>*B33&0>H~8TknPLWG*AHvP8I+Xi2dzP+xT7v+ z?lM}Bjq4pqzf;ej4+e>V>yf5E{QSpk`+ws(bXYK2*$(gaeRkd*LIYV2A>{W-wL%$q z$UH2#>&~l#F8A*d_w2s5Fte|-?@5H$?G0!iY)Mq-?{!Z_wRarBN!jL4p4QI6!=l%U zAm`ssZp$(i6xZ4HKtADvOnDEwe9lI%coPjbJ0HB(y*|{U+8KKu9w%#rQpb}>y}%bj zds{NxQN`Rdim9|>Rz+MdLO%RdkTz6TYhS(`ta0P$B1J7{02Y7k zUHSVP5r3S1ajBoLx2nVVg#NuWDb%Gx8|)>l_Qzw=*>$h9CJu8#j8gWvv9uE92UbL^ z&E}`)O0dDoX&K06D=mwN@O<#opiQIDlw^iG>z|+Q@lCj_cJg34RY)FbPq_HF>6OpTaRlF_01^7Jte3>~Cz$1;UeSv(cA!U`S_-<93dh zYA?K6tk&YasmyKi^N|tqLiRc(KHxNG56F&NdJg>`3oq|AxiSeXFp&l39t&^c|2 
z%cxJ!@^Kty=@nG}@F{n7-~v-98zr@UF<(~;SgRE9=Rz6L-pwDU#9LJ}k*4lns@T2c zl$^?;S)7qh{O-I*QKBrL%pPtQpQZ)tl1XW&t!Kp4r=5Ws^*|EzLeLR0BsTW@F-Rfz zvcfrKV$tailrz+#cv+YA+v$!n-%-?+rgIpYUBpX%F7|sm{Vi3I>7}=>%RlNzs^X88 znkoaE1RwsruB>J0J@9|;TaDd(+W+$i zuE%zU#G~*W@gYD#HL}vQQ!URk8g|`=;&VcQXt~f*YfNda;f$+jH9u{Uz~Xo!6QP$7t`+?vK z-h35N#Ai+@arX1AOPi9qcwBgyJyUjsz?9>!Iz4b{zEppSY@p98yD~;XYkvKtv8-O4 zLxCx2-)Aw^twH%o$C+)7=5=hxYuqL>9k9UT!-LC$-NRYVO`)#i~ zs`Wt6Y2%|cBMv`D(9;Y9Nl;or3rByaZjb$eK!Qwh3>9D0tt!dcJERH@+TIXE6g=6# z6Lfwd5Kw;pH9f|0o`lr|G}rLMk+Y%TQ;*HIY6n4!WVIOl!%vnDG4S94uC>YdV6nik zS0+0ly!XFkinYe_ED3pb6V-Z+5JRe}-Zq&2AYP zb<}u*N5JecBk$jB8j56eXwkze%T@av8ZqTBP2|DPCJdOHCl>>;MG3Aih_WR7%^MbQ zaxR?*OC-TzprK6dX&OFSmJwg@lhLM(U?Fc$#*>|XM-wb&^C(_zAB%1x3fqUAh5< z)=LH1*EgvJZxlCga@Gg3!J!-b#+`XFD`Wy{x~8)lPP-1U02^+4Y!@s2aCimC*#N5+ zWY)G-jLLn{2CN4B=k4cQ_Zn?=22yWku`=xxje<5 zU9KXq^{mVcy|tSz;5$;|kWoau*QR~5U2BMYCURER%<3Cp1{rfdx)Pwu zyNU0Gzz-kCoG_Kib{ovj$P}2NWms=x!p5R}ryy zivVnDVQf)#6C8KMyze3y1DD9ic^{Kca!e&%+X!d#SY79}Ji?!Sn(UXeGdGp7r7@QH znB^IlrWi1V@0ZQ*YHA>Xc*xT9yTttcuyKvB(sRCG11VgBk?x$_)b^lx&yNa1(aHK> zxj%UBb{NP$XvWo~B~Q>Ped}n!*w%R@@2wxhU22gP_joG-a%F`a7j^?TYLqd(M;Crt zQHF@vt@oj$ZTn9Xf56_nyJO8|tp;8z3e&Fmg=XP~c5R()`9%iUW*3))c%oZm93FX| zSlzLyGPPlPPDa>T^n7gK-frPUs#_w8`Ay4$fponX#6`jCuKZBh802;=ZP_a0;~a-( z*}}YfKv0YQ){D>%+sgmsC3slV!f|gnR4y-*;(x zs+{4QSMwsJZ_7a8eEqQvkn&$^ndRW^2B6<7F)WqE(ju_RI6aakjE#nJ(xkX*kw~RK zQoG<;ey!aWfEK)4i}U91llqRyuNXSbi8lHYDIF`VE_agC~&>o${g*%AZbXs@uB?6dA6vWcF{$U-fIe z%qKPpF#u=GZ)YOPbrG>q74+jYuld=m9}N0>*N$7n3c1pNLC#QDXf{Xe9esZV=_4^i zVJ~G^K)N@`qm2r+NTu6cTl_kcjDB)UT+G)3ed6xFLG`a#k>!Wk?Znbh5le9$9Lzm1 zBWJ!;eJZaM7RikLi}Cr>QYr;7RKHDYK;(I(dkp4`yw`~^QkB!~+{)P9^z}q27BwI% z^vRv`y56zSs)7t2*JMhuqQNQY< zjch3BrO)@8650@>5C%URc0m2IK>XXZyT`E zg&h7v?yQC!4j{nG`C6(@TEn*2(D-wy>KW&-1Gh(U>m^?0;}Bh}xh4#*xxD}=k_zo#|*>J|l6jqpCIMY^)!sZa% zk&IF#WlgcjcwOb0Lj!)Kvr3u|{2+D8*rddG?g9Z#e1>|M7Cgb@Ypf9BIb1Rg6+nVxzGcj|HbB7Qwmc8{U z;R8GTXd!>cx}9B*9Z?v%h8f^Jsrk$&_>>+BO3#>jA|5*=vcOHi{AB`M&moW|RV4;} 
zG@^?L&`#Xkl32=!PI-N{Mz=Woc!c;q$<-JjA_~&RscC+xCzg(KHbI@}3}pP*pWEnn zGdtLHizz3C75fq<13!s<(AEP|5^;BV60jBfxOLCK!#8^Byw$5!5~+@Z-dboX4W#y( zz6?sQQqnZ7eM%WQK3O~w5^W!1tMBwnMhw8@d$mCqqDDDdY1n054lQH!)umwNUjsR< zEm;;gGiGX<6{lw`Ewyz5rH;1GwQJi{<&~kd z+LuQy@qpS~UkS#&e3a!b>H2@C z;y<5B9|Kp@#e}u{j{-PYQi3rnoD%8dplV=#|H)v!Ko8Bs^f5T@Hbkf)i3CndMk`0{ zmDDdyA~hA_w(C~JO~@}zluOl@PJ-p~WAd{FF*O$emLUZG8{BLwVa~TLk8?*9EV1<7 zQ*vn0xfK(u`nsYM_djS;a54n%Za&a?iSCW_STVSefEU(@KxzA)uLE@xuMFWU2S;~M zU1R$+>^9%guQ|iALrgrf_hTo*$1HVUFV@hXM*{sco?HzW+esY1{qzbO{M1l7gSXWz z6SJVcJf5vaPjmU?rthF4Jq;-uCCsQMBpL2Hj|_b!cwsjCs=9>s{Ou*_@!^h_a+mNQ7xQV1C#8T0F^(I*0vuAhs{yp0= zq7PKVM@e-5y^rrNi5dq(^%Ft^f{Zx=X320ru4$!N-uql2yPjC2r+2^7*E}k~UaM0? z`Y3(AP@pNGhV1dl%Y$jAiIi;(`qw}m=N3os67R=AxaOo3%Zimyd72_L_aY4%FS5Iay2Q`O#~$uJPdd{yGlr zq2tdNB(ikVCOEG@3s?`B8fVTw(yMTg;J~*=cn`VB=ed)AI}!F84|}{0K=?a~GKafm z_3$@8YD6<~p*W(1jdBE3O6QAccHNidd1Lqz2kU-J_*SDwS&BGbQ!w>%WXRZ9D^UM< z$~jE2A(`#^zQd03X*Iw1sk7Q0JbIACm(%~KP(?Jf!1#!Wq~a3t=J|8G6ALnSl;tfO z4YS;x$cMT)NV1AcEjzc3U*lB%iVqvPTRUMD()AU1z@7Pvsr-+5q!D~TAZ)}4>HoJI znm)=pe*ucLT~CS`4PVFg-dDCeKVuzqQ-uAf*UYu{BD7tqpGXzxY2fv;u2YhxKPj=1 zdg0#3;#G?r^^~csJL^3xY7sI*G9SJ@XJHdI5#3Yn)@+*{Nm;vl+y|ej4ye8^;rP0v zmyK#o)M>1tUm;bg)u9u_g0yJYfRekO{^(B@rQp-%O5e-VE$funogFa5Ti3Dgd{s4O z+mBKnMg1IgvD{zsUX4el?AzJGJZ!2~+1Swx`ViMayL-6{Rh)y|{n$Y=D~z3L>bSWp z9&6&IpXcuK#D3&S3i@uOVwM!KjfbIG2=r#pu!dF&38(WY0}EtAr)FNdbE0s%9v^z#JpTgB9p$Q`fU2!JL})^W=!+vv<8%HSBWCk-*^Pxap$a&~BJ z7oMs*g1pnEno5px$Y3Sb&B74uGU#A*vt9S5SoOT8QH{xc(SxG?3pt@Og0C<=7=NCr z*NUdY+(aQ|<4+ZBexIQ6HCaJXKGVg>%o_6;gQ7)JQ8v3gAhi?jo5c?Egz4vZ#Mdrr zX{)A6FV2%-wa+H~8$TT2_vx&*Ic0UYc1KH}J(Cle-e$~P(2=c42d1&uq4ci`EHn}_eG2rGq*&c`W%Xl7H!b!vjoQtX1 zqZAbDT?StJ%W5w^s;Ukqwm6KZFU=~FP^`ocWH_5*;LSm1g@YS8!CoNwphp^rf~ej| zq;_m&V-vmPt1^x}IO+MizL((lhT7X3SiR6~y9}J>uCskQV>AApF4Ritqa3sUBnDtu z`U=KzV-Tk9Nph71G+YyLynv+h}ErNR(jZRw*h4qEQ+63=5Gm=<9+$as9aQd%;NCG1gio-Vvvm$ZwJP|9296 zah9kdRNMD;tP;<%@mNV>W+dMHr$v^X$sbMya6R=c=5O2l5H}^sKkTRPTM09bP~V&0 
zWKU!Sy#LDq!kj2bmf^0Re+=cqBP)-d8&p_js`vqp@Z@z^eS?mJ&B>#yEP%=1`IgDs zP#ZrO=yxF9vxg`gu(V9o%ql9^tS6mAda!-@C|Xuq6$1~t%=1;!<_b!UFQm!_COWo0 z+rNhWnJCNYlYJp0Mr0mU%*VOq*nm-lQDbTkpN64UgyG^2Gm*$18%vp<-#I9m?#b71 zvZ20DzhC?<2<*Gq!vF7OB!AgM8wn=lYZblQJ9d<2PdgTXuC|(upc4BUriK#LP~Z7- zC=Iah!WY;hnLUs{2phgt+jjH+v2~SES#4cc9}!TbOBz8Mq`O<{p}VEKyG6QNx}>|M zTe?fSySw2#y!U?I@Q&e+!@wV$efC;=uC?Z#v&(EW+rMdMc7|iP!6kA06_?U8ObMX- zB??t$RBwfi=q44haJdWU?3wSm=~T0uNN^_}1a?dtbUFX@EJRG^ky2*oG|kAFxHop| zT{<(78w5(UZr-x*+EL@CC1HAr?vowEuU50ifTLYjchr*K#Z2Pd_p}%c(lhIQ>sHED z&sWf#{R(?m%=&D-`G3*lF1x#9g_;TkY1;+=E;zL!Xna(!8PfMW*y5mF@;U= zWQJ1{(!2KdZ-A`1dfIxk_okqhLzODGXZD-E)tL^c>e^$sacDBt5x=A=?0ofk9hc!~ z-)ZCIz+HEFncV-7Vt@rTi;{#iaN2tt-D+FdhH}mE|4;Wyzc&fk z_8h>Q_ZD(<{)A1qfP6lNZEUmK+Ei6KgsNP?@iW9F0WJt9mk|XUDSLfa%-(WMW!kFg z5Dgnils8Gv#Eu_gvUJOF=5VsaXxvIsT6~8^G>JwQ5Nro}92rG8@Nj&)?c+62^?Ryg z+O!vp^UOb3B`flW=wdGDPlQ8RA!iGYty>+x9P@<9?kn zztBd5C=W7wti|;}0Zu0UB_hp0GE>gMZmW@4Mn-NF1?nt9>lE2LZu0Sg^!_~FcdO~z zcg5AJ@wGAoDi^uvfX)NWm z31Ex#uao}O_BRk+oXlcM3sj?WA<|b{DU&0CQ?I^$=}vKmgOaa)a@FY|hALXIa}nP*NJWMOc!~ z#4HeWR@!XXgbaGp8DolKt)gI&_E#t4*Y5b^p@jls zE%8WwRCXJn4>j`5y-3~yWW{7;nH|$8Y@z~{6bSC+^-GwZPA5PEbWG6XEcTho7HPz5 zA_65@yZ9_4*aX|~4NVhS9Qng7gu4xD7gdTwlk9ZLMSL0)v)pVTqHbp><#mhG=I}H# z=MFW~x$#&-M42DvHklfyJquyg$5||dYR(eR$xaaynNr$Tp-g0Do1Ee!sc%kgBCE!+ zOmgho&48ho5zh_oMfrt-ONtGrTSV*PRQ8(%zumExrXb|~{V%=!^6GN!L>z%aHH`b<4l;tc6 zA->|TjJ5Vq;|W21iNb35RyzE?IjaDeaZPNKBO_txd@l?;B1Z>`WZ0=p+0kp%*Hay3 z)|t&E5*Xe_W`-t0#mXH5p1GtUL@QMN9q3QNJE>VnsNJMx*yABkSulTm1;$4SL9m%~* zOQe62MzcuR6T1(xRqBbDw~c6R*nV-d^WyoFif3W~ezp+jahnz4Wdy~2bYVr)Gw1O# z#nIsxgWr})plwqiN_ARrPA1i+gKTu^L@jf+G=iD=b^MrNd*vEZ+eqSo zc;8tOqw%f6j*n0$W_}pQjy0ys%|6hvWv!fj88%#5fK~x!>X6DQK=#l~q+`M>9x5K* zDVaF#F(wNix&h|h_l#@z{yd$ri{}f@yL!llc${|JgTSu4qNV-*TMGA*Xt`2x)(S(- z1n7KQco2KHW7Ay}dSZJy-GA4KR7&D`c}vpPrK4xE?OVu+5_ZYStX3!ihSiUCrgPM>um^P5=}TRJ zAcGuZapMErUNKx7iQ6qWV{4u|#~Z$hd+6N%koAo-bkB}`p-CE0g@sLYQca_tzZtlZ zdk4G1=(xo1Aq}8(?01a8pT>U3s+z#;TXpgQOegVxVYa()n28;ATzdV*kGm;}1r15~ 
zyR<+X*!QM2eVO6ri^&^3PR`>g06A5*+}NVlG$fVQoV0<27bUuX&Q9D)4e>#&Id~78 zTzxG{aa;z_^vmv)gEqh2t}%Z8+kbD}XQ0NXyZO{<%V+>t6ho%*UEH#qP>#=ndcOJh zoM1>xFQoyVIlsMS*!fA}*#ZBw+Tt6-=)@Uaep#z+++2W56*~Q}AD7;s?FP*AUB9`< z&K3SvJUkVZmVBUFHiYbun3qC6@9r& zZ;o61hu{C!ZQ#*-1#wyR9B=^neWh^m@d=WhjhGKNeQ*tNC7eHdniuOHyb-0AJev|p zEJv>@$Fn=$q0XzyOOG-pt;Y14%yy1st%zp|GxmxLMOrRbyP*p(Y8-ca-1(8(QIHMX z^XpJVRJuLCd4m_;5#ePZyVjD9d#`<5Q)L9mZ0{tw4xwUYl?%-KUIVr#Xt=w3FzO3f z%7u6qQWsiyTpHZC*cb`FgPkp^rz1`b?(*+3^T%=L7|FxPT>RyccY%n`!2J1-u9bQi-~ah>0v4u5C9!4=b;0FXiCB5FsW zKBS3JAca@qo3!blV33$}&ibBjo2|HyrV0X|pmOK?%8*R0^(@}Fob1MqTdn~jsI#ET zpg_UCXZDkOAzo0S_d?zB>iL*zjn(g}$DUAt2Ja5jOQ;piIG6 zUpGD`vg6g>#?DwQNct}ol$YL&FeuJo-?Ug19nRsQU!}R&8VQ1Gxf`+_Ov)ZQ`kV?S zsb5_B9Z*(ssdt{&%9#RCEJHrE)){_B+0zp`w2uj>SId*=o22k{?sMsv41|LU@whV~ z9sUid@sEJN5%bC>81dhnJkMsN@X`X0z|)s~iVnTyBC38TTrtZgrv}?rAew)&Pl59nfc{NGH~PXG5*IhU@JjuBtc-S=W+2(6-!9xfbdC~Uzc8Q za2y#Amp!AUx<)pX);a@Too^}`QEJ0~vsb?@i|F9i3;gC#Ps$>$-E2L1!beeopidWem+@o-Px39NSc zhRiVI0-__KV+K%MdEku2tJ`w1yWZ;qqRlRYy!v$LtpA(Bq73#giR7e%7hK0NI4=ynPv(w^8d-sA@@7>nm>h^wRy&n)3_XfqZGXH+ZBSp~u-jy1-7k6P-vn~mfBIwSJ^7(2_zgNG{fUfyd z3N6%?-|K&|B4s0iVdgRr#2-cAp=qWT%fy!`%mE`SV>DqHr0z#MTE^Hz8P2HE3(+&Rsup1&Ht@U- zjcU-;xjAtlcYY6UM}Ocga6v)(|4v$f4Y8;eRn0{>pcyPLZj##gWoB=7vc8ItVUqK? 
zHnk1DjW&HeRy-j^!k5cAve5mz1ETz14IM$e+bl^sbUG3yWZD$8KzXXl7HPR5yOpkL z_=C)BV~Raoi|6F`EiVD*7@Nc%%+d@H?yhWdx;r3~cfQ?Qc-I-@9uY{i+qAwoQlJh9 z=r)L_H5P?bd)(yCm>?UZNK3i`)&!`M_Zco-g1nQ_;ur%c>1p95m~sgvA28@t*yede=5R_|MA8Nh=$ z#r8$Edi=xu(F>}2BQ~?gJuB0FDjp8qa``eEw8^$P{gc#YI`mFsugo(t=-7_yZ}wD@@XB{enm-2 z+m4&EVcjkD`SRcaQ~PVYP|zNGk!|%@uUDUlleRRP z(9G!})TC&^$nwiq4A(2yC}{M|9%!wv_M)!S+QX{>R<6hBL`gq`(rjDJLLAWJ4 z&A2Fw8IfZ~&9hDE{#PhLb+2oj`P0hE{bLuMASTca3TiZm;{C>vgYe3@sJM9D?1qMb z^xN(cO`%SOItIt#M?B>ABgRFl%Nyc}9qYkmoiNj`Fett}o`$Bh=YOtK_UgFeT!+^A z5Bu%ELy(&%3xnG!SDi}yVZ0|758=Q^g9AyQ*aMGB{9cV)-d#-Kc@+QIODg+2Bwf5&r5 zFqDnx<`>I^H?1KGgJya*H(q12Ndg@^hfJ|hM~FG|XD?oD8YcMv$R`=SY!Fa4Tgi={u)$zeQATRvpTi&N#CAT$6bvGz?(t9ognnPp5}!kr{-p z$mO72^&sw!NfPIim_rWV*y{asu_n5{4$4T=npxE>84Y(66oxw`_5v#Jo7DrIv($hZ zn@0!dF+Yt9pYt&>%Y%l2u&FS1+BAS(`!Cq$h5IDy3sowG3e3EzpN2Q+V-O_&IHg)P zAqakYtFox~k@apaoWVowIDHMXtV@*5#DjNwcKbk;68dUKx%X~~3m9R_&ug^Cx3kLUy>z$lxH+qMwc9`cOg%LjR#B77PZCXp7a-Y( zaNaYCa$ktPe%+>s;rubBH_qZZd6LTpF#vi*|M2vf0sMdzg1IZ4%!-HoLhJs@6Z1bD zqFNqrv|xd}4S@-4+!tm-wEuKMIimD(G}j=w8eEAUB=bV(TyZd|t0dd5U*PtT$TTkDOq!LV4@GgWSl3}5qqZ_8&8azH;l|Ggy;e$Xhu zBV@1mC+xof(-ID7WNttSyvovpyR@D3-dPq2O9uP6zZ zS@Y=BrcjbQp~d|>SA8N*zzrGo>u&n^Pe>7r*29*s;iB+&Xlf7)Ei!#aIgHKn>6tG% z(C-pugRVcPCh6Qe>STZ%?j5~x|Dy-|$F_u)0ugA%P!*kt^!{mE`RBXgLqOU@ew0C) z*tGx?G>mbx6Tssty+~1h(Ro*Kfty3YBC&z;ky;*p`PhFrCk1&j%!BsHI4FwsjT~UQ zP{Nq^_+}e^x1v6^epzi<2kiWz+a%p7#F9?NiTKOjXn?BkuVS^WKZla?EH22{FV0y27Fa*5MG)6n>;ASOz zPDB#I=a_fjd)8{IT|eksN1{tKt-f#T;GrwT8ye8}P~uL>^o#xXdp(0d0bQZN!8`|? 
zjE158@t5JjJ07~s*6x~M6V}$hZ@wG&=ZGNzhUa`>C@br$TV^8K=!;|&F&$xn(XC2o zOis$rqq2mgTufg+f5y(YL4HCAiqPZC)zTW;$a))$GWzB(^wyb1cLu5|ja>K*YJvq-8uD*MZp{dUF z3fKO5rTgD$?0;DeC_r5aH;KayA%Slu1972c@nCei^^&W&EZX58S58`@ATnx?YXQGl zqpwYw-TwG&u8WQdtW=@;;sIH-?S$uX_jHS+GvM@hS7QskQ6!%R?^cL=-)=na7L_FR z<*RI6RxwtZnlZXn6-L(jEA(rffL2VBaMz+cUu`7uK%+~p+tQFAZsBzU`2}b-x12(( zRcs5Z-UNiP|8rgczQ(|_v&geW>vJS*^MA|wJx3@-@v^+wFa{H-Nyu=*Y&d$;=lmGS z^CKFih3b$MkKSr3v>2e;ms6S%_lEM3R?1yXJ(#E}vR!Xy-`ZUykLfPEt-!yWTa7fd z_T6&OR=^*goCF`bs|vw4BuM(`QAH0-*s_euhP^MyKq|^ z%ynP;Ip0xYR~(FHQx}iip^c+;H5LrFZVQ()om7Wf=zD}yHuCPS7F}E#*!02OqrtF{ zpk!pj<;BJ-*djUA#q4i`CYZ$^Z56H?B1uOh{jNhjUBPgh3=6UE$?XV80{DuuNeA13 zC8#5o8j2&DsZ8i)p-#0(G&3T>oBhuAOduyE zo>7!yBaWo*Z2USa%fEWUW6$R@Xlo(vR79XKz0fAQJ2^~){d0C`98L@SUS8)*+r@$( z$&-X_pSR}yevE3=Pw(cPTb+IIDr|sEaByHU>yA8p3}HH~M5(hgPbe!*O=V=L8e!Lr z8>u)%8y);i%o)PQRqHp_p7aL&EC$4p+PZp+o>RdB%NQmva1%wBVYmZy(PGd9s5YZG|m!)TV!7rNe{!?*p{b2 z)iI(2(e{1ajTNVDTM9hJ%A3}UpoPDr4Lb3>WzRWey>Hou2g7Nf*ZwI$@y~kt1AuHM zh{SXMX4Cp-0@LCJ^)f`Z_(gA8&kMYB7uq^f?Fgw`jQPhy8e>ZeI;flzkX+||M5Ke&XTge z+*3AkXLQ?-Wb{!zQncrV;OtVf{bb1OR$Q}g7q}i48-cH2AxRvu+CNb9UO@zXK0>dg zg*9OErjG*Ir~|>DP@ZPDdSb+A$}9^r`TN#j-zZ8%g-X|amm(Kswrc7CBSZb>7enW@ z1Vje&_n>x+7F%lqzR4*rG6jJdHtnyZq_}IgFgdYcbJ#B{^G*oxR&1i&U!yyu*tXNW zY){eyDxri`pIow)ym<(P!ygX9$ekZ=>DCRATtrZ|4Or)U@_I3GZ;Q~QrccFKlRHIR zE@XuX2_`s6I)*z&q@!t*2&F5AH3@#^y!Vw>B5eXmvo1q}IEnDt`A}jQWwWV}?1+Uk z7P{Hr++90R#d95wXA4#~e;qqbnk?}7kNeV|JcVS@jApQKQ3|Hf62n7AOcy(cNJDDoQt6*^uJimZ> zoXsd-K6quQrf&#aoCNzT&&eqWNa3X)yvp;@oPjHEL@|mrJ2+Hp$N{+xo>5}UZ_b%7 zPw$#I9uirTyL*n_v9J;b!p%E8+iw=e=6>-*>f1h1s{0^JJJjhn^j1OG}VM)9-e{4s4&_iu%#=OVcztSV{ z;|;tQFNfSUfvtyKcU(#WAG3NirqoCkCQCx%^aeH+e@!AZ(SSV%%-mh;d*iWqY`yBj zazg)&mhT8ECr3&N;Rg>}-|~92DYY17L?qAEzNiFT<)fE~4%MhdLLTQ^y;kEA`!sT> z^N1rNj2F%`#Zm4}&IX}9R_n#hIwbTfeEBLfQ zfN3H@bFkIE6aRMkn5^Llde+$)yAO7))U`VES9On=C;^1VFJqA4(&ATpopI|vS?%W& zYnOEb8C?6NUjD|#P~IHSj`2Kl0yH=*RA3QiwXdS8_TyubhrQb`LWPnw=O`Eam+s!H z-o%CxtY~(=`df%M;p0=kZtmXZQ{5~%w)#NQp6FL?!tzH)`^uB26#nMF_)hr4vLM21 
z->f8*W8+V*q>b*EhIRX?M$d^fP)W<(>K1(A;Wmpb3C{)JONaHNt&n&uFEVfo1Fjcu z-;2(}O0IqNTa7(M)`ztEqX;x`5>SMO=AyxPTx@VF^4!UC-(f)`5$|k!ddMozVgdrc z%GdZbLLVtwCP0ZMBZUuL8nB3T2uU=s*|{<_T6Y<$G+83e_fvle!PE>WRbXo_PB^uoM;J2yN_9iUhB-46+~pF9)9begT!nZGC~#>B&){_#@?y@ zc%ksNhS^Ojb(1K;C{4-I5}_wZIMV&PAR$Fbj;Fb(C}Oz>pL+E@HJ*Bx(Q}f&S&=#h zwTP48h)kb?d!_$$RC@tRxt;t0<<0vati9*LT6AGsP+-x@lz3U4?W|cLS+!sTh%kk; zsTqBA)j)1U?NCkQ0!_!=`>OnBe21B9f~CiuB{9CA5OW;c1ULYP=b;nw+fc2P@qlPK zQdL0{ep=zdi4SK46|U;A$sVf26XVmUgGkAj4xzRanw0O~`yVv9vSbnajB!$L_LZ*t zt%`t8ypC8%Jjzagj>NH&k>5q|x5wlb$aa|^>fh^OA0Y#1`XvaXonX7@_Q!A^9@@w} zHQ*dxapLlT(j>1j%<4O)fS&MbjXdKldY@xR3Mw7|whkIh!J(}9yj109WsQx=#U7tX z)=|k+bH*s}P{$g^WIlGJ0Ov4o-{g4isrlQkcwyqvcEvD!#CzoduA8zX_e}%%vXv9G z-hQKRb-qk0j*7k$YUf|ModhK~940C&(gL!BL$V38yXuQde6yJntXd%>{$s)0Kf9C2rzC8)Nu6}8ctg?Fz0Jok*?F6)H8yuhad;s1#~91<7mXz5sKBWKUk&|8KwlQXBCcdKLT$zG>x~zoI0YBR=QDA|lFx!0@(>BzD%Gu(RDQ-34`g@2u{#*p@ zI6n29+c_n6f=W^v6Rq77jEeFG=)Izk$5sc(D(ErHSRDnU?j8Y6jrPKTd}>a z6NK%h#Gp=uubs(qrq0hP6Pg3=>lDxT*Z84E?-kz|q*vWe2F@@&DP>L)SRU`8Bs95D zs@rQ;6F}-73cUIrJa(KEYqJwpjShcf25#dENO0fIBeO+q{<*T}#Ji(3vbG+nPK2`} zKheAyi;_1D4zuu37xLSq_XK~{YgC_D$LKcYblad`Je1u06E-n&xXe2#WA+S{vF{b= z#RSqQ&D1RiV56CI$=d2_al!Z_+-rq?ykim8aeHVaurq)_EbJS4j89Uq`p4bEKe9f+ zeFZp-t36%A|E`}~+8`h+EHrIIQSrjKX}nVEv~^QEVn;){Rg`iwdB&CH6cDrQ4S)d6 zo=qLdq+-X4rVb-)_comC{>Wyo4;onA@8efT$lO3-7|pam84iSPK>e_}{;=K9c_{Jt zkto1%2CM#JB>8L^9;cp^T3ww#^P%cuo-X_at^j#=Mebl34estG@vRY!le&^&P`!Yw8vR8n3mkb;i;6cFBY_mX9 zXINb=TaEHmuz`|Vz6BfJoa7xd9HyAw9KaJDX(7R1yMPU>%6B&7a`e|h=2tJl%cH!` z#w?r;r=&Bn8?p~5O z0-L`hb1tG`kLK+oGJD(KI8OM(FP|gQx%L|ckH=RTU=JJ$ugF)ZP2_pT_~hs#m+2`2 zcL#m2MU1h`y7LtY@uc}>*^A~?BrhB5g23Oc>GjqbZ6_d#=9I*`?hRlXPk~wEki|EJ zg#q`m3rGO{A+fE`6v@fy2Xk!{mTiO8j2Yc4s2SeayD>kUP;@+B{lpegOEDaR5^h=W68^ajDiGHP0G)o0s?0>O)uUB z*iNmtk1zL%%^-DJI8?QK>N5C=C)j}5DjlQwT@R(B04)(85Dr@)z?VkTY}8>GV08|k z$;t&SuG$8x48w;qpS;yv{a@lC^=fi<{;V0OI8nQ^e{qsnQSW{sGh5SURWl;3VA)Q= z^^Mq%^=!4c`!WQ3T~p(`j6}z5UQch>XBCuEVZ_|Np}M>NS#ih*Op;0XR=SR 
zHx|%U`X8}}`)Tn4^)WjnEIZBuPq5g0*?gI{c29*ycz-;Rp7)c$_tn(X!7u5??D4g* zXG@=5&&Ri=zZ5=x=M37x3MLS*vNjT=Pj)+H8Vw?zFtJukRhF#bNG_nJTMOz-+}^lm zoxe$6kW)RJgFmlxMnP<0t`2K@xE$_8*Tjei=I31ng6`tZeF%_!_y6;)0|ikcb^fb5OUQ*c$~c$}s41R}7RSd!>$_D2M` zRaJPAVC}BL;kvoD*dGKEM+6tf7Y!S~%5$K!3%x^X2X>E4%^JT8*K zR!6PMt+Lq4Oi}mt`ufrZ<5t8{sEGOgCIiIo+q>xHKee`ZFxjk~8)W1K3N9NX4{byH z&9;Z=eT8=<*$0|$CGyH~j>CDi8zzhiW8{8z!dc7m8qsrBYQ-(P2L#`)+zQ&>X0^Xr z_j0ucDw`P6TFny`9jvZxBCtVkz{@@73*G>7;TkWr8gJ3=BuEn6B0d<>^_-_H)85=4Q|LIt@o1@9W}gB*Fq-OK&Kr zF!wW!gk>c7e#Mg+@P3tc+J1qPCeSo>Xh}nwZlRfTKfwM%%~lv~e+fNYE-INA%t|TE zxv$t2q)#%Y<^JRT^2PM{_%ZY6J6KZk<=Xq#5RkvS%ka3}24Ow>cWgej%-tY_tYxXi zYzVMjcYZ{?Z)<$e*MBYm1EczWXLGTkv(QxUacZ$D(0{al{gykwTe&i~h#hNdc3s|g zr)w}c2i*dvC_3Q#`SoRB<*tRhhNxmHeFwt10tC8+t3v)nME~kk$hR^=g6$(G53Jsb zYcVJJ7Uy1%8lc5&=es)yZgSTL<$*%&6@npqK$finaz`^HM7{}SBdxY)53ee}ysRjm znbnLT`y}OdkM&3!{ek6Oe3Oa-uOerIY<;D#?o-pwx9wmErfqa8Y8HX$ZQoV8ZAP4tm z76dd*ff7pwBSoL-CRzBVv%za`Tpvg#B~z$ESVoMIUuX0*)l|NLJ++0`5J?Fr=?JsN1*FOTvsU!`#qE9ufaDoWWm$?oQFUosRT4l@w; ze^5BC&M5C}SP-CLHNQ|{TkfT@ZAWIU{Ms2?>vPd9VLb29M3x!HT?r#SadP+|l_8U# z^Zs*hBDLt);3K_3srxOMeEk6k#))DvA%cn>LMf5y1Dzy`%!HU({5Vv0TfC_a%d^r@ z|03(C0>BRS_CAmtN@FUSJ32+nYCiy~wn^|-m{MS3R>buX)c`jAIqkc^w+bFqCEbG$ zU%VlAs9Dz}IZg44H0gY}oK3OkFNPQr9A>oi)a&VlPT4FC=R~~v&2$A2lI_OvNfcky zf85x)Ruv*cyf-Cu3lHOWW!aIQy99v)GejV+D_F{D{s-EV5WnAQwr_p&u398(uN}ln z0&PDYlx!Ruj^r-ZS%~ulDC&WeUnyZ>qVnRJO-oC{Q+CH0L$Z5yx->Z|i)B1q{HQ~= z6h(1AJ^kcx&tO^kpiU%#?N#9|?{K~@NK%kU-$bA=DV9X?O?Wi%GFbaqf0I@}sY>U( z?D9u5hV05mF68$zjRR{Hd|dSZ9GCq;ochAE>49^J{Om0;P{+0)hPw$huQSXul`md9 zvXHDYA5{na+1L03=PLQO^z8KhxT08=*uW*WVU)KOf?ND9ysmn z^AQ;v8UliDCwNM0c{Q{RZuhwi^;!(?LNh;y24=uw&&yX;sqVay=A*a492c7@%c%1? 
zyU#^;*Hp~B!rv@sJSn3`j;7jw)RHbVjhdO61Y1}}D?0p)w7{8V9U&%f4A0DQzmpZL znfJ%HTNheQg+YRqtdf}BSNmpq6>fQsq;9YBdcSi-p8Gm0sg2)zTP$=ke-X{TBOF=~ zqF{d5JGg%g`oP4%OPywS@8UE}GGm(38IzqZ5((<;@NolCy2^hM&(SO{Et1qX%$iLV z++ce0;^G5|V93QGJ=EAS6uB~Wz1IN%Z0~$L8-5;jGZGroThM+$oSF_@0lKQ}>m5V; zC>M{282qHo`VVx%nX%n#Ui!=07Y5XJrstGe1Jvdj6&8cv5<<5kcZecMD{C~_#Muui z2|o{8#3_!dxThN_T2&BSm>L@Aytwd}7Z_R{%VevckotglulZQ`MdLpAYxO4csH3|% zi?uVy{cT|@3eI%Rr1-p%zkq3Cv>nD_#KOb` zWu&0t3!zT!d8AwpwQJ!|DSl?Icz&dG4UO=+t#7&s(aU#wy3YgFh;gs=K|(kE>q~&&bMb$GBx;NE;BBThES>R}H5V zFfn71z_+J&K7K}@C8{uXigAiQdFrjv^KxOWS)|8?rq!IpmwAufK4+491NB#t`)9S= z^2+Pse!v$QpA-=IJ8}Uj9zg@B_>ItsOt$GP>C1m8qV^$%-|M8Jl)Nt=^HvN^Lr8u$ zX1(A%oZcj)5=q@g^jY5UqaOlq$LkHle5B(ZQzP$oSkPn$@reFcWa%GuX6Lp)BFY)f zX(lFT<*SW)?7ec%DzJid@EYyEO|Ao-2czpIeRmakGeZJkf+>U49$oH6+(|m}5eu_E zbo_i;CbL@}0%|+Y-s{k1F$Ux2?LZskgn(rwo`naci0OPa8(Gn-bx(_-2tsl@@=+BYp?zE^e9&l2?mEHA(Y&`4-*bS>QDh-&?MiLnN``wq zY&ebx?(p}?8nC?-yQX9_v;W%0PK_tz4Q0ns!J@YIdF4dkdE5Jw^X;z8AXet7*gjwS z#{X*pxH(m0ucFSeg{*mo^ zSm2)Ote@Q{|GVmRh*nRHUgcVPS>@2TPy?F0xgXRb_lG_l&dIe5Ob#56$?lE>oP!b9 z?DUAiDbDi>y%alN37yOzf;Gh>x2}s5`a1<3bjj-9=3eHyDq6afEGDOOr1sK(dtj9* zD!^4Y6E4Ct6uveMAM*E@Po*;9M_E3ZF7uVuPYXd^{hg{zgJkL2Ip&CC@O17iSwLJ^ zu9W_b{P)MiY>5ywW!)?aoeu;4n8EpjH7t9LaIpbUL8gCisC!AwCsC2?y2W#|c5LNK zs*;6D1*cjYYJ((Ao>HSZrOGN?GdgPyK5+UZ{k&)L`)LVY9&}jNgL{-t^hb>wmw{`h z`pF5`JmF0wDlzJ$m_&KNSBPk5>N}!+Sza`2MWGteXtKRXMnoPlU!b67Y<^C3G=iw# z{_YLa(Y#qz0ppyAP=@sV-KfC-I=3#JmTW%*wsPvBi;@{|N;?TwnjX2!Z>#l@d?siq zQ|zbravcmm7W*+1_~gi(3Ua81&T;n z(9`h0YkYp8CDF;7Wdm0G-O3*$(nQa3|sA%>&5p$ysMq!lvR4TLJ+KFx}m zspZP_z!1^=09x)3_20kwL`03KOq5W#aE7Qeang0=^WmOD_{gSSpld2o;5P1KiF$nf z%yvG!M=fzG>Pfsd{q>uOA*8?c#s2_2o;=TxE$=jAp5apZ{tkt;SV52aVbPXqaY;$y zFT^8ws5XoIa3c4;b3&Pg(r{97MS>2oGW}h`X1LSG_VxMsg9+rUs=j3M#hb|`9E;i2 zYRK!L6pG4GG>R~cK^0>cQ_GwnsqeM~F^@)Ch#G-zw*zY>T6T2V7f_eT=+^iN!p8=9%LF1Lyz?R zW)4tt(Q2{|I?nGJ{ID8(88Hbz?eZ)D8em`m*{$&%{%l{DkjbN+2UbnQLZZE^V`v&LA$WSZ;-NU zLp>PMy?tUP3n}8KU@=;T*J-Co%dZUb1I 
zbuPBCk7elQ^?|YQVm9~Y`XStPfA|nwVvj+9Yxe(2n*VRJ|A9b2(ViF0TF=?I-u$A0 z|N0LtbrA1OM<-S^usyV0k4;z6PbTeVD^lVG!v-@rSx@mh0zBm*Ug_IRYUqW~_QtVB z=2eyreN(zwY+$glk!znV6g?~Ar=Ay}V0xRW$11n+S%Fzl%glx@{CW0PZtlHuizDFzMjTi}}h$L)|k3&dxhr$th%2#piI{#V{Tn3sYLEgbhX$L)kXSGB47QA}X z<=`NRR}yq`XKzFpSrgH83EZ|Yg{uKd$}T!d2!rHe;n+la_t3r z>e-1|rs2{v*|(HUvB`@9 z1`DMjnVu6*>aJbb^Hmkj^D!hHtnPj|`N;OY_sOr=&dkKdRU3PlN+u|K<;^<6EuA;L z$`}~P)Tpelw@krmO|?;h2Dq&LlT8)ujdm80MJf!+h$^-+Rg37FP8SHAF9I(1M549| zdQ*mh_I^4dKQ^) z@ZNfn_*!B}pS;FkuYc~mJhy#A)D1pS|M~^+ zqZgkR1N<5fMnA6$Tv{IAw(TzDmaZnWA$_a>j+c zK%%oOr1^sU>eW6EM7IwO6y82$MmOZ555PcQL;x*WS*##4HF;t`-*xF(7j&V%6`w+i zjxME5qBk7fqjrN;{>+{JTqpDCy==0`UkI|U#O9A&`=8nOy+B1;z*5T{n8KFEOM=IfJ*ldD7F zt~VELSL9UX?u>eyTp1UR12ExT z^o);TNuR>q(I;I;8detzqG*p>f70{Q>s6y&VVB{A8j`3A19-r0R9p^v-tL;ldAoW9 zllxkkdn;1nZ1}VK`0&;h>j>a2)UVdHiEjt)rv?d@J0fH5-h4U)ix4ZAxx$j>M28_X}z0Z<4=bCH#c6KpLekXAKHb2}cZlSk@DZgh5 z+KboG*s&S2@uzEj{n|}waj43+HXterSMS{fr-gmP<+I5NcV|xF9NUwC+Xr|QiM=eu zFGShF6u~4am6)Zfm0ymQx-4EIs?1n)U4yk-va=T|ef6QB5D}(`F9|+D@5eoZr5S6O zOpF!C2jjtz!Z(S7$$rSjX&mUZ?kdxSwa_X9Q}-&s=yF91KVrW72QLG&nI`pxUc6(& zP}MseBHVMgxT;(dL(3-V^-k2^01Tqfa0}jf@Tu)td~P#~GvGmPQY zQ6d`ZULIxIjo2+2#Ms^N&Ps_ctgy6Po$DGyZvJ|V?_r)lBhRV7nwF$1_UA{8ST}#f z`x@?PP`(L9%rtXLyr4@?8sqGJ$jhz0BeSAp7XdyeLar1oHY+Bh-MC2&d+;cMx$Z-=r{SB%F`OWqo zxSFwacu4b5F^?n)Z%&zFtO$uew+s?J=Eqh#dj-c$#(9HBpnD~7TqTi`%l z)+Gn^ZoFvzxBfwF(}-ZF?Dh41AriXd-OpB|+s#UmO(Xp32~|Jykiljdo+}mNsp&sr zu|IhV>-+~b`@2FVT7Xx4RJE!^6MX)?lLh&8Cdr#>Iw;_<^lWu15~AN~ByHXqzMaKa zx^CknU-byCr-P;{U_UXOjB(Mwet6TpEt8+-HU87(gBTJYXZkl)m+by)9+5y7rE}rZ zyE)A$No9%ZjW-1uf^vYh4(rc$zy=C5NFjfrOL?%;g2H zU-pJkeEIqN#B4>-*`|qAO0xm;WqH^($5kOJSM5VsrCe}3l?$tf2o(LT%SZvtqo9rv~hLvw9;b&X_c%-hD?SbzYwPHEp-u;g@}h?{_%@W=P8?;hxDq&z%Zm zTwrIEko16O8v}6jJwJx&wp6=A=^XKEE$0Do=>@fGT$F~MLjz8Cf^051qxMU%=gm_{ z#yrl;R+-H=)x%trj;V$B2(qNK=(C+zLPLi?9jlEHgPofTA4@mHlRM923YsUU*rGD; z%d1*IDdUdn8Fr2u&3C0Bp2n=^p1$g7%hzEl^eyxeb15*HZ7sr+BC{yd@t^1G&v=Rn 
zgzo#NI<*;QXjo^?>Qpp0n`D&xO(;p5nyxNd$Prab5SmOfyuw!BsloJ=6aM%Whr2_xyasSo}LFS=Qpte#fUVzY=f49r|u?*9*S(L$i zcJpL=s2=PsJ(?gnS@8|jNd^4^U|vj2mj$#P8x-$*mtU>pb%+U78cfQ2N7qf3&F-Ab zu-&Y@B9fD$V6iRsIL{}^mS3R#9h%<$eP4hg2)HyOFk968J$bmqQVYG3V1zP{a@>a8 zHUb^?JIKHBgOlYJU?bloej??tnu-|M5To`vA0beBnnOih#3@~gU4Fl7LCppx)+|`Y zsVSTK@|=#n_ik*JB$j=mMQtD zAG3DV)x-}3&1-Z|aj@6~?pADk50!x@%Vppp3BXdm#x{E#KAbCK{=kAffN^f-L8K%j z;=}(Cj{!O>{CS8=HKmCRh;0ers(gA4HY; zwG~mO4w&M;K7!A@Hx=iwax;4wIK>K3Np<~3{Qlc?gzV}L?>)ot=N6W?-zEIeP_kMn zfvjcoj!HA4nBv4%>i*GGh2<6->>YaWG=>|+>^oYY!J?x{OUqcBWlAJ3T6XZdi6en> zeeyQDMYjj_F8SHBFk*lR$Mc0zzG%z47S z?zGGOrZ&zhuYy`QvjEy3jHw+u;XQdcMm+Z^jApp>Qaa5Oa2 zv6xly{JQ7kdB1qi>gW_5(EyRTdy$&0LN7sTkxr`H#iqPN1S=jDzV_-oZYjqN9^6H% zW;bZ>D+06Q?=)dC4ncDpmyq=K;Hqw~r9a#wi{qHOQE%O3;s_EFOf|`e^7J+>yn1p_ zKOzq-)*?UF_3hL#p?Ab2`6wfCwwJ10S z@3!3aqMZv0#mMU#;4^0kcSP9lFY)myzIOpsG_&&87RKzfywuVS&{PLzYLVAY4<0&K3SQzKDJW zTlvFps97es!*evoOk(e%k|42Vh~?xU21i6^{IDou{L#Hxwmvu*Z6a(X)*sl-fUQ)8B)Di<7H~gU`#}J+$oe^Gb%2luSoW zza~tG%T@1SnmDG9*D&1$FpKl7L83bHgIy$*Zra(7HOO@g0n}@T7beGcREw~e24dJ9 zZ2LhuUy)aVX_QohKfhhGpz-DqUAiV?Pr1miqrv`z4uLQN6TED?=KIjpg zkI!OX5g$~H(Kl6&kQYiWsFdmXF$T@ixO6CG6)B zcq@>N^aaSCS02d8j9Ij+EbT~bVAbLyZXVJP9I0!`@e-qIx}bL?eQsAeX?13XY1fi# zMgv2T5gw>tbb%KY8M zO(@(O=#I4AGSbzxjbT5!TIG!ZQj5;*_4Ak%Ho$MfR5+*hcqDr8#S>M z=fzjij90BI)B6nWUaynV6!N$MjUZyWbZ?u=b@CyvIuAfcNn|B_=;w>J3S?6e%u^v* zzUL(AsMco5isGf=kwf|ByNywY@tWmNX)K+Z!7+s$CIz3|j z9<7+Pc`Lli->DwQO)^~jnS?-vSxMHnd=hklP`YL7*xSl@OrVdc0T0+8@&npw&EA-) zv7%GZHSHLzQ1xpXXO;~wkQhjb3?yzn5~4j)(EL-QW~O#tv@TeDAwKvP^yd`kDF9;C zH{+SBXZrg0FvO4gRVS8ZGC;~Nv=Xgx{nWuOQH(+^)s}@ny1VZrGDEewD1KNExv|DK$J^SMN1J|!G${gHs_eubktQ2k zg2mWOc4Ub7|c*y zc;I2R2zM4En5bCSA};q};{vc?dg`85D~4VLt&n1SuVD4B_}H^zX7jXVv%hbatJjIk zYF~d_T`89;J({i9CgiNSmTWlWVsE)%?8wh!tIM6FrKNLk*>U@MxS9PEGOq66)ba3? 
z6fpai3@UG?P7KgC>}h0VyAoYhn|NZMx>?o)TkA=`vK$imsV-p}*fbss%eS@@R5GIP zZ5!QYy<|KzMxE2Jb6jAvpr=mo{o5&-?$I*s5XsL3gIQaR^ATbL7YRG8Fhic~=SQ{l zGj^VuM8ntqL;3_<8^GlD0K3LAR0H6r%!GW0&HGWZCC5idQ|x(GGn=HGdaLByps}qu zH^;w3ZZMK@Q2R`5I1*j8`wU&=z4!M{Nd7j!Le~fIwu68O65%Ohn?63`pCu3>k&n$s z&mSb2@c&MDD8ewDb~-E{GVfkmqc9ZKmY5*PWD}%in~r^vQjd!lBaJc>^~Cdk5r?s9 zQ~<|#V%VT8UKfD~Gz0Edcbd=Udsq=%6C9@Jg%(m#bCI-Cj=Hr{)7~7RSgmODzjW*S z+L$8YrA0zaH?ygqpWryFMk_AwD44&C?kpl+7lPvS;P$woxYS-Ci#DBKt)Qr+|8;4` z&)yu!k8R^N(+abU%@~Okt!cY4Q{YsOAMmDwga{ansJh4JtQSwlIK-|f*f6?ayf0K} zmTW%!)~H*&Q(ab-&OyC{B_qE>&zTc%7@DF07AnLRaXNfVnKn{6lqBoxNB|tu&gJPmCD2}dbJuC#Rm}Ac73Qk?ca$uUE zGWt<#Sz2?QmcwLi2(`GDRt`1Got5TB8nXSA<_bVkUY(g}_gr}UQ4>GHbuA0=% z+naL^2WT2rn<^YmTi1d(_QP(dzK?m&rUGdS+{j`P1XJP0?I%U{EbC&$2FsA%(j20M zqrQ+WB3qa{QA3W2i@+l#2Irc`#x6xEiS&@;p^MeD4}JuqcNL|QAD7j%Gx%@zc{v*0 zr;|W}+`Vuq6^ZsEPGQ9>j=p{Et}?sa2VF?pM8-#dhem67=<8Dl{Tk!%YoLUR(ghl! ziuw-q=Z_);JD;RYa+qJn{X-1@)n~!LLkY3R5An5=rosnv9sMj>n&OIbn9Wai;w?V2 z6Dg$pKGL3QPNF@lK|=x$2wC$|!^IgpE&~eEJgRzYhNc`X(iHl*%sQ8+ljx$u&NYkL z(DCE;2>V39;X$82wLP^eCljp=_Er=Z6*;1Aj+6vSRkTsJ1=FRc8UuI-ZEYu|_k9lx zja}S>{c3TdH|C@OCWDIG=9kCWJWScf2Fu9s%)3Fd!i}l`v3=@@)>zCT@rL#g@>*WZlTa1?ShkDxc&_q+B#nSBvhd}sy|`JrhH4&vKb!luoA7j_90KW!c&@87kqB%H9 zp929{^10I2tb_Ho7s7xRK!YH))!i_BO({gRR4U?fFg@^CYJ6>04sG2cX^5~$(rP@Q zKpkKEB&EskwN~@DNZgL(wVUrtPJoK!-lFKYz(-2jZhVpHyb-~OjOUSh!?ZqNdhyoP zvJOG!A6*@+BuzgEcRwk;N@GP)CwVGl#CrMQ1_v?;gd+`!?C}-z`AzdOd3rCmA{;M5 zHMSFn-&{IQtCAYXWbmKR-~DDK-^Jf~CvEca63g=+GoruDZA_)01GN;w{3rS>VL=c( zPB=#BQMDd*fO1@9x<*aj#-_QQaH$$R`_e8vd*Y4Rw4_SG1|U?ZIQ3$Jn#-)4bv*Wm z8wZ>AS1HU?eRQY&dqeEaQ>1dLo#gRyU-RH)y^mb_#oW+(%a^B7tyJMbUvR2y$U%Zm-V$!KJanwY=dc`T~O$XaAa{)PapbKYbk|N2K z4r)pXYB`V_=sJ?y+O-3rjZa)#ht8373QLzSI z)`VJ;YcGV(a08x-iLLW&fYNo15B*i{-j3bxFXw;&ph`yGerW`W;Tz8%&F>5}v;aYK z*CUH~7xAk2PO$g@3j;7>SmLfnh8b_fPvYbtn^Wxsjcrvbq#sB|r3`(UFOC_Tf73)j z$#iZUjQgB0asKAgM>OFA^Ttug^4-%1tJYzkWC5ZZxE_~2JOjMqm#>66m-5o+6N%=s z$k}J9p^{KFBZmBScMM4JR-k*MWsv}%A_xu8m5cfd%PXSaZZ6m)EWIl2e%*K`ucG01 
z)b~D<-2n$U5wVcJ=C=CiCVWsL(Bkow4xGsk=s%lzvso{x4jJp(CK*}SLuuZ@+dbp= zFB@N3j1EMmaz3|=MiDJQGBZ}3tZ-*uwRN>H(9pqN;sc^pftGjjf>qYtOG~7gsr%pe z;Uart6|gr66ub|b4thS8NQg4i!y~7DyuJE(jwN&dUZ$^F<@i8~q#TSvGV*9D%ElN{ zS0|K5z|zX|2D~JswKwSPl!-eglfS$0;X2CsZsMlOsoW$r@srHTQo4Q)&vQ)hg5%}c z=w85Jn(6%-iDcY;&7uDCFwmyUpt0^`CnmW!uyU`FbnvQ}W`(MtniJY6gK-YaKsNId z`SIU=C%(Gxb)PXvuqZRf{~f1ZKqHBCCh{=_dgqG$0s;Aj04RF0B8^#~0}7+T%kE;0 zbZz+fCz&6WS{)LM($0?G2YqMvxdES$+C9+mCqIM&dCHSm;~J=DNjQ18I9_W_ziE6e1TS^BUZD0#P*`3r55^6djt72E47GkaI<(K)boPrcDWZL`qn!_ z3^`qSUSEdihl#mka)9~=&##A3LfXJlm5#D&o`Gg4J0PhqWK#d;e1}2tdQ?b-43(sqM?Ot9iP{cH#Ex zrTW%6qme=C2?7KL0t$p$uWY`5pBIW0#6A%zza*ml-`R+N3+9+BLc3=~i$SWM9l;p8yz_ofT;ypO6$4pao(6kg6jEQ6-f_%ok@W;j4Rk3tsB(adYt+q) zIJY%FOW1Qd3x8vc91dR1QFtE^G<#^z53(&u1=TKQL`5^~o*>;$z0&!deQgqJ%JuJ_ z@&*Qy;XOAQGXap3phNw9_hFE7yq8Mu-GBWeKN1}K&Eo72OFV>M#B{p=zryRjqL~9>gsN06M_8Z z6yWBCba{9Elw-r}W3yOH?vf}h#&-!p7q|gG zD73{_O5f-MxfMSfHZ1Abx5-$?C4)g;Q-mLx>0F zWI+CJDCr+IL2De_9C(2=|Aq7Vz9!OrXiSWW+6~9?arRTJ0`oJpU7K<8uvs9FYS?{s zI-xiJ>Lv#dH{HULH z1*Q2g?qC_nwW)+7{QFT}z<>f?;He(GqDFvhH!u~39=BWF{S5J_2sCPq<^FHP(#Hk` z@}4xg=Ec5?@|N`6s-9^_53-ZoFZC`juyQZWo~2Z2$it;Sl@2Oh0%MrmG5O*V*i(JS z3c3ter!m-0-7^nl{Y)O5oUk4Pt#pGE# z=w#aTXpcrI7zjl*oX~NW0dJ>echLt9IKp|8GxYEZDue^svJ|3fWP-|0`lr0%dsO>V z6Ge|hh%+PVIJ;TxYGipJAvqIG7Lprj5H7uPynl-ag)pa*dYRg4mxwT55_}#|Txkl3 z(c!!{@>WB0^~wKfOCW!U1nfkKOj<#~Z3Prg@IVjG{ak{GV(&WE7o@lP>xtMEoBMG- z%tr8+_g^hy1%YJsp)znur`t%O6MSM`T1b~S!KF%9a7h3W@{seLbFA^Xr_01DCs>$g zjw_eenXgh0|SMC|@|Vl(xC4xBRfj-RzE zKT8S3m*Iq9^A?Y}FI5V22n}E<1#I5{fdr!|zR$z? 
zbB61M`czJl^;c5osdX;3suhZ~QjvT2LeHe#AC9jQOsg>6&JH0X*F@#H(hNKq+s5a4 zv@sY2eE^lHzQ*!RZDq8NT0z-L87|pxhCEb=wfeNZE%&dGFq8b}9W6ub3HkS9y?~C8 zc(G2yLgl6S+f=cDn6BU2RxQ51Zt$ry^s*BdZYL5hiVR}_D>RIUFF%9?tf@w-+84!K z%7yQ@E7NDh3>5*i3wMgMtE(z{&aXC_DxaqaI@+u@RRkZq8Mo{QpU<8dsLRT?_B(f) z(LkERsP-C*s@|)s*sf7aBNEb&a1`0MeHfLZdy1rZ!gF`Lf1qxiR?tI<2H_&s^M2>Q zzhXFnKD#Lsfknu5|AQ}N^L2CsPuFuH5?(#3-tqcQO?6*Fh}KA?qiAqwM|!_$ zZeME#5RIv@wivVXz|y7W<38OY45H~D?CXWFi0{Hw0lwn=fKj%39~Q9!nlI-5Cujro zG&yPRVV_p0HOK;^V}@%_z|gcXP$Wi>@I+GiyTBh3X;CWgb{&OE{y`r9HKTT zHQZAB!ooE;|DeTy%;GR}qd2y(I1OnZQrXrzBD}Jr zeeLn?MI2RfSN_V1#Tc+YE+o8@xRQGHJbAJ&OY|^}ICFGRMLRrVfx^2`otN#WJ!Ohk z>wT`MoG=g9oTMk=*m&lC#Zh(}_ICCy3IY969YTS2TF#z7l&wSWLj04@TPql-h(GQp z;Pm$sydj4=XefMIgsuJEw!hOGF8Fc94fr@`iGff3Jp;bAADYUh%8MGNbZvpCV*|dl zK9VgE%>iGX#3+?4GMUQ-=jLtM2D>I~Jg>WXV|VRdfXFN+=tEzS>K)nU{0Hyd3I8Bg zIpg}(HgMK~g6Yp6#_{%s-Qp|tb~%qs4kPgJ$wZ^dFBqW%`Ip{?xwql!KQvb|2pe#G z&ME(902PIQkwB2!#Ah;z++i}EwQ|nDpPJquWhLOOvp6?t)im^_gk5)f47%BC2}8YZ zkkdH$g|_hJHwc6D63PM{uWGpgj=~_40pnJgdJ^aFs6t5kweyIhBwIS;lySI!DQM@{ zcf>D?=HtUsNYBUl+eV29L0yLVC3>D3|DL=Wji3QhGZ?BeT;E5vUS^0ary{^|m(u*g zRYFh7=m;I)#7yL?^TcSJusS|#{W;5Ah@Fhi4W+V~e+D+}Giqe0X;X9b%*fsBfdEEn zM2iV2dcwkR>lyJ%lk5u5nZTihskUS;L%yDCBy27fJt#+GL zA8U$>5>JOtlqt(_-BEWC@-(i_VL=DmPdUmCDfwlw2?&vr>Cb*}qdJd(Tb$lpkG9() z^hf9(NSl&F_^S-!e}QT`>UkHT&=T4%y}eBYbf>DX`ZHXc8bTj?Nlpx;30(ai5xoR{ zxLa>uO-%)O8wo+|WD`c#2FAUIvcFMXQjyf|FOTBO`NN?JpRQC%`{&?hwFO+`u??v< ze9%Ele$RrK1;Ib2KBhZ0G;cDxhkcz!ftg|oGfV3SxIoRDsUXj_5(j>!v2rJl_?p3k z0wuR3ytYHRg>tKz`*j;DU&=GD$39!zA zHecRpU~&Hxz>*mUYC4hB;A^-D+}~Fdr7*lxx>W(QV#v0d&Q-zi<(~xF#0?}-T0L0! 
zVEp@a=C1^xK$TFqB-1=FrId5$7*$^J5#8O-elIpzLS?H;Mo@LGv_D9JsJd7&y779y zgaZ6F#d|Ocq%{jTf<+)ziF<|l!EBN2kGcAANF-05umwMnARw9vwlxdO4flN9a`OZq z2sDGbq{%jpd2tq~lXtim(*$o^{B2NGdjwq-JWSLphC$|!KRf1j_y5S6Y4@R)kV4oP z-JC_xznJgAsc?w2;zv<;32G?^M_2JHnWPEj(eF+^0F+(rY39>DteN0dnw{op_AA*} z(U5*#w8HIZ$3v)9eY&relRS+Md-V%vf$+?w;_}(%m*)kG7MfcK8e)E25Q8pYmguLt zZl{mW%6d{AmQuoWFR1JUPpciBv#Nsx!WRlTi+he$T6P0Xu%s>p!$=uKh6^xa5dV|$ zG8v(@@SQmSsfB$5iDYlRACV@Z{}*>@3Dd)1$x-k`LPJeK<>!}|(FV;35r2K_h@f1o zo9h>4%j3q74DQY(*i2N9N$j{J(&BQgA$~wfm^Z69&J`o8FPNo)i>A;8cf)Egi5Fyi z<+oB-T}3kTwi-ss>Thzk&fF$r(8mMsA0+V;Sj#kP+o{_U zF=!hiW6WXc**}EjOEaf*7K+8%aKW36ibdkiroNuZ@yl+aD`g{qN{VdHocMhDSahbY zU!;Z$+feyQ4yIl&E3Si{#^m*0-`*A)1+OtI{t&I>VHS6=Jwuq{I1fkkPu^V!avQ$o zztO*LGI5kp2hsnxkbueTbc`dp8do5!NTJp6$8QW0QOe@!L5`Q)4e?F4bF815`Y($f z(#16`>03=mjqu*tOK0~qq9pU=;9mW#cMn^d*(#&?dCv;vnlX?F!DnCp_sLN~1D^}o zx0RIMUjJ!#H-qMzf2Bwl`p?$0zY9sY()68e3lRlwwXs>(Z#>dtmK<(LrwJJ1+Yu@E7NjI%39wJ-w0<`p-lyo)0gSh#~ zFvrY7QIgX^m zZ4kZ6WNgVsd$ApS&r!hD7MYfcVzP_WF}&25>gM?U`CTy)*1Rrlm}F1+oLX1|h=Ky> ze!6`K2&r$U)6lrO(dQZ@7NRBh70JvAWS(Bp@^-6ZYXh57t-2fGTH)E52MI;j4WWYF zSPWN+X@0iL{E9q`$7_$_kJ=NOqE;Y=j3~!vVj_cx{bv*i1im|!mG|%9{v!_JMx{Ne z3;S1&_>1-M;pahhi5Z@=?BPvKkc*nl+`A(>ofQ;9F@5KKW;SX?m)lV`G>Nwu?nkAT zbT<--5yj{!I|KL}1ATMg^02jfXpOj|~$PhWLZN^-0iMCT6DHenCPO;m1 zHF$aP>^522-~3%b6?V&SnBHrQkqe`J!w#)ueputiWE+@rXR8YFH1t}{UNo9*Y8#&0 z;W&}C%jhUT;}YRQtg~*(Wf2hzq%dK(iR593*3UkSfZsjapS|Z&kY|#r-j<@Pw3CKb zc3sl1in=_eZ^J)NiaG`U_$2|_9d{^zv_{DHhdjEN1(;kb>n`-O_vz?lVy+yZPXyBr zU9G{EK}g{R;#=y|=mr{d2cA-vv7V{ahHQ^Rf-q0!%wcs22E6&KSBc~O&Ce*ZeK-bh z&NB!yfBSx4KvO~8vt@t)3%{?BdI1H7x7;N0j@S3rbP7)O>U8dqh~0bb>yaLL-OpD| zD{{4uBjTxsWvduvD%mdO_%PpGoYr$~f*!*-%JmD@qtllwZP*W1I`hLds{K@nK-QyX zj~AxWdpnF5wZ4*$kE~EA87k(lzZT`VPOOVMFf8J+yd6*b4f2utd0!< zaH`$irF{LRE2fTK2{9|ExhLaPRkc|vq)ol!Hzu&jwi@o7-iE07t6YIku4Wbz;SFmx zYB)on-kZp-Wss`Z#Uei#_|Gm89KOKQ+=D7`mWf1A_ zJFH*8Xp$0H2X`BW-mrBn$T&yZY^irq6h6VxXeAjye80d-Avtb+3sl;zBtfqssQNIQ ziAT!~Q4+qgNbc2Sxb-`T1~F|}bRy?@*@#zDgPvx}!B4au*9a>>sq}Eo##yw*OtSX%_6EBk 
z7nmms5BAXk+TFvT+rfj}ih`qyj%w)L#k+VzHM|~MKQ~L$4c2%^(kAdVMsHcPkde&3 zBfqbkkarw+)OQp%fA@HLPwaNq-R7X#!QTATY8Ym3X$6YDj;&g{>Fl!0mrgnVp+5RK zEK0#KFE5DFzlk^%KCur6+uiVu;J<4i$h!+ktb-W1rbBfrO;0`q$ZFIF!pa7sGDvo z4Yq(v=ROngF81E3~+WYnQQ*rX1Wx)*ZM! z1ar5fF{)B;V9tmTX)(TvKk8u}YL9L(Tdnq;uPjFraIOl^8Eb@je4rt{RD)hir@ zCf?Os-s{UOc!$T^GDvC#Zm<%zMIN~$>=!kFCu~aW%y26tH6nQ!dRqHk;=vr(02pdFwSBz=A_ijET2d1~T`B7UNN=z=4!a^T3>`bo&sJRV#pATW)1r86(G;+Ht^o*t_ z2sCKUAXESnpmNwz;GCc#eVReEu5YA3U#ieuU=&Tqwv$wBy~niy8BFO&*B}qmw-b{Q z;%vo>mwb2a0%EGGhdTE(X-`+*--ao8Qu1|^(rsZ%19Nsyj!n5g@~No7B;EKaV#Sb6rPxbJO8U1Q(coN^F)XNcSIeKY@AMBR&SlzicMR-8`$s1p}& z5^P~1KRyB;R@*Zgls~41s5s`ot?-SsHlR6*SG8<3kBWgX$k$2;R&qJIhU)!WcRxJV zGbTV(dIa#D#&*l-+p+k;+Paddo`+F1zXkDOJQ7nDF)zI&R>yjd@m4yL z;E_XAdyFgjmHyY4wR6O&efjExQ{ZY&$!SqetI^kD6VSBA>rjJc>FoDQ)%h9l9MHxg*YY z*B)Jt=qEowMjF7lEqLIPq^8^SdgX@nTKrc^@$`ao)PxJdxB1wwE}K_(?1@Se>C#&# zAu5uCZ`%^>C-L(yzSLH2lRC9jz^O%7!Tt{|<_j3Sb_sYN?JPvd8FfizvN=B?0~=J{ zt0nvkN{8s}>N2A9jn0q{rgcizkmuSL<2r3->4)ttH-miJN;jvv4i$_0QcsxnyfbpA zBciObBnxuQrbj{j!4&l)IeeKtHz?+#(}pReymP#c=1`E;^q+Iyw9z;w&Cn>?w@ZZg zzh|Yw)me=<^W}S-{H&ujc$BIzZsnOgJRi&V?CLDTqY|(QLG8YTbnc`D821Zck9$no zRt7J#NR|(mU+RBz570$+ymL)T*N*rPQ{00c6oRGHlWNW^7rK-pCy4k=g;{pZY4R$~ zzW;UdHHZ#1VGh){FNF39bAgOw%_~mQ`zW~LSC0g&m!r! 
z@U5S|xiSk%7h}(sIb8{5B(|pLlchLQCvrl`lY{*|3YL(;8hb(Lq-7-~9yeFy8v9c9 z_7Q^5Qcb{dN@#llPWJ>RlNH;YFV??S ziS#8xmaB^O5&Soo@n=QN{{p46g8r}2hv;kJN<9=ydNBXFc{Dai>A$1lI?3sKHWTOq zr}LbvR|TvEUaL8tB>|=pqPiP81h902C=6Xb2v8;Lc2>H(Wwb3v9UnTLZoLq@>NL*x zcb~8G3kULR`G!I5OF22WPVEoOGHllXcx&W zYz#kkog=xqC2yFx&@al^2`!Y$zv9)yER*nHjJj@ zP(q~z_-^BIR;CXN@t33YYQLrn`u4qT26Ap`g3Baomtv2k^m7C)nFvzH^ksPj<4gj7 zYC|&F_VJw#{m?nYJK#d)GuMo1y6!W)`5Q#zE0nGyorA;O{rju!2Kvj~HT1WSo9D~I za_7oQQL&`pqKgD|e7$FeTI|R5j%L%@;KtcQmxr+g*u7r@1DC{p*gk1q5_vE8a;3}r z3@$Eth-!-Y{I1SzzAkS?r@;n)1)RfJpD6hMQPZD3?BWyQ3$j7d+?RBI4OnQ(x32$( z06E2n)}M8(VHWfH(QyDVkx*xdC^}&V&;p=4%Bq`-Lp0_gQ)+p7Di!(GCUgbUWaT;T z_$%KP-CDqy(o7Q7}Yeh0COzOxaGBUk$eX$mAX$J%bZ@sN>3NaxNx zUmJ?i-aqV$G_z#p#v{{Sr;)Owyx#W*9cM1|3t~+kovv2tantt7w7L0lu3|lHZZ=jaAfq(ZRUdklxtyi9-+#hP zh#OjVvB6#k5W&MBr`*=pL{2_6gnU0`WT37Z3BREe9H>mfj=`qp`lGP%=SOKz$AWE5 zM}z4|XqK>KO1irv^#BDG3{wz8b~3Vu14GldVu;zEc?7STsIDKD=e*J68HowBan(}?f7%`;SVnM`~O z%M?hbaT49C;l-lb0=L`LTLsoTG#~w4`t->0@nxzex51($pB_Eyl-OP8(bG+?=Pk$B z#$w{G%ewaiFTU+>VXy8DF=&?;G(3oR#h-H4Cx`N?rieh6Ok;qM~9H@Gx!5 zA>IAX#kFzl0!&izy`^PrUHLM-h0ui#NE|agSvYO+%!?b*lKxT5-o9#9y`q?aQW$S) zbh6I#{b09*+xwmX$S&Zyq!-Gi6-)TaV--jlwMTRJ=9J{rE&dYkjA) zFp#Kkhc(xitPA41`-5fw@8ZE1*m-9qkO{_k{CyS9sS$KcqwNy?Fp1#3S<;o5v5vLN z$670nXCIxNFAkzB<$Vzn*}O&D(7rt`2Q$^!K@v3VObd_Z8#;^e+=~=Hi#GpENHmWz zomngL%@9#CBYeuqW;&Mn+-p!AoN*mT7UAs)-M4wVKdyS92I7_A5C;rgnz+SGh#x$uXQk zXVIHKmOgy=mL`-k@uR(p`Ka&O;kk}7_M0VoC1^L^#X94XW_aqt-=3b z4z~44PHMD@EQlj|UP39wV|eX6LYoH16CxF>J*`{2glJGGwQa2&*?Z$5sc7@GqPgh$ z@SPboc>uotXq7quv1PDdeD8scZ(u&6!a1DeQrgVSDxoRDx1G))B%BhR*dRQd<^Pmo zf20=Q7bFn>j2Bu`|4e@T^_o1Sb}Cyx&rZSG$|YakDEAZIJhn!8Ek77plmd0vI8nQ@ zkpM7z&`KA!zZ6*Yo{sN$ZpR~&Cj_qtXG%iQtxKytKNiG_1QR&+-mepU3`5;Up&d~u zZ^m#8?CQ`c&exm%KG`NRiRFudc0i%vAguNJ&PXYQpFaA57bcdzh9! z=uap_Sr}WEO!sEEm71)Bq;D_F2e&me_hfj<*4XEl5e_u!D2#|rq__NBv1{Es#J+k! 
zl-S} z+tb5YUH}Bl(>Ji=s~^Ba+gERn(S&09>Z+A-gf$rB6A5I$F}#+6do_6_rt&X_@{6C( zV@m9gJvKLZ=>L8tetrIt0%L4nlOC6kuM7STk8D-V!EtL`^Rh!}*hSrerMW&Tv+5Ab zY4?WpvICZLn#5^r=|+w3?$|xl_kPV;%kdcOfgystgz>A)sEk7(yzLw|;*1k4s6L#SFRcMwzEz9W+#zZJcjp&jJcz=cZ>vLaTJsc)em&}za^mU4rL-MhM2P!o__I@v z3MqX@cspcJzo}KxpwzCTHbEqOC)mi=;;#{9o2wPE!L|jFoa7XU4R(2p>zpDU)%OW2 z_cuZII7@u-udhS1AQ03$biHR+@iymTGtVjCJ9aUGQ288tiKdSWsoudR&>zK=zArSN zM!WCqH`KDJ6qM0xM+^ODd^vN}T$hXJ6QkkbSB9Q~zaAAFwCz7mp+|IBT|da9+-Kx2%LQ+pNmv@ZR;-?c=0Ys|)823h*|sj&Lx zd|HYnhp5+2?>(dYT2R58YpDoo@`v8<+j4B>q)pduZ}!X_&1jXhk*#C9$PD5k=ko~` z4+c5juX559hF9&Yz_VLdqOnHlEZxsb=Ny&+g)?YvPgPd?^P#Vw}jJRg#brLj&-|)wQn;XL z*U)f8qczfFJym}_AJ!jVJ;dfhD!S^`|7aY2V2YmtiC=I<(EVco{MYw?!wr?h_M`}G z$<5PwNEL}$h(H6D3g&EV(WQ^6M#Mx$s?=C6i#dhm$GEC%+IxTxh||?q6A=`bXCVS< z{B`;{56A`wmEBLt%;j|-Dx@Y~Hp}%92bt;b(()J0l#mk;k{UB=}XXp!AW_M2_>;@F{aGO2n zJg??872n(Hn6EfM6cKZ)0he2Z*Y2TVVr&Hz7*)waj;v`dAH%BECJM#=Gh+)`S=E@X zv@P$fOMCTBVVeU4MXNT#0b(D2DW#XC6lP+vn_oPqdF7;Z^`1On5kYQMf7|C$s>Dah z_+6p(Okh+j&o9G{lgBhl2Hqa!*?27xhmBR-+l_L~<#ztDuSr;jhlOyzruF~VoIVBA z0UIgQlDGrJn2cx-UbhtSF5$(Czi<5wrSIsiu1B;6!)(gJBn}MZ(w=9FQEE#EYi!AZ%8>u% z1{Ep^=P7#-*9`2{?Rddsy&l#+)2Ci*M4e%ukg76}y>d`sr6*=So~Z@#D+9$2z}-s^ zwdfEMDRD2wc2uo0R^n{ z$HSLe5PMxO(EX*6Rr+e-=u1MS+36vHPKX%4ac6*G)j=2gQPZ?T2%BBU)(#hn#aH#If4mg@{*2IMUI8xr&e3Ii<_;PFln4 z?cS+t!UnEVgePN_eFCaxDA0^~r=Ikd1!CNhn%Ud{vGQl;PcO~22YGq7nSp9{pkKNU z@^u6PsL)Gn$g3;M;k)u6f3%TWd(`*;*n7*UD7Q9X_=qTA!H5bdp&;Gep%Q|Gh;$4g z(jeW87<4GzC`b<74NB)oHz+m0&^5p?%y*+F^gQ^S!}tDq*SnszT(04ceP8?P+IzpG z6kQ`xRq=?JT#9dHj%Usu2eMvqV`J!4T5j|7F?f2uO)~q{ihMJmG3WNB4bvi-f=Pqz zr7P08>%!eem<2r_)y8i7eMBV}4!HP&eWhuJ$sS4gg67eBP)-o%Z;F>4%=JBXncu%+M&*P^ zGaP>W@B>$u!(-hiW38N`Em4Xf4zCtqJ z)@wa5VZ;YgT&YfTb{uC*O6)KeP5gfyhNR6W#zqW9!!31ZpRzjITcso)+XPlJ+hoC z$YM3!Jf5M`OGSpAC2yny(P2z8HysrC6aty5<8yJ~q7zpV7JW*R+yD)(*Bmu%Gnxo_$z@W(M;Ph7)XGyj}#eLf$S};s?Roc{ig0b6*zu7XZvctZzPA;5BdLhEZ8$N z0{!oQvuGmqcKc(cIo7G!=w3kE`#9oNeQM-)?`EYOK=NtNTiVlI{2`=&=ZR}R z>*8gzFR|Fpr}inxFn0c{s>35|U-s$`OLzNDp90AOiqk6JX{x+Zu>0*&ghnWFTUY9; 
z@IZJ(jWae-#}xE!>C^NPRB0agIue%s{yq77Nsssa_6#&VDAnu5%v65uZd<84Tg{d3 z^J%KCW}md(+qA8gB|OlC1b6a%2IQvtWplQHjfz-w`oA(wg>UkQ-)|~hT~0Zwq)gHI z13Zi?k0pq_+8vG2Qh;Q!H$ZPhz2f7cILAhbQ!dJJXhP<`A1}PM4_`PnAmQprNhO42 z-Kn1CZ{9d9h^`QvR0t&3zbcnA=Uw{v>&)-#A(=NH(pR_RlPF)SxR)9Nd3pl>&cUSJRw~b0 z{=t2)zjc$>?R*%P_=7cw3^~tt*}VStITGx8Jzp{XTR-BWnC{g?*GsgtAZVFqqLt_f`bgKQ*qp*5)TnO#j=XSw#Oj$VBT*NllR7J7f z!BwaUePzCk`BS8dZIP^9!sDi+1ek91I_?sTA$9brc?wX#5LX{gak&?<=2y0d{$RA* zYQs{xc|pFsXF`3F^c|F9M4ZE&4+#2imf>c}wkLC1YAcwG2EvMKqw3h?QCMLO94nEy z-CQ7yJEvn_eUgwjK^aK#BF+FkVU?gYML(yPK*>5TaWChCc{vC5@S&2lH#FZ@FK?EgMAoN+!VDRWC zrFx<(>J&0P$c6UQOXpPjnUzIVz$$48w(|1&-b&~Tdg>pnuYeFzlZKzW+#3_(s~lGD zR~ip8ix34`%4f&=Prjhl!7z>M7JisIRGo}%;Uwhq)!0TK)`YR&Y4QTL{%hX_BosIn!mU+UAT-oSo=$;iR&%73PkW{H z)Ab*m^5+zwH{b{>_-!Q{v)UPd#(s6om{_Sues~XOwx|$FvG((0nl+TE+^A9BknOvu zu4>!Ua?$1S4JI9=r(GJOcmwTu+t|+4%On#J(3WA`L*#)ajdBzn?Vm?)GDI)L$$u$& z9STJIWAmW6qI|Q7o_k!2P4>6-d5+ZPGXkegf3)3M(GRy0t*+jZ7o)Y|Fvr;@J5>_4 zx(FO1N}?n0;olih5=QMccA_x_&tm}|gMXH?$(;)1fZzPX5T%9R^-3o-(h2c|AB z!BTSs>zNE}%Hslt8V;U`1V)hGj2q9w;155%+~r9zxSIIv$1T?f}_T}Ad8p?dM(7e*qr2s0dq#LOzGSs&hQ!XhDL=y*2kq0n@W zg!4z*UGt3Bdeh+qYR$k;qu{^wy`Jjk1JqDO?ASkF0qmwKUW?LW1AcyYDh7i|v$&j@ zSAW@CAedLc+DLvXDxWytiFuQDO89GL|5n3|O8V({Pd8FlKlLc!jzKKWWxP1Q$Bx>D z?ew}FDuRJ7ulIfL?O)ZHr~^5i6ftuy;!~g;kAj;oB`5MC4!5T+8JIq?tUD{8_N>A? 
zw6?Y!GAlv~9BxQE(jjkWpx&`_y)x$rQmn{7QgIcL@to)VB@$B_nGP96lgJq+FYMM@ z+2F5+>G&bwqKRXYtaSKk*ylXAF5G)(j(8M%O^<8Vbae%~k4MOAE^%+a5!i3XMzx|^ z%X-eu)|BD3w>o4|F@DkUXk(a2(xjm5W%+D>Ri~#MK1!q-%oiW>R5*rBVuWw4q^c(N z_k?~lBhygUNu2%S7dU_44161P&*PmT(qr}0m&Jk2`42lEDn462(mHIk-dPY7coQ0$ z6YB=JdJmMW%#H4>wQrEdY`UjXvu3E%3A^3%0|>_RW zdbZe>QR>4}gIRtvC+7ATW-Q}ZS~-12rnwuR09O(V%+is>NHHc3m@(3;=cDT_;#TIK zVBVcKzwck}isB2L>k}Unz@H22!8<+W6yvg&$-E@z$CWNEC>*#5w=Bw;4 zhY#LF^UoZ*?cQ7sO44*B>T#^qupi%0cNRD0+FsFLg@$R5yUbv)zVZNF-)*d0=``Cr z5^LO-Qzu9j#^-u-U5j=^{TItJOViZNajTzM(XKcaC0r$FzwBWwJ!X~YY?V7fQBF+A zFAO!({m&#GfzZ){zXY?ugco9Ymxn%)fm?v1$QP+F8=bqv@}jmQ9e6(&(wkorv3tsf zS#8^9`x2Gx@GxB%_=wQ{YTClqC_^jFp{MZJoP5f19YrGL%}e^gay+xUJ`s3&F;r$) zmHv&A-k<54o6K?ysm>0k*>O950&L=}-ncs!N8ox-csY~S(7n>a%?QfRpp-7xxUR2T zXK0OPFRiXk6Pz0LjuV&Q3Q?LL7?;@EiNqO-&kq37J^8YX9jGHa?IHQCSk%(1FUo6{ zJFMtJ z8x@6Kl5vWftf?$2*PERuj_4h!0Ei542; zzEoure*BNUMPCFw--MvP5wfx9@N73ILVjOL;bDX{$sjYjC%P48tJkC}9GgGv*Efz9KduM)n=7%|jX zLJoN<#eHSGJ9Y*P{JPmwQ4dz4Sf{7R+dM<-W4;2g>IC2R^@T#x~+e zk4;OF4F-gS#x*FM}76Vk8RHvEYZ2{LY49vEa;e zrB$B8&4LZi3a_A)x?*931hk(}{Jx=fkDafrb2ZBu_r1XuuzErn-=f6r?G$}iU6lN2 zcMY#wZ(6&05;0*C6z-9CYI9ix1WjU z)k<|Hvo=QsKevS+ij9%Q-19kxfR0g6cGb=PIXP;QMIg$cET_$B_jX!q+)SnOkA(fb z=i&`vU8(4uLo6}motyXZK0K?1kic;u%>jH4yQwbT`6GtzGYj59k zu@Cy7$zmy&hA;R0jk5O(YM>Mb_*-4tQ4+AKO78)47Pe9+Dyx?2P9a;Gfgckh-@U;X zvA=nIIKP4jp7`tB_}^!rN9gnPw{gDhFSqpvBab++5b4XEC>){^Lp6T?ch;ZAGk=NGu|h#-(Y?INjUT6td{6gDCfGD7XGbVX2_ z3iz_Al-R3GAo40QRK1n5{N}M3qV(G5T&NA;S|-ydYhfEUB@Xhrw@EiihFIUh7X!ZIbxcuw)7clm|wJqzk-Kn%4W$tQvgsMUET{@nM z44S7FBiP{6DvcLVFJr~<&ga^km!YT44cIuBnIang(qnq-QW}!ax0mY^8CHf?2bVm@ z1c~VZu&P18)`?OZSfsuu?^GI!FC}^- z|A*pt@vYC%3X82KBx%*f^uNKsr+jy5PPy4G#}1y!{&BtRkDmK)r>w9W_+7BmzQ)PV z^u-+MxOX0SJ_KeL(^mPJzid&ZU6A+Lr@Fba8_KW!iM~%uPSwGXjK7S8Y!WEpAbnGa zuPq(X>AIJgr3XE;cJw`9qo4PC5Ab^=p`)2xce64j#xQiW*3(jdCZV5;Y2(R$#Uh`& z&3ot^$1=4=^1F}#txEvHe`BGnI2PN$MS?!;7~56XXP5IMnMQ5=I8%sqc(pD#lL1bT zF73k(_2H}Z-jO)`7IU|!p!RV*&q7S{Ko=F>5vn?qr9I#;o_^x{@DQ&O5=^`Z?7awN 
zrKB6OGnT37`RM~b-CZa2IdhFi?A=*xzu{;Xuq1nm1?y+=OhEMlwKWlRF-xQjlO$B~ zy`a;wg*>b6$fqA&ElUEYfb->2^v3vBrHUJQ-#xc4Y1nJafr&bNTsMA5F#S;v|1MU) z3zyGx+;yNV1~Xww!54cu*A?G-t#L&B!4UVhS>tMQ??cuuCn*D3>yKRPv-rARt*LAs8FcifxAU+!pqY+t ziBcVXOq-31=pQ~NR(Ds-17X~pfs>XHS7B?~Mc);cHb5l5tPCNV4W;SVqLe zmzgFa&oox(Vxn!Yp5J1pfMWPBb#x>k zYuMsNF^$rP^f|Dt59e>w_SxP@!BEF}ReT=Ez40!4ihw}7xnycz6O1f7R3Wzv#B;(_ z!mh4DkkM1xa1%z@EP3v;5i*Pyk?8VG!Q* z0MNGo=Rj{4BQcVP@oDa45P0LlcjSu5yvqnKIp6tfCHkH1kNF>02kS-|g4Sk)ybAZT z?N1M^5!7AT7)aZ7vpTW}qxZ&eo3oU>eZQONNrv98yCG5&7W?%1!2wF!N z2j;q6e+8gg`4KvL7ppMIdk}2t@J8$jdc%IBPusZ^HMV0nqdfHoknlW zHgd*sWb%~=R9d6k>A&u)!uuZ;8mt{QqbQ~!ofDkOWP!iS)S~vIO*9}|{Dqlab#PG; z%=#Xz&KXuD{g!AC>dJu88Rpl*Ltc zzY2BpX8gG={wF>=X81qU_n)@_y51<;s=q2d@KYZBDUPClvn`vcD7lKkT&@ z^)vd&RDYHJl%;$BKLPp<>vJgfoUKXbGv&z$_usAbi3#@|anAz9a9A?JLRr+5BMTIQ zj_C~e3()72gvN_VVKJQlHc_CNRI6(|eMwMrn6~-me|z#j(DV0)>#hQsr|&;}$nJMDT^5Uj%%&ELstj8#Q#P$UD84zw10mo7Ebrqpg+7pob{YH?~=oU zY}_$}d%ndbB|zraV`I|x>lVgDjxc8;5EHFhVt@D@fABztFraEz~G^y3a zQDqc4y!_lr=KP!I1Qyai!97$tc7K-m_*c);=7ovMahJt%=^YDr$Gf`TG`2L80QT1= z#6U~Ju@C*h9X}C4ci>EkKc3E&?yaZ&HQW4>K`F;S_7H2LP9S}jL;bj+B`KeYjWu6V zDvKngYJT+7_&+n|F}I4&xv^`YY7C2^;9q?EmqQp|8Di8Jkg}sr-LZh>+sOWc;-DdO zHcoEE;{eC~PH_{F*v?3+kmXCU?p5x0BjBiiLi z1IA-NK>VtX3nSZq({kuG-fBUT8DiwKxJUR)sW=9Ntgt!<_B9DmXpMTY z6^2wW_x*7n5YwGIQ{`zt7ukY+900_=b*HQ;1hc+W&kQ+VJRbAMPV7ilxUs+im}L}C zf67j~+3_momlNU{X1p*U#l+2Cyz1;6SQT;iIQH<2zBZ(vYGh<{EC|Hjbv)KLAOo8! 
z354hsl?7LQQVPWkGl32>`eUE}B`hxI-I!cdCd*>%kEg#RRo<}iN<-kW_&pBpBf-6J zR}2erGotl)y8lhn|3Thq(3?7egkn1&rSY#n=6MJAzAjkKk8TQ@!J3EH2895>=G2J&1# z!UIgB0!TXiYwXqC#J-((Nk3XP#J0UYRZm$RVeTXr=i>BAt`Q2Ma45-1(gk9=IcN57=U$V}ihR{qQ9fX^`L83(BP_LF zj=Jq!d|$tsFUJXEsNLUsqPK17KZjTCufDf26n|T$F>%PUxGETFY8i7P^h;xDw?e&} z(_aa6T^^nB9ciiDKrzYo@rQ2M&=QK+i_CVoJC>g|Eeq~)9wkI*is!;GB> z5>%B6o2=7V!tgAvYS|T0pCFVchyr_>Q0HEI0rNY8Vp4pR%rB_qM6}(cWm}%TsBLtIKyEeNqsniPRR3KsfUa1c##Wij#t% zNPh5gCy636cDh!aUz8CGEkK!_TeZ&>gtsXj3Xgi*yu zAiNnek7mXL46bq!D>PJisrZ~bvn$#%y35CLQm?&!d2n4xrSV%S=%|0mzm00(r z0T3oHe*-~Mb0YX8nZ=uI$ym_%stPkcp<7ebAjpmN8J|%}b8>cRXH7dx8GT{ukqv?4 zGuy|c+Kx<2T>xIz(~ZSL>(yq-tl;zqLe;kyC$C$heQIWxyYnrxvIP03B;9d(f=@*2 z*)uAP&fAvOjFSPzGF;gM7=ismb@55wf|^3ZrB$g>7SPFnyna4Qa%1tSbuf;U7D!R< z(Xy?Zd0eT1xau&M04b=OYtAuw`KbZslEXh3?JuTZzlY#Eqn3qS;sG zgjYpT?q!DL%~F~)BMc`}%#63ckJqapxg{;xDMBkL#68stLQw6;A${ftOP^duNKwT! z3A2pk3_7x*PXz1!5w%Ub=9WQntx5g+_!5)Zn;%}=XqZFY7lS6X;y&9}N^A)h7%t^X zj_wFI_XHk4<>SBk&b!W(I=ANb+@r{68mh>M7-x#f%>%dO#Sez>zFZx;!3lW zRP616ltqCP(3QBj&XgvXW+s`^?>4MtnZMg=Q0V>pG3v$9uH2IGT)nv2$5s@ykYYn} zDap}m?d8vHPvSBFN8ST|)7B{eRF;qF3zO|#=i%}cx7#C`a4xdMA&xGa*c_tMq=Ouh z!IGnM?bSY;CmNhU&3~r<)3Rm0t+lw)rv>Tmft|S?_hfWt#3poFgn2*b$?&bSq9REy z<5Ro!-88=T)T(tu|6aY+b)3&K+%WaD-5hEy4ZS9v(6|$pUa8!jp@&l7hF^uNmKupq zW?yfLe!3mRKEz9vImj9$N^B7;xA~%p^54@J_kx7(wGh>I72cqiI8s6)`l~)UHvaR^ zfG~o1q*!~QG&cfXsYDw08EB-H0O%`r0+ryo2Rd?AQd%H%@{2MW%pHMZUJi9zuneSi zs$H4xa^n>(>}JlpdXuOX-Do70w~R)P8tDe5o`5a{QJHmWA*zJ4g&;XRpsT4qHM6Yh zqeEb?f=M)LG=jIP@Fd(?NX3>q)9;F7#w}gEuv;}>*}lY7&&|1@;a}P@r=(_?X^5To+L#d$}hke-;~Y7_z@6}FmOg=srA?w@f2#M#oY4b1dc8<6Gt z(b3xTEJL2WGpkYPK*5=jyFH#0ktQc-v7x#~Y_?&_GCV24MV%Fa;B4I!gQ-K1*sjgv#IQhXel;8^*Z6@fyxgT|6IPhf}6 zvarp~^lQbWiGKrbP^#AL zyX&<+HL0%>F~f_-P@C6beJ2rBW6)4_N;cGlJVdqh;;RRgF(E4L+xUxEH*`_U`dx6I zF-*TAX3TJ`mn_mcN9hC@JayhpV9sOc@|-AsE3|dvjgJi=9*2$HR_#K1uVyb#V36kQFS|au1Cb|V;YNdp%(J> zlF*b5ysUImGHkVc2nPGiNmGVY4Qtx-$Ls1nL4zi-XI)Eu^nms_%wiwQ#J*L z!4y3T7m!%6|_Y 
z-VjMSbT|P=fzDO2tz^Ur4HYFyXKf}_X)s|Xci2WV=Y?oTQBCfLIUi@+(jl@9liWqB zqP9|!M_uRt)-(`j)XiCgjl>@l;>HGzPV~7r1r#pak6*Mzyh*R3OzK)xwWsa4Gmf4n zXSHN&<;@>Z@9#I9jFmb;D_tG#nFIpl<47?+lW~B=*C<|(Hh1wdct!W^S0&YxS`Ib} zgDIcZ{-&M8ZEhX6=lUXpT0cWq0S9LZsv~(qUwpiv>AFd=~RtL^CcdG zVb29zbDns;fMfF_o^m=|XI?_}17zKlal_7RsP7O`me8rP=wbGhhz;-}f6>-=EFb1w z#zHl|Qiy~AHvIX_XZ;f;Qdj^ZGlbiBl@mDjw ztJmwi8PPBjkzOw12IeZkmv2Tt*;w?ZiV>k`&wz+E7jvHgXik>3il|AO5WLZ@__W&t z;GJg&Y4e4R0TA+3_UfwUV96qPo`~L^RukDC;uDyd5nOYMNMTle{?=F8@U91!5s|I} zB2E?&F8w(%tx$?plkV+d6H^%P$b^3XO3UkG>2s_Z^2|6_byLzz=5pLqw9xH*@{!(+ zVkPa5!`IE*qq-D@$s$mDE-3Tyis2I&0(WelV0j~PCf>Jst9v-x(S6TR2i6MhtQU64 zD^oo%^J|mBWcwbLG5gx2gTv)!cSruUX#M6neA(%)Y#KfPGmUN4CT@`{*6`tq?W@t9 z9_*$RrMcNH{YFNgne`bI0(U*cDS~IWoQIdjN}^8$yu6QmyEM7EATo&S zKSr~X-1))yhAWntGd~3g2fV!SXjirXZ5U8(xc6e`Bonymq$_I?BL1<)9Ts2>zMS8? zG_mj3nPr8AXl3MiIZU>jz+S8%o)tWgQePZhH7Rr*eR=|qHSCNC8IC(4Hfl?dzDbZy zKVj0V{N5NQTv#jDPh?c8!=T)7YzqX6l-SzS+FsZkcYb9D``3J^YYa&EtK6XZP|~4( z_<8oOdatv2FE-In6$VkcUr4>!w&e6fyd~o(y0=LXy@xpW%1mtkeZ%V})7nbeo`|%k zocjiIGKF8+rVtg+7>Y5urZrm&1HTA|Y1-Em??`}*B*WZk#$mkYj&B@4))lM^I5!i^ z8o3W2W%aY2yqpgYm>6Y}HEWM*D5b2p%!VKPy0z;wX^CIC5Y<$e4@+gmj$86tHr17X ztC@iq@a!2KBHPRdznMeZOATuH^wUVH64m2+EZH*%sYYb0J&QeNc>)*FR6&R}Xra3* z`iE^A3OmF? 
zNem(-wa69GSt`Nceu$dz38oQ#ZAge^t=Gr&6WH3#u*jW`l91&pRv}%Ul?Ggj07NS+$oU^D+1!DRC{e%+d$C zbXa?-{imsPuDpe1_}kbljpg&@{Z#z(?8a+RzCIpOaSIE3PWub_skLCxWB?!Mlpu&A zpiDx%cqZ|>>%#_3Dh?2Ig?9{Uk4T<$rLrVFTDzbdfF;-<=gyS0j|PsIh4`&28dNzW ze|AYZS=Z{K^wzEVLiI&M4@=_q)MIaYdSLAhKD*@80v~OJ3xO-%{iG=Cmd>>($BY-V za{^M36;pR{N=8cR3jTKHsGI-d__e3DqeC_9NeeIzWmvXg7cc*l4d~tHb#3R&Z`4mR zO*w$}>~pfRfRz>prBsVGtI7xuBJ@gLK`*<&&j=g3|D{)P^RZx-x8X;?m7% zA6OuwWbERPPfAkVR5jsxupo)cbomXm$fy1e*i$LVfV(`?B~ChR649OOqSL!>$UZ+vRPItuq7GZK$=8qOGetLEdobquBj0jkqWJ=$C?*Bh%4N@+-3WZ zfgp;+zT7iQYgTuX3msw@d4yCGNYsUM${Gt2e9M8aY3cm@~?&&!70UxYqw` z=a*f2x#4|n)G!)F?cyQaB?$3_3;biP;DVx9P7i^q1*vTWyublLr)sjiq&nNyE_Wzs zs{B+P^44|{+8?C#<(4%hF}P)>nnEg$XuaGAO%*nar!_IXW#GDHg3w3v%nNZktX6$} zmQ?L1+ox&tPkNc{2z%3N&F{xv3q^)&)0fLIA_R$&CuvqTAQ2>yjeD5O>$K-bYo^C3 z_Zc^-*V4Ox`fPPRpU zU+N#JAO=w3v{sR)oO(k0B2$!BO22i&0#ugDbNdtZ*jI2ff3SaxsI(3XK0QUz5~M0c zu9xqO$H-=h!_u%qY2X>lGVA`N7;PdlIR{-d&yI`I<7$Sf#lTn%vl0`F{`>z#fhJ}^ zV~imL&pXn>4DD6!wnYhhggM0_`oghBNgp^iN+c zyEmpWXf*9NUx;+VQ#t4&CsZvHN^0GsZIANYHfwTy#ueW`rK24@9pl~eYcusb7f7-K zFl;@jn&YJdV2A)%;u{Q!0fJdHYW-wj5_G~q()y=_>8dKj3v$y}hlDJbLwqV=DvHRM z^@+pG@nH|^6e_C4%M6jzX6TW~wX@v^-9A%=Vi^8pUlQkoNS<4Gl+A5~<#@GaDD*StPza@Jf|c z&@Clid<%kHv7V;CV1}*$iC)zn5t$x#utBo9{A4bU5eLuiBWomx)K|oa#La>SC!DfjyRcz4g(^Mx4ZH(TK(N_)4=AF+zli~+A zr!eALqIcCbPay`EA*=H}+vu&W6}_!vJ-r{nNWS_8%7sn|hZf~$2t@|Ud8rs&#L3g| zy&vMHJqB+VT(Ms@#;kgY?4=b#Nb_y)xhlHrER8Wtl}a>*l|(O>NhW1s{> zJUkjSB4saED>c-B$`g@J(hcPxS}9V(^-Z5}x;;7S+~LPI@+QbbpHUTo*_cCX@veNU z=pCcvReRb6N7VembDvmAbE0*qPcNkig|$h;qxNg(X?2SGPMLOTE^oKfU(mbQ{&){P zy<{WN>EQux$rb#^97{(sp!gf1B6C_P&TUF<%4ByF{DBK#$ZWE@dhNKuBOp&9I2GWN zD;al%eZ=P3nEK$2)12iw8C6u*E4pAiaPEtF4f_^TRR{PZjUbCb*vtq@e2X4GP(C_q zglpnz_s)Tr=V9h-6)7vsU9({aWTt3ILewcf0;P&s$xRn!+0XKia%jbFnYwO2ij+i! 
zGrN0Sv)Btvf@U=sB_m=&OGKIETRzMScxSc4><^o|w<&4pAq4T9)~@d29&;R?`}017 zl4CUb{D#E?tZ=?p+*yTV>{DE*D~yrU+z{Gxj~i*R|&Swid8*ZVND7im>wfV z1o-5Xvd-;7uB^xT5J?L0PaT3Da90_{#Y)@uyDkVEK8y1xPplxwQo0LeM<825u6Y2CHAY;R&8bF>Yavh_iHv_O7@IK zB`i)GQym-d|6sGI0Lx#b=h++u_% zp=|!EaC0@pFs^pRS% zlSWEc_$IqX(q@EEZs~{|#T=AzqJQyLE34mu(#|YJWP4Z~Jt$Gdrw_w5G(1xh_;qu51c-WIM)ZZuZ$#rzVhz5aH1Pvh@tRxu z$iFphdNBeft$rksUezRFBh2QE_nRZW36=ATFx!dWHMgH7LvMOn-MX>7t-VRn6dfu8 zo0!+{&1~_0uFYF*JFAZ$Fr8ZZDx!Jca0{BCNs@`wdm%?fQn-9Em46UQ*@k>L4tZeM zlsi8U78VT-&I)+z9sO8surwz}fx{qZFAlFMt5Uw4r8%ZZh)#Yt*k^;UeH^UH>D#|+ z-lye|lo_9d{NUSV2xXi)h_%Rmh^nX??-{XWlf#Bz|^ z$KfX@Cd$N9mMhy;e!Dn*G9TUnzJ|`kSayJaQ&Ew>Nf^ZHOE<5ZCWAm>V&@nxH4I=F~wh++oBzRB9{ma8JmtHMSKPIGQfO1 zW9E3J(Y5xn^B;l`V}*8ym-1`FJ~YsVH_&HSJGk zkkz<98|IbgMP@h1op{Yv8zGS0aEM?r{kh7kB6l+pr&!o_Po@Zd`g6|4$N7iI)n-z# zI*BJmL!SK{kz_Rlnj5h9NOGJ>e&1YtTtgYL18zGL2W;c=gFZ520r+bDa=@+ce6{U$ z#8(R=q|NVVV~Wv_9M{dWSZFsB*2}9nlIDAjT#Y@VjNXq4(R6cvT_&Q5!JiCX>=tbn z)c+c1V$MNTrC>7=nKbzoBpPg7kye_ZcSk&H0t(w~9Nf#<^9%j075){p7`XsuLN@2r*-Btf>oICRxTEq}(^0#TL1CFLE%W zDiPKcHca4TFPmJ&V}aVU@pSw8;%5i6{wOkbs%`?@9#%l5+uUoheGBCp(< za4u5CY;3A14u&IDdFR%xCuGBJG%zjFsO2zTX2GONO$x1ggL>UG8wv$;Vx`|BDE$i5 zmku6??~%8m_9R9g?;lFtT4kkp6Vbd}hT4}R3GEWNHmt363ObB+2W5e%&CZ*wGU5zTmfFq&f#TdNfhOB-p!Ny-=Qonw?li``LQ;=`VL?{a&Np@`M66+Nm#a z_$LM~T8!mrC<@dt?fCOTf!gFS_w`cag*8M(i8;d|>~8U>Mh>{k5Z6qN+f1-))l)lV zH|2Sb$It0?-4g7>0=V5qZi`S^vXw|}W`q*-YET-(n!i`K$o1{L2aN#Qo^UKW5Xw_Q z=Kq!u%ER8DxzegU;v{Fp{ar|vzX_?ybZ%nOQ(S~pj_id zD``??s@o($_@+Ce8cM2<-FY2GMDQIM z1tLm}8a)Ou%HrdRDVNw5t`( zrq0aFGBE3WQtwiZi3HsAx8K}!je}w(k?WuK!{MXFiLi!*;r@(dcUaI)*IbGBmw4Jp z0jeTyBFsldjaSG9$Dizq0HDbGs8s;3ZViubqJN)(X$6P zEQOYyaJR>cSr_ME=&qn+N@ykT)kxcoK<1bTjaGZ@y3;X(UL~>{uJ`Y0(k!B-?rA12 z8cC(yKrRe@rX>{j->4fJF6WEtaY!UiThO!8nJNWMw#P@WU3N-@w0iC69Icw32FWKh4as?+%Qj2u`rH`N_S9oab^`x2=W;W(=vse`~^u`n* zm>TI3GFG}4AAjnl@bo-qetlC%l@rM-p?%m|sYJO^uC~3`tewn)=>)6#y{f9jE4d^*nlo7lHYlB~vd$MW9yBX&cZZ@x1N{y*<)htrmkU_V zc2E#-g{NWl{q|xiW^V6(B#wh9$V;z-p+c@+KpGb+B)9WdKPav(} 
z@8sG50_~~3UAday*R!=&voA{>yoqzq%D)Bfz9+WN*7$0*Umo2~bV7GTgKBJaJr z8m$dZ4<~Vv^PStRFvU^ZbmN+)8FW6{P29tw)Gs~az1#9*_;S>bj$$F*P1Ouc=1ie) zY6y%a$JkUW9o+iOj?1*!gDhY~1C+tcaAjtj@Jz}<@?8H;f`MF%pjkmzLsBkbd=cJ} z4Sv!homP{U(kv9gc*1ud_O4@FF}Mx_Nqnr|DjM zyz2@)vD54Z=jhG?zlwvVKdcG6SF>;Xfpwq#Dk`$eN#P!*3SP3&ZERlIWji9jVjK&Q zc-E`Pi%iHr27Fb@0`bN|LCwzV^v^+xy(hd88*!NJ2$&4}^ zU@n1>!BGvmJ(G%Bb_5h>DbbP1rKbfhCKC{3hEuSq~un)D{UgVfNG8WfZ!y@VE` z^bjBd34w$pZ~T|Nw`HH_Jm>wAYle`y@734(tu-_Ka&Ao94UWKYhsI2X`zAoSmbdv@ zE>4s!f_gmRqfnR%9!W7wt0o;Cpl}#3C3O3a#j0d`Fhzy?NefR&lw{~hnLnk&9Ao|@ z#b5UkzgoBVhg>h2Yjuk1JX+{zW}=NqY)f$-xJn#zq~4xt>eIr&Nm)@Yd1i( zU|yDR@G=ZG&RlGq6u$=Huf*S1)@rn$m66w}XYykgAV(E4b(R zdi9MXW2sjJh5FD<)8jRMh>zoec*2$3Y+p@YaRaqTZE%~9-RQdMx{-9h?L|=J+epwQ z=*_94)x&nI>#cd#uOF9+mqZ*kef!R({oR*TCDHj{X z1`s<+Ag4&(zASt*^TPK#8kZleahU1Con0RHdFGGvis}<4n6BfQ)M3!*96y(%-^%8)7RJQu))b~#EnE?vYbKu{NqB$QU^chA)lh@)%K?R z$T+54KK>-7<@@)n9wSu~k;aGRUu@vB5+Eo;nvEkNN3zVyc`)%MHyR2r{^2^{_TvJ}bI#D|be%?Hu zue0Cwisw}U=DK8mB2Lw8?^}Y)H-^Qq9!ksHE>^%}tX-YrXIWddv}>(huciSu(FgjK z2qp)ZicrmgMiT?JPeH~LP!A2ihSmCG40goG5dTlQB0e(<*ZOMv?KfCs+^)i$$5|G& z!qSJu&%|JQRLeEYvG>JTkib=Tv!a1os7}W+S7b!j@)M5NS9??{-R`;2ZsoYWQ!l`l zG@T&Gyh9Ke>m$;nTl}MRkISqj^E#gdN!xoT3VZ_1K1JEJo|Jqw~oH$O79y4YEXWrU4!1C2Pctc}%zA z9wz~~j!Z;O?Y!SJv&P+>+uciO-1@5R5)J2p0~W2XKcQrC^)4ZtE>a1rPr5z))HR}@ zk&P(DG|OB zTrbmap*rp9o~qtL<1Ejd_SmOk7vNdS;)hL+jye(k8>yHKzw&URUAc$(vNLUhPG)jk zZ|w#-vi^f_ha7rDJov1-c@W^ux&s;H1$ZEYv$|JM7kkiENqWudOJVra9VwH$?=i)?uQWv#BYs~O8@OC;^Tn`~IZWChzW(eVxbliyx!K*6QT0mbR= zeBFNjb%}NHh_O*`EgX_ zJ3B>SXOY@|lh-IBp%DYI%$_f(1RE;ieHUGJ!h_Zfg$F{yo(|jhlTRc>PIqp`33z<` z+HSc#{_*{X9kt3n+ar6tA*)7SCMzReZa#NA>qx?y4UZocu3xb7+_Q1HtBoXuf5lx~rq0 zBgZJFKK;C-*>>+p6ToO-_1!%)w@eQD(R75K#EXG@J{x;&`{>hq4ePp2fb__aU4srJ zDeV-vz|pjIP4%^GjhK*M%{pjm{M}%kMij8|Z&pd06Sf_5&@7iE`c|OhDB5BA||Q} z2I8Ksx<+~oEU+7m&=Lltq5(hfy$+vacs~89CWyQa8q#;Oxwj<2WronunVy5qecCT- zcn*_7-Eqd4-~=;e)@%E13#SuAPmx5!ByV$-!P&G8S2IjXaZ_ivyEsZm6HM+|x601J zt`rmW5OQ)dd!Is|cEh@#b}Ozl=?TtDX8N&!SjGAfdgs>d$+aW5NJ6 
z!O-6Z#5~lBo|3+*L?V`N-o!AL(rj+D3@MT0YSz)<5plwU7U{}D{4HkniIj?b`?SU# zyC~UFS|hJ$oK`gJcU=EFD)UM>dO&40gK$5EjL|46d2J_}*$Xzbr z>CKNJ7^SoM`^Scq%#d~ahr1jG?AbtQ@9G{G_6>7ggh2vzXJDZXXbn>>^y}ONCNySt zZi>>(?Ge&{J9(0#6#UW==p`8Cf>8Bqb+j6G_32{-XZkf)EzLaD-wa15-)^+5t(^Cs zvM2K;VDcA(Sys2U5jC@8wpAsy;!?HH2yeBG(fG8^k}H}~-UfXusCD#+X}{5 zV(qqV`-X>~ZRczZf~^)?ibrUY)@uGdPLmRr0N-km_j+I9u&pbU+hU5aQ390-`opP;fPM^6^^onHFIKH;P*#^Ge)Y4?$I#~s z>U>f#Ni65LE{@YB2$1}@QH`T0k-tmt$mhDY6RZVEOv~FR&d7y$YjqsElEE6|d>s6k z))Qac%98XpEXvt9k(R%N@dTao+f(O6CGPEqXepjuT&j%ggc|F`*Tn%dWv=#@_3!lz zdoLngxtzx3vS;2^K<7TxZ9E*!5F>N2drTXy%IQn@L)GN!+|!zpxo_JwlrFsh4#+yO zF3T3UI>mIDmWzznifC8Oz?`5{_GaMJjrzG9j2a8Ybq!JGfXl;A6BX4uErNql#QjIE?WJKBPba_L-Fd0G=g$Y7#E*s? zWL_;!1;Y2u(&DHa(1$J+6<{w1kWP`{$&!Jj9l?vL(;%TWM2iLww#c~HGL3_+)nFB8 zSPAz+OF|`SV-_wbWUsySj?ab*LehC;xB(SLR(CB;+&5kDCM8|0DmPvor-`)1elZ6H zyR>}s^;0v?B4E8ySuKA-(m?O9s2o(PvmjRG+3 zPL6`ea;dbJu=GZsqQAGz+fLDoq|9+I3e;MbTa^ztFYxDoi{3V+rKPsyo&nBjc$GvA z`?;pQ+qK3-%5zVAb7*8?LWKc=(`*yquM0STUtjLo{-S0| zjRuyK6@SO>CG#ocx59_`4I7x06i-T>)oUJ<3uBkN{^GRkCYuPzBp`pL^ytC#N)Vq)tr#?g9j z(%1^p3yM?&2Ha=q*12lmGU1l6wMfkGId(te9N&GB##q7#jri$3uF|qVyiVKha zd(IrG$mb#@Z0mtH@D+PPK{D zS5t`^$s*|-iM#n7+>G`6?Pj5fPWl=<-9d0Z!5a+a#@O?i6Qxb%c7pVGca0%J%VXB1R z&RmIb_G87F&$x%Cb3fmBNl}~%QXLcv|A5dOG_||?1C3h>v<6^CNxVu%;7;V@a-GH* zCO@XKWrsL>1$b(cY)))po|~#y`9^cMzN?t8AyXq^yeX!UAP|pPW-X0=l|_o08kh~= zA#|e~p=YY2>T2~QjuB^b>)I6YT|LPomr_>n!9VKH~ueO_ussjF93 z0?b zakcQ^y6~nO%*;?M@cNi+UVzzS4QUuhr-Y4bruu+Z)FKO_^r5)9Xf``E`bZ;uh4209 z8A-f(Uh5u$4O&|Y2XRf)TTJp8xQjr#cObJ~^9SjXim+(M`%(<=bXFBQ-5a^}-8{-L zvG>-^85?b{i)Vo%5aJIIpJW_{i-Bbef&gET(2DtvjveA#cxxnNS)zw{8c=ws1}@+T zJ^ExI&#t1?^F}f&_-|$@|3~1}qx59R@<5zvzEupI_B@F3s+RhgUQ-?d`-8`KG_9L; znTnUxGGhC|`h$%0HL{$J-=%We9V(o9Tl_)}NcH-Xz+x}HcPC>*bnzU(6DC#K0y5VF zv+2QO&@Xg9ifjh0^zC(y;7+hEt4umO`KPHfFXu}}&gPA$Zp##2coO4)X!X=gSs=VT zOInF=pOzTYaZWsZj{GU^sT|Z~(6n6n=BwFnOoGG+l-#Q(h}lHsrYE^Qcp3%cNV1_; z^brqsa`b{SPuIE0lPsFW=?Y@1?5Z5d$sIueXJvE*57$;v3+BkzI$7Yc<4AuPf{Xb9BeHyO3-%;OmOrsSSg(;^Fp-nFgBU(C7d5S(| 
zuuW4h)_QTNH$9aXw(+Lhf}co_lpFF(bQ*f`knqllELYVxtJr!q3ze#A5;QvYjJ2>Y zGC#Bd(!E&AE)q0tIhpTlC;-NV7I-$9LAp~X9YS)o=5x@;nc7s&?~8 znqt?Ry%OOhS4K6v1B*gYlUW$Hi1lOpf1~?yWe2C_{ zwDwsU8nSW%&&*h_#f!I@T-F8f0MI>OhcA!485usoISo5`;cn+i0b4c8V$0RKd!ZG3 z@VNJsja>zaZD_pBYf$fw-Xj0x3VVAQ{LSP0ARwrnEITmsu_Aym2)iwPOsq?T1 zz2#-+Y0ZnXvCqgeMAgZHS;&yw?OL^WU~(g+o#!oW$_N7TBIB~sZ#eYQ?7ICP`J#>T znF)=|Cv#II53=QjL;sCM|BgzWP}=JYi-_=PH26AMc9t1@umpAg44}Q|P}O@ZM|@kc z^DB)pt>r8&^>3sV;tl|^mvM_8U+Z|^TV}%33q5Q3Hkz`L+UW?-o|SyM%gQK=<(U;X zJE01wvqZa=DT+J4MqCf-ksV%%EWz8hMAjYvuK_bXiIAI0a_cbXRzwcs{f2WAG~7{z zqE!y7gFqPLa-+Cd#UkGD(dvfHaM@v-G z9j+%F^%`;)?C$tyW+~~xJ)bYBvi{(_qDAQz1Ux`R{sa2Hs+#R&w&^k!)r?s4qPNQp z4t#FhhAJ?f=c_+YT zj}1ei1ylSMsk3@;^*heY*i#iqM`CViUP@A`9%jQ9lj7hLV78g)8zlAiHpl2nZHowo zICn(g(YgYddL`l_L>r~YBjhu_;TBOwFJ!Bldhdah!A0Gc0;kyZlsAafSf~EU$nTPN zSb7^dud{Ydr7rzKg&DQt8$7_XzG%wKWfg5Mp@}Fhp#@{}naQBn4OBZ$J9N7<}-Zux0+1Whg-!ZfVgAI>EUvM{d(;YG{Bw`v@fw{ zs=Hn|eR$iR4>>dOV}w$C$f>Ux-S7AOrcYn!pdZ(QfYjB^Rf}Z-vub=iKsY5g@3h_) zzn01}2mUsTYr=DLYgtEaIK;C6y&*LPX>6V~V|+^@a2{;Lweag#J*vyaG|kWi9{ha0 zP$Gg5y<7ESR=vMq?MixP!l2Ns*&R=ZM9&s}4ueR>Y0h=u1Ld*jIfiXjm7BnZ@?A?t zZf0Q-$@`n{Hz3tz-o357T&v+X$IcgecUG71O{#WnC0$-mzGXWf+wC2+In`xlUN->E z---4?Se`-wikKVe$jcI=>Zj9NIG{xmaV++Hu$8_ZIptk4 z5`VuLJTvAuyhe(go-BbU+JL-HrtGuowO*zL5_rbO$3n~1ksJHuwML+l2kZJmynB?& zIO}YYQ;*EuLk4)ZYdgD-dJhHqy^=Wj$^MGJxQX*aZjDFF#p^^OvUgf|Olq?71AqgX zu;eZGEi$3+Db!R>7E-*u4ufGG`emkDiDG5YBh?6ju`SfF=okmEN>-eF`mf^Q`w`KQ zb7eM^cUU;?NF#^4X$so^VI2uB0eL^YYz!OVedy&Zy^78Dd@ZbrtOFZP?dK2});ysX z++nI`jLDEfzaS#vD{iSM^=z^8148Fuoi4C}eX+y)K7t zS}3;K%P4VAO$#G6)tQLK-{xsC)*f)XY*@5xFYuNbk12Gc6ultf&lpg32N``G?^{cb zADD$4Y%P{(5%amVX64uf6tmDvnM5-s!IW3-hfk8L9=2X4ok)5X-Y+`FBm|!NFa)^5 zy`-EbnC+(FWs2l0UFqA@2RyP|(Uthnvt4!BL9U*xfi?u(2%%JdSjQmnzWIV^^8i7s zfR~#JxWkoGq)rzE`~r4*`X#Sq)BK?^_?yKq-%Z&yjCUo9Yoh^!aj3fa7Knb2+(6*% z?ca=`${=!mtyEVT+lSaFOrdU9u>BkUu_x%aAOuBBtu|wo^q`;%Szl7uJQ|h9$(3tFZB0-M!Df1W%f+_IAgEjo1j9ESjP$FmF=H8Y zTFzH%ky}g*Y!t6`eIVw!oZ~_EO%Zt1Ypt_M%NAbN-R;^c8D_7^tlNX3vo1d`kJ*r! 
zpq0gL`9GG$A&q}DsG5MXX)$`(@A4zKm%ZQR1%9-CZ}4|_jZ;08`GTgN7NzI-vV;~W_W`m?;cv zZlaE7bUN9Rgz+^`2}#qq@EUPXe0ya3A^+FRk|s)+1Hzu&a~rwt7mhn72ue8N1cH}C zg)UyGb4%HwG(vnQXrcMmtm`|-?qMC_=1r~15^I!>F>?_4Y@RHSjk3SWohS_ZT?~V&mGLdR9Uvq0b(WS% z3Lkmg7d$p23`#f$6*4nCrtzu9`nlddL~9~q(;8}OFAKu&M6H)QN3X-TiTkGGWY^kT z??{WpsfqcbgxPTSsC>LFH&DdIx=noAtI|PDzb+}F4%W05ztdgh`Pw;r3!!;iRK^aT=c^MHl#eRu3UEvjF6RuNc0vM(>H zX_gfd7AnkVC7C$Oz&5(&DCNt`R|cNqv9Xn$K8h|xeVOLI!3H9cjXsE4=DmkMO6z;$ z1Y)x-fCik!Go1Hb#`iG{1F+5 z_&Q&KlqPR#p^apcv-OQPBevH-to^ri3paiM-m>ULQP`m=QYR}p>f9UWiKS6y;>A>jiqA`9Iknja+B zZ#Ju`c96JBvlG!d5?DNoYPsp?>7pRd!8RXM7gSw(6kS3tlqA;>SwpiL_8fzKNev~( ztdy{FMAC~gsuD3#*~(Z4>6Uu#T36k0LFl`b(gx!X;Ns%alxm*mOP;At?y3=^WQcF3 zZSqK=TzTzDXvPbugBRJd3JKLPEs^ofhr=TL-sxI@Ot<94vmy`Zj8ZRL+sWl8Hl_OW z$-u7qufN;Q`!Ug42%;Hk5lp9bN#NXFS`yvh-$irH{QLGos|Zmf`L>_dYRlKcHxoHb z=PSs%!eS_w2feqYe2W>0)2;D~4)`S-H|9&)5Un@ATIcWWXhrPf z8N&}AC+RK9R$U*XG_La%9agR^lY?cli*uAeI+`)TOSV*!hC)fx=)=}BKWJlsJolmg zgok_Z`AIy7jpV%T!B2yHT8YvmS~n~JvSZZfuX^VUckLS2SEmtTU*miJ%$P>heMC^~ zx1K!mBN)6@g=@GZ5 zxY?8qKEx^o_2<%H$Zdur4-qge=y8qx{yaspJztz#7Sj(DojMojbK^9XP_2hq5Xf(J z)$Y+3ctwk!7x$u!o78C~oyTq2 zCoAxZs;;|virSFS$2;NL#C+ARea%<%^y%T017GArDI$ek2cAb_>rZH1kIAkolWlil zE4O}E7enZVv~#=a-wKqBn}=kxFnkV1+?(ipZT;A_>CFnqS)GKKs_RIj zWS}GjPs`94nd|4CJlNU3Cdn4X4btX(jRjD1wMU0T1ao}wE9HB_N3@ZNeA8J zj{!b%-et$GQy)F_f+o3Rn7Zp!%&GU&DAJiOvpbe)>l;SFe&n@c-p*FgoPVe#&nM*< z=UNDLDj(Po<9H^1-`V7NJ?P3H-;6om*Gxm+`KOsM6?*Vvy~fl{g?X*$Pt1&dk)A&H z8%Z|(|1>8YMXo6Kv=np$e2(H=wqHaNrK(H8&GE?Ox?HjTtVKkO;;v;nMj( zAy{`<)bi8@<3yJ3FnVdCS6TDnvz&!*~?01+y*(sXu0)9{1SQ__6DhD5XjBJWG4 z+AgWUm?mFCm4&J&cN~f>ryN`Mf&%+*uEI``rFRux9Ui}vL}WNb;yzbYAjH&b;01zH zp7gZnIgcpsgi%TKA> zSM=S5i&6xvV9u(3!6TqJMbYT1ao^M$ZhcOAv*a8C!?XxeDS@-8sh5W3rkB{PfHK{mO302K4#s^#6#iR5qSNbqI08^suDyoldysoy>bN=oS`qiXM>P ze#65d0gqOZZ8XG1bV6gEk$^lHii?|1nf0#X5XnDSIgR@(pFc!#YQ~2wKdzfVX%?rr z+*fXI01PNk&}L0_paygI0gbY$!<^|zH?T-}`u3tdU$}0L;13pe?Jb=zQ;=!?jOp{Y zivs#xcK>pUEi`0-CSanyP#D5FQhl-odd?s-nK)WERqT`Oi}myhon3%BiCI9!E^!}} z<2>y?=X=+~nB(MaUb}8ww0}wTQ 
zvwG34Z50i6B>0Ut(fy?ITk|vV?^}Lc*Ku6VMzy(`*>nY9xOFCLdUqzl zlFvp#tiDf)ozL|FMVH)Dc|n<1V)|;0M94uIT#utXVjZxK7eZ*+buTS9?f-@x{`xdj z7%)ByGEZ?jRd?k_dpuhcKxw5uVr)rGBI2seK z!ymNr4lF#~{fF;}VRaDx!xPs9djOM))mFSNdfa(Y@4~~(xvb^Yq+@wsdm)x>zR5Lq zvD5qjqScZ5eG`M(E0ymZKUV-j#G=&Mnsn|;<2fG2mVRj5%0QL+VYnLCO8Aqq8V=EH zSZ!wWJI9O9w!Q|46)k8YcG z>8p~(nDa4){JLYeMaj6rto;XI&bfN-t4H$Ay%CTtkKfIF!CT%Ulr5nsc3mtrM^v%p zeW{kYr1qUNTH6gzhS!3*%M*6h{EIMmxx*^emdP3kUpY!1|Ori0K-o4|8p>5L1&s&DD8TDX+^)Ca_7yi~K`InJI`_Wo_ zuFgFnM~=S=05(6sS|%`kdN0NMgX8$YoWQ4zIAuVWX=%L17aRt)-mAdXjHORDx!=|h z75ObK`QktS^e6HC=k&oTI%15Ep-PR~yY3R=i*b-jyvo_9d!=b_{Q9+6RzqFQfBU=R z@$%jBY$5YMQ}pL%&%S6=mJ06%%=6%`bg{=zT-9E&ZcY1Q4j|qdXXPGOdGJ$x^Z(~` zOCc*-ZkV5AKl#e<$v@lL_CV@mICm@WkjQ4d;@KY08=+(#Tz5_c0K@aMUjLTfAL;ZF zdcf}{r#iW z1#F1dm#Y+}T*V|g=6%%(W#OqA*aHlxy(1kSqY8sp|6d3_&3vB6O-AL;8QQb7p9Aia z>sazKh4pnd>uswciELL}FA4UJfzNL<&3c#ql!pDtu;21jk@iTb&kASmW0RgFb@6`H zBRNH1jj33)F0zt&wGItp<8HnEmdSGRI)U)X>fDc}`wvF=cP3gy)7x~pKm~_*AH~V~ zRX2~OQf*1sW(8-B@KxoERYglRqq6+`$ZgrU!#gJ=gj2{en>C>uef?x<;%3|Ox6o6xZ|?WQoTvr8~Dx+)mWZG zcImXaoY|P7J`@ONS}D{}m=_Pm@5E#kTN$MK#|*&>Gpl8Xep0E9m3`R6g&RZ zjjm+*!KQC+;*tk1|LOaFTv&z3pRmt z=hz7`i$eWj&&Tpjjz?89kdGy#dk>~Jnt(gZvVY8v{@V9~vAlDs&fm@HPlCTkJYUgM zJ6Qo)+`|>Qw-_AWunZPxq;j83^foiRqdIuTCl;aRYvsU(Wu32RBalBdT~=Yv$ZVoYN;qr48=e4*h;Pj^r&6oKdN;yO(aseR-C`r z>#I6}{n6XyN9~xNZp3~{%sRn8g<~uYR-O@YiF@G(msnvet{v_a=6ffoN!Bd+epl*@}-{)2YMRbQ34)eR&OBl{c6fR_`J z){m$WG+5G-0U1jndo&pMN)>W^cF0w~G z(o8kTbRQ4OXftL(QOqAkdJ(5qQTbcNW{*OD#O3}pZd~CXS{CbCW}klQoc<>h{LUs{ zuBUZc7Ra*!_+xf%akPiteFZ?aitFU^sG&)ES{AbJJ+~4iyfL(pDGy;B?RED`ki7R; zF|xosIoIX<;a{y`LbVOb)n>r<SwkY3uZG`K+v(TP4**}T8FCe(Gc!?3&e#~0@;%aIS@MQ6iKNy9o!Esy zp~wwveG?|7SL)s!CHx2m$Dme~XD zx*efE74lYo@QfCDVZ>e6`S{A0$ZQE1-lmqflDIfOTrq1mnmPj+vU+)i#~sbW=9Q1? 
zlBPSNs3}h^uZDL7bDvD;64y5O%>M5EvguNyZ%DZdvi_aH7FmawKQZmfKBSn;aWR6= z_!cTEoCO#$U~fdm-DQdyEOmg>*V<)f?dUl+_Vvwbjv>tKpDco@4Ya zA~zl9@tqja5rpakxIcuvGLYN{P*_f|GZ7g8QRS!d4ys;wJ@3WQK!RUFjUAB~5>7Tg zgcrn-gL@fQn`Udy&hL3;`VgQ}P{Z6p_Jvrx)W}#nR2F0nHD8F;Gb`cLd0zT_stRrL zgftKxhbY|EzHw%7p#s5;D;NUt&^`nhkJ24ETPQCAFj|L;nkkSrktxkYzEHE7h)rTy z)hJfMiP4V^!+_M(B%5fa3(z>EB~L{bf#57^>Z{A>+TPuL^4rXq3+zAXXQ0rbSspU< zic3bv9sr*7%f5-nC##eg(8sN`eeUF*R$=6?xwl?rnt#)f`s{%?fDE zzP#3FLzH-{_j3KcG_l$QYEZa61R*+8D`K6Xb-~>p_47em0td`Firj~x)DKK>J-TL~ z4%%u5KX&%Gx z=UhUC;M|*MXuyev!XJSjSh_x*F+wp~7;`zYsJ=cqu~R@Ra+eGl9Hs$-%!v_q8Fm(>C!l=oHVuz>ivQ%@2`j{D}`bxD)8{n59bg$2yAj!NWb z3eiREdHi*4?>r@x2oPr+}_%v=emtvyipHRTgmm~KXNu!z8K71 z2f=JDPntyK-RVE|=sCq0t*G18j^a<;FjV^{hH3jHOlB$5+w}OD^H+{aq3#vIL35gg z!p_Cq6-yKybRRdkrza;MTdZlWQl+n_7b z8Lu3%y}LKmHJ3}~i}Hb+pAz5piLz|l#l`7TT^ZT5&H(Yo+JARxv>fiRp+)m0%JI-B zNiqq$sM-94*+jP%}$M5Bh@2{H^F!A6ewlR`8yv@$45A*@nonE8OEV* zO$wVynMnJ~neox!zP5m)E2)-Fac|h=ih# zIIlgUW9Z2iSDnBf7*ICLEWh)cLfrHKc)Pg;n$;`8v#*75Bk77-1b%gOB5DISj@^+X z>dA4vmQ!b>`)M@pk3g=ol1BYL3ACK<3%06UODT`029Fs14QWsMH?GISZCp}_b}}hx zPYEZCSNXQrgkw}53Z*~)^lW;k&x(`CN#i2p+U5Jhg=JH)k<`5YpzDWUXZ})CM0?Re z042b{T4`+mb4G%SMe7>5>P1U-&BN7Y)n~dYr+xu5&Nj5sOF_XOvRj<9)XWfkS7=#% z)-`%u-#Vr6#w2Xdb5fi)d}O~8S4%o94C{IU_Q7ktSTU~&uQGb-gQm7{{ zs4biD)1Z^UCr4Hg6kWiMU+26IEcDapn?t|oilV7J_3+nL{yL@c`q169Wb>2sHr?m# zxz|qQGvt|P2Dx0I`zeeapg*c*5uY|32iO5V7ZDX{9$9#O>Z)oe;YC$hrGP8Pfzr!~VBE&) zf7g%lL&x?F=-1#UHbi|-{PB~Yp{ZLv9X5t-UG}iq?8J27gys^9A2iAPzI}iP3N+G@rw`-ay~1*)ZP;6 z-uK6)xV!%)$DO{%S^%p&0cA=%m0ooH%&*(i4SbqR;AQlKe+;qAOSk z^9R6B?)*@X|BK}}PG61F98jAe9XH7X3dOOZ|2tW5=yF9#+Do!m0A>A=yF%JIzQAAh z7$p2GSG{f4ZBQ$q|2^&gpw=CUTs%=B>Z?^|FVwux!o-F&$M+GOW$hhE$d%g>{fc>rUr0P+S$9U0ikkFQMyS3H1vxN^AN_GEJD+hi!_x72lW4dl(*e`54f>WZE zVdh_t%hQzv#YBy{CgW!NJ78X`dcRixfvrbo_+*@24^Iry-e3Q3wV+-5iI-*sy#YfI zEs_7xryRjO{Bq8gUgJrG!&k;qv}oVPu=Sgfb;xhAhn4UE$m;>O{y*H(r1k%!qE}4= z0n4Ko|N6t<3?vIi$C`G4-D@?bB(^Pu65EVGZ{>^{N@ISz@uRn~$FCv&+V$M2L5Z5* 
zGzJ25&Ts~bk$Ey#+`pnrP9Fa6PX1cBD>Q&aKdvoxAx7aB_w$#fWEmZr(So4EZqU&P zhiNvAEMEUD)m<;!WcoP=B!4IQzhou<{%CYt3NnAu(!XjuN8%SgTk)Mjd)=}162SfJ zs{Yy$?dZcHWs4K_l_tCGF$oqwCg$G-M;2}ahftVp{C{x}e^MWIpK}=M#Ti)ZZ61=w z|I0pp68}%9XY1ruTGQAWODD~Wnj0!6)0ta-)v;eq=Fi1x><$e4E}j;TJdW&C2od-9 zul~HTeBNRiXAdDp_uc-@ZiVp^s)3f_WdsJ z&#{X|0DJCMK9?fJRC?=ZdI#PePBx(ENmlgnh2~RMvHuhA{{#d%AH1duU&zB3B|+mQ zA3R^sSI9cv*k+XcUJ0P3BE*iHJiAHnO=o`^S9aw89X0@xpc|fRAWtm}uys z!68?e*XSBjBm@&*v`+b803d-%VYfwZ))_pOYgQf4E2m;8P_L%CkG@lmjc%po;Tw)f z4NugDwL_fK;OX_&%}?G;$UKuKzmJ`(Xjc13$NEmR`6ohXL{WPu0g*vDFRi$L-qWO% zzs}j=uEYL@6Hh@+&1ZKH51vox4Gg>JlkzDwRsYXb!>%KfV;4BuH>O)^wxi0V+(Onj z`sKv!2+glQmzge&PlDg>?!2*FOS9~&wET2?eWKD1|9R3FW7<2vJ=*YO$U}F^uBW6& z{pCIDqER&seT{~qw<2N*!v+^%hGU6pe#vO_#8iM!v#qst{=<5wbf5VMiO=&5|M(fu!+#Dn8>q%M`&tJm*n8N?ou(E{!*#Og+o9nfLHJvE`v+X9ZPoUu<$ zow6LN5ZH^({rq?v&qU7N=ko%$Vv1NeDw%3;KlF`=_l7IFZLyc)lLBsYC&k|59F7{) zxalSS3zukc^fL}FcsWb;?*aWl_CRpHZJHN}1N}O-1woWT!edbe@-h%XuYF zPk-+?n>_{lCNt?$4M}Cg;bdHb{md)6xA%$IliU@|ZGx|Lf2ZvK^tZ#>7J-*Y2B8C5z(MMBC8Q(!{OBUK7Y>I7lxsApuPzuldv(ZKj-@nyo`}6$P?L#(GANj8tYno(^HN!j3 zhF;$^674+!Fur9uV-sPk%QSr-{zm5Jc?k!q`t;3pKHbUHOCFJIjS69rBdL|X)lsq; zB4f(E7MJA4t|O)Lr8~168)1;BQN*|)AWFGL`8V0#?{kLNd&Qh<`KR!Bym0-DtD2*} zenPpE{=$Kzv-Dh|*B&jnyR8O-#N^)LFUcSd=5GN*Ssf1c&V0|%W=DAI?6g`F3J9xF z>_m+(w?bQZo@a;x^`^wK6Q0XMkKXF}Qm;0{y~z-2@Es)oXXW_IZO~l4U#|-EGGxw7 zV&>W1v3D+&43*5ehn7v{>B5)TpRz-CJhx#<>=pkXW$zi) zfE1skMVfX? 
zDx_*s2vl%2fZU2Q>kJ!T$VKfpmm4*vEi6{BF4lPd)Na`qF1x?^v3^k~48+bFxeBhP z??Nm$gy#|o=2muW658+Vvon|7cGh%ug;s52)Yr^ZPvj zMV^;k*%1@d!wd2670IhiouES^k#OMkR}`@Q>!Y$V(+1aDi(Hlyt0FgPwD0Zn$4mbl zYbZK8Ih(#7Sv|*VR3ojYy+*L16jfUs78O!;qNj0 zB(8gu8c8wpi?YW#89$#@k2Omb8+L-k#ni< zv;2HEu1zD?>O>Bc&q75Tx~cYB+8dI;B}iE0kf@@_b>mi07hP|#bUA^APdL}(HBG5$-N)7xooF4GSzDTB$5T`z!p(`Bdcc#@FO zuC#4(e7kI8_&?FLut6)*)1+c9dhtD`TJ~W$0jmBmi}KoX3g7$ZT{YdEW)>KqceI@zI!tjac8stI=d&Y+ z3vfXo=mvRohv1(=)GH(OpjgM*WN&N8G{XMO}azU4;*Y8rYVm`%StYQ%}l z{9|3gi^--UcnJqMi&;Lj(n42J5XtTQ{T@le>8b1Z%#04>CAwHe$)DvJ)rxLtQF(=Y zSR&Y#+S+(?Qv=v{cDWJUuQj_2t z!Wm=fE93g3b^8fa477+nRSSjTRrSU5wKydfsqR2(UyUd&lv+1_RIL!=%>zfhMe_yp z+5HQJH1=$^u6aD^P2GK?-oWsW+d1)ZgUS5k#n979whDk)(oAF7LM1miVMBpD&`$kH z&ja7pIPckas6L~?zAE@s|o+%<&pRgnnVj03+vN~g?hR$==&S> zRQhjKnp*F@|SX(!>o2xS0DFkZD-ag{ImAhY|pW9_#+v2eY%Y zN<2I~5#B{H^>vrl8U-14=6DL@HUNGLwPHc zc+ctHzkii>?>AMIiZ`+yYS_>oM-X|+)(RVHM^~TqZXTQN{}JFQyI|%xb82S~>YgOgu_=V9=q}v$rjKBKSNBvBNxF6wC5QJE8{n!^W2|=M=r-`CYyj?3={( z^+kg2g8vMsU#(zX_Fm;~y|f9K03+Xc-hNYx-=;#F7@mVVsc%IsEG(=fBswn%AC1D+ z6w3K=ibg$_pB>rut*x}^9Ifp6xs=RCHJGbVkYKxaxWr#AjTBBmw+wC)i~y2M^+?3Gva z(Y?F4%FVy>QfMdt!ZGE+-f0F#+MNgD#_3o)k{VEnZbhzmeWyUro)_guj5WUK5u86E z+D}w`BRNajk@Jc|DD{eNmDRzODc+T3Rm9WpI-9{$4L9h1bxxS2sW zW^2G6x9A>&sL#nG4*Z2BGhBkeQF=M%e)lIiC}SOMOhO<6dM(U}P=CzG6F}~opi^lX z>G|0xBKGjlSLiclKyNY=*l-TiU^fM7=vAX%AN84ICpCG@Z}D};edalrwQMLOBWv>} zhO4VFbjl{Du6QfIQ9TTLnIrDIduz-LKnB zt+vZoZ)P}joNFccdj<9HZr!)^PjsbSeNUxcM#enM!$-}U9DdVwsRSo})nLtC9t4ip zRwHb6r-*}dQFgf%ugCDcEA!$JN4|OXye;*cNx6=4Vzw=IUBhoID9;VJygMd}x_aY> z9);Qr5V?7cUb(G{C^*YSCvri2`44{C904w_IfpaEBok7Adw?6%*xiLyw^%Q2rp=T7 z??J}O1lr?9Yi*Z`(3@oEnJmrwg8h#_?s*Cu=<0sBfuW2OeJ}y*w7DSjoEHMQVp()iT89k&o&2nU1D{*Zxm=M-wg_!AGVQ-<#r$2^JtY{)b7QSI z=D+&cZU-&|=tu;<5ULU&mLa2EwLboOs_nGN5_HPz)dD=+0~p*rQI>%QXe zN!VK`aB$m%DyrM6j~-a3Y+z4Qe7#yHeA6bh(Hl=Gf8w)&kQ#)dmA{IZEEFagdg;B>=<)HZ`&__*1UC1NQ<{kcNVq}nA#$I-7&J4@)(f*M`-0f$avh(a zFwx}&CroaHeO422I3pZ1yKNHOZUo9FBvl&rFl!Kr4QwqF;+;R^5~wi%?x#%HZ-kI5 zxo^jA4k=l`oF_W7;r$n913MTo_JUA 
zx~y-A5IMQ9>H&4Rt#VlPy!upZBGhC(7nuKrjka=-wZdYUw?CepuV%qD$xc{}X(?jb z_^-EIR0Xe=FDi)ibM(J$H~eqE$yXYihN8}6+9V)0CN2(&I(mlCxV7W7FR|p^)v8Ju zB%7mtGwPe#@g;K37TjaC>N*XBE0bO+&87;2st@OI*Yzv-!=H+ElPc(Y0}xzg3w`p5 z!fS72(Kj=BDE*Wc4UazAToL0+KFXerN@2np792$&@y$#(a!JhHbi>Dw7fCQ69@e|W z?rss{t8)cXOoa?QRjBn`S8;UtEKqWUbLT;}RzaA%950s52uN}$0^xrbzyI^S-!nOC z4RLd?_xbG;>n^332kQBf?9^1)cdfbCU)OQl%_Ph#E8FxA8$B1GIekBW5B#-BDAAx= z4=Qa8Ij_2vk8-r8g?b1YmhDS@Fex)UM^QcVq)^45>#V6Zjru}w+6%uw;PmM1=NqtAQG*n49}QZshT*57TXL6jo5(5*(y9$ z6hN<3)ZKv#9wv&mDR&3BWD&B7@+*;rl;<6cImfx7T(^yoP%E45Tl-{{&&F)nwU{|M z0AR18R00|0g|FSB9@CsmqvgM4*C?I?W#*O<=AFN-g1#pzqiJ8<-# zXRN!;f(@{%qYk*_-VMz;4pXcP2ThpINToKkH(qVD4Aytm&}jDiP17aat2pCB`){P> zB9z+u?Z2H8oC@o~B7an_dxPI#3jP(_qy&LBPlY?I1J@_*y3Ks|R1vzyy+cy~`V#J@>Z5Xfy>1zfNaaa~?RE?|PVKzM*Jjnd**GY3P?x3rg4@_)=hy>$NpEp+n^MF-Sm=qs>gbX zOm875fO-E{=8Sh=F>wAHBdEg4&W3mCVj*#3`&KAL1anYp7xkqj^(RUN7j&6W3wA8`TQyErgE1l=N8(83Kh2c#;|y za^&La2(*_xcHssD=!qC{f!j!|$*o3ZTmapJ-oOq+t!O$0>pACPA?A6)-6-||G^?r5 ziBd-e7D9_=8)dRy$0g5Fqjze>(I6%V$#da3;|n|oyGlK;o9&KZsROr*9+Ubm3o;u= z;S2F+o5U`d;r*+fX6}?L!NQ*Xn=uo{dCL@A?nh`RiuB@ z!2TOLX3T-*TJazST|@N%+c*;l8WCR&K6{Xr{H4{vfvs164GDx^|*upH$ z+NZv8UCF zv~#pciR+0EsG{x0j;sn{q3Z82x}v zrogz62O0li_wT<)dW4|wA!m$ak>zI^TJ7{nPt3y8G4qjn=*18675Kj2k4l-eFS?qu zDb0K?J=k1kZOP)FcashpqqryAhZo~2g)Md{0wKt+JYO?6z|#r0BsAPm z>^jZ9Hb@zqA9C> zhK@$4{PVid92u6p&^am<_DWeRnWLKc?NuOI3w&gVKk0Y|5&!_5nUqIO`~dyi**yQg zB>xWqPAWuKvW9necL&?9H%^Y1kKBHtH(Bu7%|x7jK__Ziwy;_k9ldba3t|2;2%ILX ze%*ffw%34XPGM0ebrO2DJ!w$hY%(q=b3G)yBDewZ^yy1>9ta;?6r_AHS6#mhK87!) 
z04N|gL)p!{d~g}vsn{rjvGwW?j-=4_@vgb07uJ%Gk|He0g)4;*@AQbT{?MGi>i(W0 z(>m&oX>$ixp@~?V+Ve4@iMhY99;h_qcVPS&U=X^&Ko_6ON)}CqYt4Fov@0mVAcX0h z$IihMTLqM}Sy^C_>YrSz=|6GF`}&_4=>J=Sf33|)UaObjFZ2oD18n`>P=!Z!8*mar z_eZYIqwMICu#Qd)S-<#K0`4wOn7a`>+OrzA2?~0XNgI=cx%E!cdj&9iPaHV9W`_$& zgFYe|Dl!TLt*R=s2PV`JbP8F`pb&o3lJJgMD6Q1`Mu3m${;x|!Zj;$=UJ$RXZc=4Q zaYSo9k&JP>YxG8<8*gzC8VNz;ZLNxT=Ms`@c}ly7&`%c%mmaXz5_nx2+nP}g*gx@y zc35la1c|!q60aotaK(;=5^?p$1vE3AOpU2o=Bq=-Ci1@3qQo zM_xWYI`l(El)g>m!CZT9=Q*9WYNvPHDQml4HeU7uhbr%FRQfg3;S_;EHR9LH7WRB; z7l#-dlTyXkLAZrf4tbWCNFH^@7#=k#xx9=|<~8+$QYrB-Eq9K}0`riZrRc z60Z=Hi{Y$YEH&6s3GsY7^R7d8%ZdsqfV#81^p9hE@k_)vy$aD>{4}V^^YRS(n=qah z#`g&m_*6dnGcdm6i$NeQYm6|n-szr)NF^ePCz6Pi_@I2ZbLNYNWu28ZbUrTEcLv!x z18_VrcXJ;K`_Di|463}W?cH@G#`xieE!0*`A1K0db4xb*Q#97VH8-pMxed3p0=+@A5@T*Zj*@V_2nLu_*^yE!} zyYoR@tw@mwQs{S&M%!HVH6xe471S0A;yY+WdfLY3yjIrZnr|HcrJ0$-=tpT7IAkb6 zNodGq?^aNhe_EYw6l)(-1{puwgW#u-(dwt8nN8zuNBdy-!LLRuJG(0Kvv^$e^7_&K zkwf?J%O_z~m#;!2=v=YosV;)Ca?P<8h5HL*yNuWKR+uQDiA-)Mn#6D^5G3(cCb5#u z1yk2mv7zsv!V524XZX8cpzGYKH{+*a;>FT!9$+zaMt#dLGn?bBv6Vd;?Hi$bZJLIa zGV#3pD9`7LiP0V0*0a5lZ02M7yP@N?yeUPG%)xhhN`s# zdI|H$D~|@J?sNGf2AyL{X19n5w6i?zj?kUd5JY5q7dRB2w&(qHZLaQQ&TH5Dgepm> z6JjBuWb;vhQ0c&9k7NTs{AR+|(qk^B=k~;LrD1S(>Se_*LaS(<+oK7E@&kq(XvPo_eHzaps^326WAr3+B-~3ye2*|V=Ox4bYybzvYNoZ=(;XWHlH^>f+|AeoG z=Hik{@HeX#26NJ`UiXe*3x>y?@PWH zA}c31Sfo?2w=*eMm!!i7qb809PJybLnyUJ_`D8f@#gB6zkB`xm)p^dq`af2JwmIwu zQ_8k>f?T{8G&l2=I9OR3qv`EjX{zl&pb&%>+*8BJvzSK-tw9ob03^<6o{_lIM^$Er z9u9!=Eqe27+S}Okcr4N$D!pB-1o&B5FD5)!iZ(!Fv*~X8*AV$i8;R~RiSF91i|D~! 
z8zYXnUNzBhgCiU+@ZM?JAauzYmBvUnu9x9yT=*iMdZwQ7jc^H{p9}wzRz3L5?2w|O=F^lUD3=LPz)0;<2sko7LJ zJQ5p6=P|jO$xgRc=OiQ_&>vTaQ{-%ZDIUmo?Ty}d`$q-nnucRMOwj$vTulR!^44mO z)4=)zjIPMq$=cHH_niSM`p^0$h!*>b7$`w-yKQeq_PM zcyEB5T!|+XQBS1vGtL&XgH)eA(|K<|gf|Hm*()yIWeeJ$U8veCuP-mmoK}AC&%n@h zrnvRwg9kfTwAZFkdsCeoXF>MBm(7KxtT-g;e)#2UV(z4)eq{@WK|RdPYUAd zS~1mWr$ZF$=ypHK!X-{7_Rauj`)0c_CY))4>+axvB|(vKlw*$g03bjciJI2OR}sWS znt8x5CqoACkk)*7S+9&c-O@9#L)3|7ZO4rmsoki59GFuXl3h9gaJ)yXItyhRqADU# z+CN&ZU1?%SBY2B2M8ptR9XK~lQzqo9ObPs>5-l*EVX=+>(o`vY&a*PkHANgnw zkSG?#-Ye=OqaIq-(_JwBNqx&fcuy*kk)baD^NS^dzBP#@`6`ri)8~^0ky&>bc%PJ= zl$f6{^D*YDJOAJc6&EEcb=?~IzD_%>z{3uY!7YbV;{+h|rI?1ak~QtjYYc>ieKF#m zq9!WNaojB&?Ad=;U6Va zTc&GATOO#Ss%d&Tsv)bgu|W7WA0{kX_>*XLMu4v(*FAKigowEgWh9TSy?gp-#Jd!nG~<=!J{)>Y zYo6G0!B>ihRixoPi=l`)Nxf{Xs|lp{``*yUFWW68a;?GG=XLaBZoBV>Z`3E+Fh((d z{@3n)i6#0yb!G?G3YmDaACOn)I=X04oX^jVG#!mOjmWa(Wz>9??dnDm1PD9$YdA zm1`)6ngcTzYHQSw`t@r@R#t^X&iBt=8EaU+dVuld&L3Zvw%6fI#^_nrEC&TzSs_j7 z&2VDnu5Hu4CS@iAjl(x@vCY>F(kqj>s@hDBc%w_gTV4tlU78mht6*gnx~Ix<X{N`Mc&NcuyZ0f%VNDY@j1P)##>+tt)exI|ZJ; z$mKe?;B5Iu#t%eDFT$=K0OC&&H<*2>yc-{UBh|Dl$IxMeRYVfM&Fa+D_KRTBzCex2 z*e)?`@42HM-#2dU>{RMQ_6^0zMItMI=||+SOIYWg(IPLbf*uz_1`X^-VS4#7SCPc6 zseH1R&T+L9liYVlm_p~w+6XL9r=`9veNJGu?W&>Fk+5ZDlb^EMzVI3K8#mOcQ#jHG%Wax=Ptbs?3hw z2|qThqQ?zxfPwYo3LfFSz2h)GlS&pZzmSQ-)h8E>NWC2QMpe;<>OU1eUVbN>ct# zrHpaS*K==}`U)M^)h{9hvAt@xAylubCULu`o4I7R3f87ywQx{rlfy|7cEYuvZ^};I zn5xxPyy%`lUAR1#c>2A}It8!YKV9Q#DKTDqW4v&RTprs?x=C`!(yv<>86-!?I1%8R z*ScH5m1RFKEea|eOaOlG!fJaIxdA;dQ$B&$Pn9e$zIm#%ss&a_H(;$3^Tuc7gUCQS zR)jN&jbEl~lZk$Wefp$9rK0fIUx&wm&6TIx(`ryK|N5Jz>6R4rHOp8Nb=g6MOtvev z0?D>iWHVs_T>8-qvDi+5s(qve8J`N^YCY~!kiYcky5f0d?sVc+AqZPAF3iEX#(I#m3E zk*wc4%4o`K?~C0d8X&PDlZrKST$^@|x9&IdD#xpGR>6m2oVXCC5Tx9x6rbFji9g-P#IkOM3u>|hZ(!p z$(WU)ui&||gw<{DV0|jDN{S1r=f~TsRJxA*+3Am;!2Lv{jKwt$HX5-v<$kfZ{O2!- z{KU62wo69sC2uK(JlX}h>(9XD>^!^N=z8Nl>tI|5vUR<=XOO~N`lRGbcHjMK3pZR> zCCN{Bu>bthH~hWQ?cF$13<+6ctFhEaOxW@-9k{^ZQ#L|vceA$Zmb~TJ2hKlJTn>7= zbZZRbHVvX;HI03DqAQ4T^CntlAoQ2T* 
zu*szayJpzJ>8RP!9sMBzVPaA+JbJ$QKEU5B`uK1fuYUJ|XT_@v%E(3=nK;0)oCGPm zstoIQJPfB47)+`<0YS-TM20$&P`fLt;qNzR*f^lpwanMB$fwjl!jZFqOG{aSy=0(=6CFRN6*HJ5#kq zF{?l+K;?*L1GaG#*yqy>eq+a98Rbe>lJPTLle@f|duLiV7Nh|cuo-AKf0FmEWT{4e z?fy<8M~t`UIwb=&gCu!B^;8G%wSPWu`WUKYi$A5B+>n-AT9ZF@0btffI8leb4>Yb@ zGP~$)KB4~p{#S1OG*Nd`y&0@Ya70^mkF~xp#3s}P>uEEn8Z5X-ed7-g;x@{b!>MN7 zq%j){ZD*jO^L<=Am_6=!)T5i}cA?&7P|V-*{l`mx+PWcy*qu5O|J?ayRtlz3lZzSy z+~8vu=P_qB2DVH;7Y)|UykxpIOmfPp@rqlqLN|yEy(<35HwrcXPduVru*_P}lG{m3 zcDyumCeI7V8W~Yveq)<_PX2}=_(m3{#@>&q*JCeIE?ejJwv>q`!tc-1L$sfMB>93; zqqm1G?ItcgPN@N7KZ1XOJ1j>F6+F_)usyQVms3?wG*b%lY9Rx_K*?j>U#*?#hx6u2 zyr3bH72Hi~7nSaXBfPUP-TftZmL|s~T!FZ9C0rn3W`hheO5g`4dxF=)X9Q*$->Qj& z6dtChUuSLk*&D^E_x1H@C(Wp2-yIB*VW+U!7<_9)fvIUhovM+>QhmuDsUN7NQ+F*D zsr~eOaiZlhAj{9y7b6MC=cyptanxLmZObcKK4E*qFOrrEoO{8d|Vqy|RnXTq<64)obvQ}!9Y3(cN(iUeb zzQ%oy^H=!)-}&tiUo5EJZ5HX&y-PuET8f@0dd0qt9%jf8pm8f%N8mduINevA;ehJuTCn}(=ueo3AV9Yx5O|1&Oo!{F=jNfn($c}LB zb+V2#Z0)nRa(EWE)~|p5dW2p5n~{N4V(e3*^KiY}Ji?NrH)r8PUh$^wbE9ep!}-q{ zWLYf1kMCj?4N1e`Zr`>GkQV+ZsHF2#MsZN@w|EDUT zwJzSmEQHX;@K{x%wkyheEV_)5qQsVVl29a?leLc8(1Z_w0hc#Wp-0Qb7 zGpW%Peg)tAlluCQJGpql$L9RB$5@aS_osG(SA<~DxYD$n2>ZO|wYia{=(7|Br8qz# zJf^R|-~W5T>TZHjjM%+SBDP`{R~|BQTfeKeiCqWZDV5JtA8%IYwq0gTb|&iC2OC%1 zCV2Y}lM)l-Wht+IkpLJZ0=mYp=HjK3eib$1wGn%T&vyIoeRe_DQNto*$qDGE=ufem zWvyOfaFSNhe*)teO4L8t&~VX8N*$Ib^{I7T@)M7`f1PIHm||_SXI8IhoM-@Ep;2k? 
zax2r9V&A7gTw+tL=+2C2&gTy&>YOcRn>=gEm1*TPbFy%pYl!FHx6Xc9`f|U1V%#)E zBq&=%tl9KXO8hV>h$*xL$P#QYY~3}PH?5F3(d6ltliwfg^yP^LwzAF_U538opc%*5 z6s==_ZH+b8B)waq(S_k0%`1qXOIXeWMZ1i1$^}VJC3Q<8zC9zwW}r{K=aH8?Km2D* zXGxuZ;xn&9%0~q{^kBUpT&t`}F-&R%CyDkvO#G-$XoshULNQa-=BW_RC1j@Qa#{e8 zFn}gjhAEfhL&Gi*ea5HGGgO+MLhMc?O>Q~JdVpAj&vX9=oywA+pz;=PSq%PvKu(FhNg$Y;z7njyvH`Q8vl5l^Srh!s@2#^(Sv4qC+aly-yaHyDj^*Hn*p%U-rGz zdUy!K+%!#%b@Lr_8hVQ!I)*k5uI;X}3kHDGvBt&R84hmCnFB*vej}T_4rE=&m1Szpru3 z?TYi0u};Lha>d95VXf$Ga~EFSx!X}s)nWM$a?`;Lb3gai_ikZ>k6EcmIQ5e~^1KCh zo88RHM-01Y1_RqvQG?fYnr|a)f3l6nuTR|iWP)6+)nI!TjnV}ZTuLKDT=C+Tg$xa^ zelFym`G1NToqB0@IYbjYLuLHv2#+wVFbg4*gkPNQ!vYCeyxhlk;ayFbUCe7|2*RE$ z_+?cqIijntE6NTLh~H@Pvd-dff&#BuWb^h7ZZ}B=|x>BS`%)k*r#P5@HVs0 ztx^k)#!z`Qo%A8ol)Y7|1xh+#HT^@Z1L~K`4hpY$8&j(NM4l`VD$E6P{*#pYBt_uB zcdeBD;muoVRGAv|HzS&e6t(^6x%nb+=#hf*)XvL=-qwmSxR$nn=FSH{GJ=m{dx1-B z4mYao&+M2}PRUqyoKt2i5yKMeMb#M;01Z0}UR`Y76&^zKJT)zo6W-q(756Ya-GNGU zJ!?(stl99tblRG810(b8XIrBRw}Rrdsb;sG;CkNT6XeHx;1HyQRHuHJQ1=wNW}5BC zY%;znPE(CNk6|D^H zpZRqqgU#x%bBJnV|5=cOLTl&U5LO+f28vM4zXpphvAF0ZWt8rS-Lj_W9jyO!=|B0s zQt9R0R8us{MOt@qUb4LsCSEY`Q=(K9t_eU>)O@|U_s>YTIBExagp>rsigO|Kyan_$ z^x|%Wc_iVn7cbnJDEH))(o1QKEIRTIz_+@EBh^Dm=AjFuo01dcSg(xX=Se>lTe|j2 z1+=rfC7?6}bIwO~Nc_OHmo0>k-VcS6?0PA&Ym92e1JQ+Ld+kCeUV<&JjPSv{yxwEl z(dK9}#N1>2kX(1J#mvVa%ykV)MNz#vc-u-uvHcJhA z{UVbc6v&Z}JP*y<9L-N1cd^sAB19xgqM(v3dpELEoAx-lOA4O;E7XF0rv+mB=`}6u zC9g+&D5hX58pUafB5jGJntC`X!|w`)Gz66+u+U12+a>we z2p#CJ@zeH-1R^E6zK--9mc_(_a-tiYx33~U^eHmZH4H-5)Gn1kxmvv`^mf9PQIAs>b+-$@f3=sHBN_j4Q?WgXqom?YR&u!Ag*MOL&$wGXBY4gT7UX#3 z;$_RCfBmCdWc&is--$Ev#-%;~ft7f;4HG1HfANPs4gu!8D7=~Z`@-3L@}=LWD*o-M zgW0Ry%ZW~P1a3y=>M~3~;3CnG%HXy>Be3I;9NM>nhXsB9rfvP{-((ii1sM{p_ zP4}8*r=JDE*3Jem88A?62!lVhVl8`b{ohjU*;Y4Y!!9=3PZ3$F*ZyXqH8!-y^NeCb z7F#h-rF&8%7-$jdp78K~)<*^w!a83Mx`#PShUX80u!B_C+y150*7fj*rdl#LU1nWB z63z4%IeE2&Wiisdd!OQ2NAj!T2bvN8e2XVheVkJZ!dP29P&icT81IdyCwj+XEb-^| z0Q!@aESUS5w>TrFs51+8q0ClnLez6IHnqf*{8tq*PX75k>`iFnxkx?1EGrZ3JTsQ7 
zx$BJM=uhqGz=E{gOJ>fk&~gETq%bsgPB_;P`E|Ut;}8*Mvp1BDOdDH4l)&O;dfzI# zDW#T7vQK?5ainPa+4~ap#fo?PvYRhlE#000wa+oOS}OjvGB>*5K`exx!JlrRiF?Yr zz9k(RJXwv6VPhx5-(@8-T|a0$I~pF?e^1M?S9SD8o(KZAQ&grCZ}ru9uVOaDbeYs8 zq`O)RyI+bt-&&mNS4TY8LS9vO{qpAPTZ1jtg)S*4GE+p<7@c#!2nBL}e32Z`AxU{Rk!8_5jz69p9G#zZE#6Sl z1Mydm?f1T)sD~M{W(Q+Ygkwe*C*CV!)+|z<)i3mmHT=zA{`_K%6jj^yaS77sM4xh( z{y~kQ1IEiVM3{;2N_tjQZmEH*t<%WVQIyk}EZ9+)BLP4+NuZV*4V1z;we?%y=^rY6 zj3S2Xi+U>AjyyuXoYK;=50mu>Gi;ru%9Zj1ts3>Gj^cPJWmBXJmUtJxFBu&PW<5v2 zWmh!TNhNabRhrI8ahs`wo!$Z}FW2^3VS!i*FC^$OpOgwrD8-Q5YA2~7D+1VK-tFUX-T@?XS>cqvQkj8JFu;lCnjW}{7aI^$l zmuv?R`SR5alAUWV#MlY=wc10aFtL z6`e8By0Y%uIWcK~oHiCJeWFZ6dHKfWaDsQ7D5pnK53ok_;$!aPIzM12L_C#KpXE#V zEXNB-K1ANlPOTy)TJ3}_3ozlZV%VGkq37G#x9QJ_1+f|nIk}QRDoir!Erd-xc18rZ zv$9)M>wZyo51HA#Q)(qGioPoKC7=5-d+rUTf*!EjvVw>)Cv`=S(y88%QfvjmyB*ti8at62Ni`S_X~e`Vsq&B z%@pDnvPQ=8QrEwsu+VZr_w0_@r5kwfo0-)5BI6~jH0Y7Gq6dx) zD!kO6=F=(@ zUFjI+_;eNXCK*|G?~%KAX+#)K{? zG!&dV9HfahN($2dN!NrmT2yVv$0hIG?N!(l+&HrNR9K19C&kvj?tbPKjm4`dqeXRz zNPPC zkgZoQ)sW%xV?y)7z1xDjei$_&fJwK;-0Cd&N-e7J5$htY8TYogXWanEr3^v^5$q!WP8 z0$pmjksc2;7kT}xWibl#(y6hvZEo`CXFq6FKs1{c;01ytlusW$a%YEKG8r+nq8D_i zRpo*24t8mda9$;$#7^nYw?>@ZJBFYYVeh+U@dDWQ{!wtn$)*>eM7)aXNCBr#oD+u3 z1w%GFn0VS%Pbp*bHN%r2ECK*xuNeJ$y)^FDMvfBS!8jtg z4Gy(bYDxL|3jyG1Ib-B#lcxmo&;g{$X`m@#2W2MVSB`JSs z=L%g#w}W|n=#v(hI-y#Ru2}C~?wV^m?Nz@nw4BMs|3bJ8Do?mZ^Y4;*IdKPCGj6b* zmOpJvvjm!fMH$$fvgLT@HA`!qK-qR#!T^z$9z!}*lc*dD95Du(SE{E-!LmqVmS(m6 zXtIz+x{LNb{CqQ8`-^cO3%LMw5dLX+ddngzwBNIbwI@4ZDM{R))_o$cUAlgEr$t#? 
znK_kCqG}2KG~50wDKw?Al23bKEkwuSyLj1xQdf=<^YzZKZlHhmn{u9hO%=xjHvlZw zH-f%SJmmc)B-%RrLpsUs`-6^H2&`7%mg!bs!U&QCF8`=>nUQ~>U`Bz>P3IfJC1Eb8 zvu*VGA5$SOOa=JuC?VJh58+m?G&o#DQcBwRQeeJq<( z!2y=L%QRq6-A|l%NvZO9!gYSB4JbmYSIE3mM*nPNu+{R}Fq4LE&4C`(X9LB}3dP>) zkjCRm{cqfxim4@G=U#()mFp3!0(0TTxhT^svNO2)W!=x~4T(z2bv)Atj~7e=tG@A4 z>dK(aiENR@E2E%C_IYF_>ay_8nBQ$B<&HvGvIC{3rOn5KgdLfhw6nNW9KkB#sS64| zdQNFfhU?zM^ZT-ful-@Yo2LwF3{u!`E&7-E^^g@x1aD5M=1&HOp-2R|lut%YoUr`x z;*p4s(NRBHAIH(t6&+V24eAn(;2{77iRp|X1+5{^^F{``(KL?`yu1iYO3tJ~bzNQ9 zyrvqSHfpk^k8PeK;1lAJNQ{SVuhu}pPJEnCZz`-C%A+ufr!CasivY66+TY3E%vG+b z_G94YkX^JuHPS+SsyA0-r6Mh@t(aOIAAhqqbD_-1tB zH73uhQNGO?F1HQiv(3k&!bm_ABZj9W14Czt0EW6aftkPH}-F;Hp+^|<5i#hbW z)OSVa#M|E}(;FgK*_eX0nqau>vCV2Ek4FQTQ?+2$&0ObNRRi~u6o@zo1XDqRjj~e* zUK3X@sGtThm7+u?)(stGOtQ}HM3^)-yl2cL_F>j++`Qi(ab z<^V95mq-Llh~tU#)!O|`zf|l!gL0aOnfLDJGbYkc+@LFLA;4~VWZP~Q=HMYjzq`lO zaI4;#Tn4cq5bQ6%b3PY}=YPEC(;-Bpmeez(s=T|N+h$GaV~-njk}>j}64%}=V=L^& z{IrmxI5;pcBbr``=^<3N6{bIhbp8#2-?Z<*zBJre4il3V?a?BqQJd6rIrE}-H{Q=y z(Jd|cKeq45h!HzCud#}>lH##cBR`?wyn-G2VBvNb+h zx|1}88eBn#>Ne0&4|`v(MniM@UL!XyPDr+jP+nc-5@<+>cdAOK_TqcEPcG^*F8G1& z!v&6=_oSn%(2dz4TBCS>1Sy7%hEEOqxq$Xx`Oe>v=l}l)TJ5m818$`9{Ew&&5O%F^ z?K^6s?iTuL+FmB`QtD@r^(khx*DFm{@oNnMUgm{+UnI@izPTKq7Usi-sIXm^U&_sR z{~AzS7S&~{6a#|iGh^|3m}X-fIGNR;oe2b0wm&%|I`^c|0A@W`3W`Sid((-P`q&2i z@+%(rik=$SfK9@NIwxB}RjN;eXtb+ofh3>4ToOIyEZlRuF>{Cy{2+BKNF~jh?fjzK zGJnG((C%XQ>oF?z_G1_fzA|)p4SKc@8;+>S_yBzC{u3#%sY5&|@Sq_dB$z3}(KswY z_#J#1xBb-u+l2Y}3UH=XZkGD={!DpZ($Hs# zKj@?3mC0j@&r~l8O#1}12DvsCL{v0flzf&N?O%{Xzzl2q%iMyxou{;@!Lbwc)sJI@{1L z9iIJkh4OvgRz$g5inhL$_&;GJuSPO|-|g<#7#9L5L!JUiY)JhD&q9-UK$qNdPVuM& zU@)}=yfWNCe=QMYXy?idU$_k2s_4BrPRwIqE6>vRRoI%ARtiK#wE!@=Yu!7Wz(P9oG1?7?5M9&LrpA!e?zBiy7;Ul}v!PRybDRMVmZ8o*RAb(pW zQR4$X>TJ`vYp-&)PzGiG_QW^ zRJFHjX*Q)!DPq5zy2I*huhx!Tr_&0FFeq@)D1X3Yef3S9PQbCZAl85+0a`f z^DG#3zfSZuOPhVoyZ3xm(x+eUZy zT+nk0%o4NSEYxGX^Fgl~R5WSZ{MvKm9(L3+udG>z4=id9<&wlgYjF~^obzSGV z);iYjSf}=oShF?3C(!UvE3PpV^rPR5da2!$(PYY;q{7;<(e)YXRH{hkdL3@5Z}jWP 
za-{Qh(PdH6B(xX=Lj`Fudy;oWL*jMlv~K1=biNZhzfzM_|5^4K6N@~5{Q;(y{iNj( zuRmWZ3pck|KS#Pl<#IQGKA-kvfsowARG{Ys4o}OCW!r|}aV(}{jP1X*)2I5)aoM*XI8mWSG?gSMb{%})aK3k$ch{;IF`+6yP21?J$o?-HS8lHkLTH! zFjMO9;B2h=^o00C^3g-yN!|*r&3OY)7kt8$+8Zx^VQIE|ZXR>- zlwC0EnX?tfiZv;qYb;?wn=FZm?c?6@*Izz-L_aNRugV>3|5fI@)FC0`^GONc7j#bS zMG4S%?O=3`Li3xtg(5<@9GdlQw^r)-CJqW*FZ*p*9T%4NK){i;F|I%ve>y1q2%@3( z-1l+3%Rz9q#_H5EI}@|b;4Z<14r)G=p^b8BMH>GqZYtL){>7ANWnAOp`{V4>xAO_v zLy7c-oBC4dl@I3!TH}aGR-1p57?11@qfP<8VT^~Ir1Rwtrg2`3((1K( z*3osm?YdtiugCS91jw)405#1o6&I7cR4&_N(vU`oFlpHVhlWx{B--(h2SRUDWvg?Z+`eI>*xNzy$CM4*mX7-NIf61&rFoYX?W}9rA z&}MXCsU+#!rs8Xgt2&xkZO)_2>gQgIW`#Ud(R(kQ&vXQke@M75FN+-bs}1%1m1m>Z zxM9e&e$yE_;=?ZtgpVy{0^*ppW`NsA^Z|b~Vb+Q0{B=;-OsQE%e2hB0L+f)L=>_%+ z8;5wj!0tW%t*u7@dVWId{JA}UW|cA`xJTO6YIL^(=ki~*i1w*Xd%vUPb6-BH zMml>AA+BwoFnhQpf0bRm<$h*N+_If+Io#L#gyhu0sFaxNG3wvclW4lHH+ZsCJ}+kk z|7Y!AwB-?S)1~Qm7zz0we`aRx^PE>(Q8c-UhCaf!l>~aKWsQ2ZW|-gexwW6B0{wk9 z)>ipP4A^F*<$M4ix%;M{aZqI&l1l)zqiH^$II&FCTXq_-MirIO(c!h~hk&xQq)Qe~ z&=1JY6g;v)XCM#=CJNH83Hwy{UFOKVSyR>)XhrGRr`T2fn%N6%F zSQLD!vt0K!ou$mHmRu^`n%wn98ojM6JCbTQXxv|2cWZ+(Yiv_^TK(+&J7Ujz;5C{7 z?8rF`GrVvQQ@lvilAJFSeW>VXp&1{44FqQMexq>U_RGf*xfNlZ6RiCAnJTjzdM71B zM~o|Px_N$>v)j5DDKcSet<(==x)B*T1ZcO6}faf5yDcaPb3$XKY=ZIl$Z^d|Uzm(sCivz6D#I`w1rC1uacu zwh5m+O&sT+!%Edc0mFs0Z=0`(G~4@~xx*(iFK$c7rhkeqssK^EUrv5c;Z~Dpx@T3s zPSHCxQ=3<@u^g28oa~zAJEs(HjhpgQF(8$}uG13o(sE9PmP(|{wEAtl-CbkWLx+_z z5Tcc^tjPZ^u8&lS$t39e+CV$Mh^pu%PQ;8>!p#=_r2J4(K%{+{%l;h&Uv(Teg?5=~ zr^q0nMUr^mBE-wFVp(}STWgd2FHKwiz*^jsfo zg10Z6l6h-Xp!ZVn4H)5A7m6&25=rs=(yD#rUiGt7t;7S~zNl8=SC1$ss-*;2>$nVW zJ06{YI13X!!-k}gU)p9erHB8i;FwPD=*yic`NWiMa_LX$P&5J2OKyaZ`A1{x@)DN> z40qAp;^q z^w3h_MdQZz?@@n{`F%Abu*AKIUfG-t7aa0^ShbBKJ|A0?^`-*KRC223@%je9Eb^!F z`RAjnlaJSE&x!^-$rX-&NBGGoMk78{wu{NpI$>AG=M1`2m~qU9RUqV;nQ}BhpW7#m zeizW&>&fhUpXDk2>a)uW*J_gkngCzvEm&T;$-5E{pRHOeXhJmkWAc_ZJyo=)Kabyo zfA>+kY!135O~lo*70}-bMJ7USKThplB`#c?VtRV?-M9#E$5Uj*xMwEXNTkl4U!>Np zreR%wys@5dI_|-M 
zmB8I~n6|y|eS~)3xU>2>bawlxUMnis!hK@#M`yZZt(=30)*)Ht?K92hexK$#w&`2( zX9WY(Saj%JeB(Gt)I0VpKH9tYx{`IuVyn@aChg%55v*jsHvHA1WzE*4(MK8NtiFW_ zcu)U?9ICPXhgOHFO)or}=&2l$H=2BzAvmW&tq2K7e0}VFoTVPXy3xc`M@;Z!tBS3c zO(8_*Hbx5z` zp-V3Pw%VK53Y765^7+%dsu@!}ea7p3+f=U-P!~Iku!G}^UFut^?re3k1TH)p0VdjH z0|c29+F}Frvb7cqteYgdU+u;I*(_pD^YfQ3)*_clW@xG-WmY8kU#OV|UV45!!h@LV zR{#QQH5xK;sg8tqLH|^P7G)ztOGFx_^+n^saKc)eNM31hTzVRue>O}21_Nu>sb`^@Kx=vk5>riAIt960`y$irVRI%X13(9aUay zwaS~>?sTJjrZ&tcHsmzo%ag@Nvy70yIF@@=%6k8WAVW~uGtZ(oho4jysorxqU zzM%==$JD-fzj5CsuwTfM05XI`aUJp+&*`K*S8g2KZ@h0)lnp@3qg%W2hVi8uW_E;8 z6^LJP8PKqtSZ|Nz!-J!%e2+PRO5Q4W_mg+*NYNmNTP0XKeSnuuJXted{;^e@qkH{J z4*Y^@pith%6o3viB0-i$M6|VA${v!$!?i0{^9-{!&Vt!HtL<1QHpJAam0@n2T2Fop z66kXPs3HX{0&=D5j^KH*f>PK@mUC6y?1t&akL9gVV#oWkKZ?T1EgoHv_=ZAC|~ z&Hcz`Hy&QJUzwdHRo(nf$yoMRE3WP^sBuE6)NdfT?*4cxtz}h*kdM!qD+F?)Ya>k) z?K)Y&>b0ZLKb%WJcfb~gQP{#1P<0eKug6y-a@&BOa(r4BJ(0OW{^P;a$p%mTfJCG5 zc^CfLH4uTDN#!1*lZ-bwxO` z_NB;n>Xn{%uGxI6S>x&B7q(`Z_wb4P?u1qFmcpf~6-!y#uOMO|yTXcusX%dkCu=R9 zUj#ngnlYIdIW4|T3MXH{t3~^JXV{?+?f2pf)@LtB{*5>58!bx4M_5Toj`bJMwDpLxnp)TUK4E42D&k^ENc|3PldZSm6|{{PLN>tsk@4)_uai1mPQ)~ zYIx4&GVWArM6Ev;j7|pG19m)>3cmosP@QTV9g4-esw*bOPUkSO(1`!3t=+4$^iZ|cquW7_X!Y77ahzCCKI$@=RUM3~FF zJRh1}B1Y-F?njy@mUXjarZ-YLaf|Vz-CH#;#~ajVNhrzK0+SHjT5DWhty7^QKZ{c( z%Xeg&-1d16hpIz-e9{qh1l-}G{WB#+7EP8aaiWpZXdd^@EFZuej)}7@)VbN^bdT@p&#$ z(PeNl!>^RS4|#_(1EtnYZEh59$fBr(`vET2f zx-^KDVl(BOp{YH)4S~<#u1!0pNA213WgzsH{^zYp{-K2N#jJS8*l_sEuB`zIZ?Pjo zktTqF1H8G;*Eg=^aYv!ERTD#ZK+hjV74O2? 
zCBuX*no>t=`;cPm7f4&z`6uB#Q)Iq?@FwhLa8>XioEA9Nm;8r^Vx{k$ldH^Ca%Qn9 zv+ieRJO-*KJBG&Otv8$2Ow8pU-stlz3vUhgcl=-V&IX);R6SwMVt}EN> zR@4)peR*!+0Muc?EGlU0<^J^DEEnjPE*>oaWI9jE-!!jqzy<@EJr&iTDFJ4wUAfzY zFga`&(;8xu=ZgZa{=dKn?bD~zb%RPpzG5y&QrlN?U7~c<_9kGt48@sGnRn*eG0+)W1l(8hh{csIK4 zIvOGA9_N`g;^!leYy5rFP`FVu^SsF*ouEWWRmaQeEjH7A^(hb-6ezxZ-j55N#%&63 z)w`Ome^+g#u}M@=;XsqUrsWbo*AvSbt-q}6 zXJ}ovOC>3>E1lS7^U$PW@9s4GzSr2L;CKc~X%GJN*JL)}B4;^1x4)Ie9MiXHW*3=L z3v~WQ@R`&8v8A8U1q5|h_rX7UqXAU#uU9dmWSt_F>-gx4Dx-@qJ-!=!1>_uJU+h(7mtc2OC;L)Y5$6Cja>^ zF)eP!f|LbYD?F!0ag#@vQEj^R!Di%D+@|}40rB1F)ACy8K>yH{tuBE!wf!{enx@$4 zN8zV!V_76a1zw9!W$zR{Ra;$i2a6!X+-Eu)VOw+AsB%j#c^`)dG-QhU0$rB0@$<8u zY-zj&j^{0S9J7(H+s9;kv`q#`ILvP&{=mfSPx9`bSKtrtZJrb4_xk#A$|qLLP$)}5 zq}lZow{ND{m0#c2iK{~f^-Vf$k+HeTjUPdBy4tl{5A?~y)eV!60-wY!l-g`ykr(a? zZ1HY=7;o3j$A+?7-t{RSuT0%-1;jhzQ*ISh689jEW@hPxSJbFEATxSauL?_gJosiO z3?eGpK|po_W>Gy0c;PZpAGTe=v9p3*PFyHtueK3@-7=DJW3RS7j9%TBV{_?xHofYN zRl_c7lh^|RTITjlu@2LFSjoLz5-D7%+$YVr)cyB>#5OQzaD)VvK-`?7j^}|1%m8=2ccg;aNUJc;mICwJEG@#Y>*(T|k+|*XgNsbcpe&hVg>lBwwl_5nbaun|`8OZ@ zO%eRJG>m_u`2FM50Co|1EhrIF&`9oTHEkJoxKoKYA)hszux{6Xk{-ESpK&5u-7WZ9 ziB>6}%Zr&p#Ou}L{I@L@y+u0(mz#s`n_GBxPYS8G9eQs84PtD*19S4?>DA_FtxFHw zFX!%>Hw!=nR%bA8%w>IsvpHtXGVb1BLcUD(Mg+oM&uGxluGFi=U&McR&I{47`C9nc z$Y=DybgRNJy$bV9dbO_%`Rl29XYZEL?T9h%0jH+Ty+&^C6719HQ5d{H4P-uDiTxWV z8vxIXGcNC|LqI!}7U8oB^zJJ9>0H?SeBK4B-##S3_C9NB?~BN40ZEMKeK-W|YIRi) z2ta@OdUN@l?5a^}4|FZEkyLUY$LyZWYr7t4zu#g4hBN~fLoI#Q*{XnFYUD3aVt~Tq zKNL|)w!+`3>Oq5JxVh;ak6{ZSZ9|)v?Deo(xqMK|?{Ncz8}iq27>leD&=IH&poj+Q ztxz508l-tK6S^T!aB4-ecvec~Ow_fubq0MWl?90a8xU*L#Z~pmZ{;7L?brP9Zh)h% z(S!}+y&-8~8WZvj@itxmn-Y4(%fn?oZ_bi?J*XCQ&W^iOcKS*9c7bRLAr^z#I@xIR zagUl6;mGg@c1wUYw!AOe9U0izXcPsKBx548*{y#?N8pSZw7H{001syvhi+hP(Nx0m zur+OF%(sn{T>-SCX=isXW3M4Bl7Tfb^t|rY^SX(ypScnM)BrPk z4es)5)!dqxMESL~t3;sN7gu&f@_qA-7OkX^je>-p@$99Avmlk%;$+e{bpTlcl#uuY zzjv9pgE_Oh-6HRXamx^}20_TV9>Uu)uo!xuD0n`y(9*(REXEe{SP_#$RBAW=I5ZGL 
zMY0;LO(eolmjU9L&?dAP`}6m-z0_k1E3rBteX$zt$}>pl|Ei>toGIw2^f5K792H1~Ops%{(W4YqJAZR-vwM)k?%1w) zhyrkSBruzsTeF(o-Vsip^ARd(-+SsBD9j^2y)9`EZmg)y z0mP<*_4+^6)1<}QddTv`@sAFxaUa+?+86-Qlh_aIDciz^?V~3*jJK`lgRGeuE7bd! zP*dE>WeR<)h?n&oadz!h#Ptcb{|H5|r}i(Hy4!p$9WHk`A1y;HfOU#j_}0g~SEK6* zECoi(MJYY9SFmz9950)pEuLM$8$AO_I1%GRe?~<0l!ZcTNK6o%&1@pA3aR+(q)VWc z#*M`czYuEFGsPT;8$w8qL_dGnM7CZPIc@QFFNQ;aLFP?Z_@;I#ZgJg{W2eS=UPs?H zJN|HU8**Uxrc)*2?BJGh)yj(Qha-l(E)m}=3cjs!sTJ?)nw(uLqZ|J73#xeFW7;J ze_*`aDv3FxWXeA8Wq6q#tcb3zDuY@NjxI8JUK--w>ZL~i`#sNi7t}BxU=l_FM?UBx z=<|_~raQ|w|o^O|@&thm~e4-C%EAfK3r2t#CMrXP^A==MZXW9+u zaug}Pr$-{(o>$hx`M}aHU(uagUsd-l&#l#-%yHgW?AMZab^rU0{D1kB?4M~WZi!@m z=1U`p%;t&ycAG5Qc(HyKbRx5y8h2u0AIdM zN&;?NE^?o|zBD!ao$Jkp`9}Gg@Vx@4VH7dXlZwXiCyi^;#tsAe#_A_8+U+Ga{cxB- z&s0=xD`J(}!OJdlKA-jT{Yi@>M`Hssqi>zDN@vgX6SVMvCg4xfRZ_YiP`~oSp}+RD zXv$SG>Di!WYi0>QlDM00^U0{x)hlFhb?N)tM%5jO{?y!rSrOG) zpf|5Mz*5=%(8uP?H@67}%7(a}43wJuKFz7o%ZOR59Eg0Uzefu20ahNdA9nB^1!!6A ziauK&xsLfVDgsV}x&!~oR=+m@9Q?@3=8V|k>T(m(u%hdl4K(meR^`CjvC{TFH*Z=l z;DT^k4Q%E~Szs3;6y+%YAfxR-uL{^&)<*ADNO)80tI}#K2hsXQ1-EyNU||=}ri$xf z(6(L$rnENXLj&iuYlIQ@+4}?TPRE1i@0Yg!rkDZ6fEBdSNfEca@jH&nvnd~^1gJ|m zwF1yUNVu${)XBFYA6p?iD3A@Y#m0%q0VWnOmVfK`>2~kFPqT}~(CE)kslu=&(nb4! 
z&qM&*G!S))?v`4rEkRQ6LctoTU$n%3H7=cvH*epuD&v5H5^NqL&-2#tg~~M>!S9C) z-2~E(;XPbagiB9{9s)U#x`KrtlJix-r7}cvW#3Nzvt6A^ktY3;{OOM?n_?mIG^7~I zD*qmI(B&_4*X74CtwepRma%?Sc}+6v_8~b+?PQawmcPD&oaWiR_=w+w8AS%<0)KKd z4Z>7oWV$ZWcYtQTT6V_HR4Sz;xk)a9%Xo0v5tDrXpVrKviM}@+U2rmTpI2P7`lh3 zI`0aO&Mgsm-7Ahu5C6*s^6%cZ=VEf^-v+;{wB`!K<`eVs|0aY2#Mg(eDbFW{R}Hrc zDYMi-3+Psf(?zb-jn=|1>%`4*g@Q`NF z=q0*jGUJ;$31Ji|K+L71_KT6Q!#t*26n&ZUx~xU~?522v2bg9*Ju2Z|1m^>`{9YJM zSt!F2d!*Pefl8!N%q;@}GwVo*OEfSj30P99s)53ZkS`Ac8`ITYjq4KSUoeO3luDXu zfW6ASvSfpu0(a!zaa!qP_xu)RfUQ6K%+Fh?Wf-7!;7&jrF@F|X zb!%fN$RL?69q7dbur9C4hW2(|IW-L?h(vT_DY6T=0DKdH2pZrEXB_3xAg-_@{Pa`5 zXG#SCfS6jAo=`n4eXYevsyiKcG>hM;R48X>9HcHkr1?3?Kgg{KOnl75ac=OCPD!P> zQ4pANpcVZ%D&gcf-T8QWWG@@A8&wTc08-BOss(T$*I9Cyt_Zha5Dhoh-!$7yiT15K znxni@)AD~qk3@=oEjk7&O*irF-oTP;&fzB~&)fMam6A*!Zwf~7!s3k}U}J7_9`II& zBU{xuc^^T0hvv%*Ok=2FNX)gkqSzeMv7^|kHRtvn;oVYauqIJ!Ypa!*mR1S(SC!De z&`J`%XjuQ2VEl>eNIJnhFWbotSiS=T8?6(xOpoeBFwwq((qu8z28uXStq251u+mpahju0zR6CQ^?Ok7}3VV zTH=j5&UoG!&yS5gcyuJ6798Q#oOdg;==3PsKL9{Agi^+D?lgv7Z++L)3yDO8jp?%S zE_m|F?r920G3r9{ff5=}5s*k&>EnIw{Me7KAx~hb5tl6t+c-Od{!@T$Ph$)FJCiU| z3~2vSt;c(B^&%d?sQ8CFu`|!fSC@J#eg2n#^{?8yr5mbZ_T5sNg6bZpK$}=nPR$Dp z|ACahvLprUC*V#&$7B#b~q^1n?D`1&-0< zcd-VsH_e4Tqz2HaHL(vWkM)1v(dnnvnsDpXXx@tvCPO`aJFDP<&wzc^#dO3XBi=;M zt)NuiC2U*#zP2m%_-vs1d#N2smJc}ZOD1GT6tnFh7^;>=@f681UYTx>#G3)6__3QVFJ;$e(wHLcY_FL=^5KveYu8E=TepuU7KjY z#5~I*e)W9{Vm?5wPh%HKoZRYdv$eGq?^v81Tem*i@cBZ6h5|Tt#yr|5+2GQJl{jG) zI$Mo62OWR6D*+08g_MS(8gu)=6^rPAY|etS$9&228q^sVxI#DIal}SbpoXH8GbGn` zOK3>%2p_=h)3|~Z8T0c^R|eo~r-cCD>iZ#b6F`LzcU2JMEe_FYi23go{I-NA8t63aBH5d9Mnr5HlqRQ>qTj>X&J)ns zg;(r`6rV86^}G8`aZ|IRKF@1dEi#mG_(>_Eg#6b#;->k6`BH=(( z1Ijob;jkqBB~VKxg7_~GBu)WJXj6f+Zi{H_uL4Hqctlx-?0aFHL9Ib${ zn{pw0av1?}Oi9qN|E#4&1y{j=U4-FaO4aO`+T6i6|y7AvlrZ4$+ zWxHM<0#1&v-PpiHi?@}y=0?lsWIZOL120rwKYqY*IBa8Y*;f0>$qW_F+{FaUZ`1{x zWOXSbPXK20^gWa6wE2P}3mf$gwGu$Yd?_km+<6V2a6I0z<1gD+NU;VhZ?U}|j^_hKJPgE;itDo^Ssugge{x^12B6pGknW#{&A{R%)5|_l>XuVD#3@`z@)7+a-?2^} 
z;5NwWs+R`Z8_&N3L$h*+QKmf9>|boIxiC5|H{bH4N>;;gYQgqJZh)y}>QwimGc>5dbtdMp$ckUlQ!mL`+SWcK;8zsaTCYKV$On+(zCV5T@%_aUv zA!st|NSgyppUYt_2MQ(fS4^(>AJ6 zSilV*Pcvt}lF#y$m6t`y4bVGCy)?qX4688PDA?ef(AT1Pu%JNxZPyI2V6rgw@J3%4 zZM*57P?ch7AB9yRTYA@c-wN_dxzFxMTyBaKs(5&$AQr}{O+dPg_pSraHa$yX1W2n* zk2(L;WVe*mlKIbcvX7z<2zZP1w`UG=Ijs&gi6(wJI=zwCIpmOIWb2f>NC7eRR;d|B zMTR+*0uX;;EUIoeN&Te`YTqV=CT+gB>(zjI@Z7uVZeg*9Y!42`4iIlJ2^#$Gs02>IaRk0=Y1830rrcM{R z7u=CXAtF%!tO2IS?k`v=RxDHcA^)XxltV$#sJWU^o(>&stVji{Aw_!u*u6J^S>^UmQQu<`Hpns{B%jW=qa4!P?@eNPB_3B?9`l)6*D!Tf@>OBPc= zxw$?$6H8Wm-{b?NVr^K25Nv1B7+kCOLSOahuL31=Il=?=|=LY0lvdoSaau3ka@w(zn~y}ENy5b z2T3srHU@|!Kaq$R4Jv0x{0T%-Sych6nNiZKOPmp!DuPeb14|;~E%F@zW_c$9^y%KD zS=^Jd!(_z6a8njK5%#p#no5jpY=uDm0eDTZv16$<6HIN8zhFQP!_dx?SCE}>IS@e0 zCNNB+N64GmXt?_bJA5V&l-m01E&Y&qJL1?dD$%8jbIlw;s{0JgU)n!tRl|xyto!gs z(EQ)ry#^S%3ryL4As5LVzAFBm2K`ZGadeh@sU7UAs6}HjbkZ8-zp%#tTmI>fAT&bR zenjPaY}tjsxs#ge*qB^3lB5DON7pRCKywtB=f=%c-2^K#-Q8~{c?q;`BG11`z&uw6 zl@P#OP4EUP`E($LE`V?MVwlRG1qf40WSb>88DPaoCq5fB}ap)776DeB+5|df6;cuSrx9p<*dSxaQ$J z7lDs9;3{!M)eohT^Lx2CyMvn*FS8QU(<#izHgphJ{r~Is`L{2OwxemcGp)>2uDH0! 
z(`6uZM7IjV0;|7)`12zgo1H|e4ce`LJ3UP%{trs=yOHW9taEP))==Dh+9h6d;t-3_ zR9bW47D^%UcES*H-=-IPj!@p6b+?XFdfG=%*;aq9#Q(uFjo-ZUc8wDlPH}v&w!twBXYO?04cFuU{$G(kA6 zkd4a{&5aClqPZ|~J%%TLs}H*yI=9Ts`u%%b#N@}(NnG_S(cns5%CnfA8*36P;1Le| z81N&BKK2ySpU)!CzBxjTJ668GUtpiLpw!k{7tGC~Q1~fb4HDfM9Qms|R#`Lk9l@XF zmB_S@OYL9~NRC6;KsHz%>sK>al24d>EYSVKhx`ZIO&nB(ojO-~{dRo&jB+71dnOzf z=B#&>9*Q_dL;-fiNx^z*9QJ@w9Vh%DH+x)s{P z$1S1C6_dn9Wz7OhSK(@FU_#UxgJ%~ylm%Y@e4`((0-gBsWz1PiF@L(v=i}|k!CI=) z<;CLh^l6|cv8{N3sD}qSRxDfqCbK%s;(FLsAP%LYmKzV`^YwF3~yT?@!{FCUEtE37VhQ%Rr}bR9x0yeBjpDF zpp%v#6_EL+)E^r%N!&vMTe1*ckUimC`9m{^!O~@$3{fXkS}`TDAi`enyP1h-Yw_SlhNR(+@d1dEaXvhX-MQ z>asn@(bM~UN#^-@B~hSQd%5@~_u-3Jt+b6QYsV^lfB1=|t%8mco1YKQ8ZoF9x79<; zhRhks0)DhRZetnW9x(oHH68TJN^#UD6ug@s@{`*ZNBv$P)p zD!%sT(-2dWzb*BDKYHaCDcTQz&1NE)QAl4f_c&wA@Iko$Md$Nn6V7P{QwA?w`<0OV zaxxFu8hNA`Pt>&tX_}__LF2xxyJ*7U?64o$*^`BZIeT&HuGcIdeDVEw-bG{Ev?O)Z zmL;vW5GpMwMt}oKSLbp5C=_#JCBP_j^#o>#01XWI4R8;!kHbRc4tBoaI5S z)(@3yj;Ygpvmb2rO}0vEv9R5hDwnx=?@q33?>3(wMB_2AY^izURE^|jNLyJ#hm7C02z;@d>+wtw*SwNIlRHgvLh7FxfOohRv^{Sfy6 z%3GeufUiayhP+8?R;qHPZ?jLg(%721MSE#tW4?et1S~|`sl5Emnz_vJZ9U*E)|ET; zx|)Sgzgp5YG9x^~`-3(bj<8)#%>L}I%S#r_&?iuNH-pCT{WxA|7*P@tQnO|^vBj@p zRs3ySN|rV6N&j^d<+L&QrL&4p^fmuSdJ1jw3kA$idpH&CIO-dOa^E28T279(J;q7*3g87O15544*v$yL=N2QkIYiQa6EGS}*2ep73&oDv?bj4mXx^a8bLoCwop5C~g%)%Pyzh z04uKjF@ zwG$>QCZUZdW*dd-6Xzr+6HXuNOx%p%g@n)WCGiYzxli^4Re1R9G&eMhcz-1ugKuhf zJ|Ex2o<;abXLy&!)XLY`d(@~B5#vMhXk&Df%~m;_^yig;$?(SUvN^K0>PgozCDs@q zMjLp|1IoUIayTP1Tek9s!dLR)8~V_=oz|BEo8ugNIqR6E`iMt03KH+1cKwi6bMfZ2 zfBAv@T_t7=QId8;WGSMXCp<5i4nJ{1_Y@4+^P9lnKy_osOyeX8jrWgTZ5DgFf)qBNe8Co#e0A!H z6Aj9zkp171#=m!rzTu%lC#CLlg2rYaZ3y$|#KGIpM0LxUnZ#IvS?xrfXTYWNA?rHe zsCyosndS+&cUNtiWYS2}J>%eethJ{sjbs(sx&`Le2L~xo1g&K~0+9ET%PHf(^ z8vR~NnjqoR*k`-Z{Ve(Wi4@h;jxmmYM&=G3OKkJNPi10izq{ft$wBYrru2nt3d z)ZrTw;RB3|BmU}YGn4w|&DmmP3Z?s;V;?ptE9YTkyvpMB`ZTVGgVD*}V^zEocdv)` zyF-_ga&mH(JyDdC)fUS0d62bezHGDj0#@{bi1V^UqC|B^6oK0HBvk{?dwlR|RQd8> 
zy6t0nYY$$kNVK*q;D)-K=tRfzR)p;+&q@m5mbj-3e-S3FT7hoI;j@)c;x1`8n5+20~<8)wao^`w8MWoYF>#NNl%3xwx7+$vAu!& zpHyX^JspO2m?1iNc#*a+@iK2r8A@eI1W>ORZhk|u(H^xPzF^Lt?>sjQFMten8LZ`( z3gU9u-LfC$#W-aaN~8z!5`GCMmvm#adi4kK1{jS29C*{qY^tu^B!iK5R+9I&aGt5} zOVWqayUMuODw9+}cbZZ}>&I@xa|)O(Vz`VcAz>i=19UCl%AG7F)7q?n9~>Rt2=grJ4v<}J9< zu|T!%Ti(Y4)(?SG^lbS(W$6<5OW*f{7=$6gaYtP9r_u!^*)Y?6w0^}Iv97KJy_YmJ zmaDxnBG0~lagcw0K0;CVXb$wqn@z4xy+l2bve_LIx-QpdH9^<7nG?>50qLBP<{eTta-Zl6oixvmb_Zgpo_T~br4 z9cC-0npy{g<~JLisS17PgJg)s-6&^72@4h!_%0Icvqi3cw2g6D++>)SU#9~Zox)IR zO`f_}WDcR%g8fS)@w59X>Yh%wBd4&<7XS@i0Lri3ET+-aiHqnrIu=Dio`lRb)e`=Q2G~-7qR8eaiPjxfGViqTA*LfF&)Ha(?gp78@paIJkF?w94>tRP!m_(qN(p7oe(oIu zbKdeWRSKP~G|%wU_y_~p%&4bhcTSCvV~)l?GiJjNcV~UPOsu8C1&5L~j%QAuPKR)p zwUA)jtojR&i}$I57O2q`ht4XQt=g?-xRA9co633|f&4Xsh~P_TY^kolhUR=NZL~H4 zyoTmMp~ulxu;tp6N9s$PIPK}pLRmTt5N3q?QW4L@MWJ3fdSILqP@ZXEzjp7&G^NN* zl~WPW$2HpJlQNEDa`l5k43U_MDGQlR2u1V<^EZO~922O{-&M@L80)1Yqo%~t719#lN9 z?yF`YG)+wwBVhZVD0bc*^2D|Ql?ERb<9@C?s4P=9lF^35xLXJ+0Z*}&)lHl3uvZwt za_Y9uK&iKE(J-(`ft`EVglyC&S^ie}_EY?mee;w$&(za3t6F*A!}L(rVW>5wqr#2# zcN>>cA|*W}P)&j{W6A`|i?7VqsS1jY3_*%ATKK&2drqqUk4kts{v93guM>d&@!DI` zC>t@hKJQoj^NV6n3hIpPUkwAp^=Wi-$2g%JP#xbzUh(j5rZHK~tUXI8#gcR*pJCX`ywkdOVBGwSJ)ZVz?tF+t66mR^^ zna9r`NO>l95)4NVhpOS$oF)6(WOEiZ;X~;^vueYI?@lB;DahvovfAtY&;FxKZPN>j z^iErcD@%q{mNZK+lM8K1HM;cbO9=sk2g@5FEk-9%k~J^Z@Py=92xrG8c&?+V8kY6L zB0|P@%+~S~hrdmbv<`2GHGt@uXi}+~k69poEtLtg>)W!n&_}(2l1jb>mkklX?GgCX z+e7IdpY2|2W;pcR%2S5{l1pQ%SCE#FRB$03dW5Xz@l{N!;;=!JX4IqwvR|R23!LK) zt-8488*K?BRHYH~izM}sXTPP1j}xpd%2WuwFQJQh%kQyc&}g z3Yuu3*N4R4RBn+9@AXL;5Fkmd3b}jk>+tNn0=RKs*MVjxs=o$dFP=Nj#4v40J2(=9 z@lO(A{~+4uG5x+1O*1LAVZ+ZBQ`ihBU1u6!grCi>p1!ejpF592w0XT~)i$|n=-+v9=F(u&z$kpM@#x*6~T=+-l{%kwaKES$7gwL3TA z4-Y33|P<$#=K9y>*7e zQ6uWCR{Pd0r66`{?Hj1a^840tv&Sq9Vvk6wod~%uN{ViCK!w?iNsf@|hFjmHM}Y#J9u-#Tih02sywZ}g{-E^9$6PI|9R$|<(K6WBTtE$SNQO| zGvj0b!NyfmeJwCCeAM7SPwbNsxiLBfKrm)5Gg<)Me1 
z?O#@4`)~V#YLZ`86c0nkVk(c%9QW0y-MHFTheltyoMlHKdy5N?1$(|#eb9%kvhOB-deTWXh>S3@P_SA7AQ&^ZG{Tu&*QC$BKQ5&R+i=c=eJLCB;F!fqqYvmv6vWH3)S50 zhDKQPn(cCbFhfS;luZ# zQN;rX`pUUWH>?v?2bH16y~O*YZNjbTpF7RAH6tK*d*WNQV|*f3K?&-}P{78m;G`{>4K}oqb``TBf=&=$ka2JYiRAOxlV5*={_itoT;e zvx}}$aceK!fX4ZwNQ>Z1ozwNJ(OlNTOwLyq;X378I(~@a+vrxI3!)R9AVfEaUS{;(38IhQYjj45-gz(2v-f|$&wloOAA39I%lI(Nb^Wfj&UK#W zT1&80Rx^K6*u*&03$xJaHFVZrtxt=4PfV)f+*+-#Pi||BpI|H4`Cew_b>l$~PQ%I9 z#sYw1t;TFslG05_`2+8n3R8TS zYInlWY9Cw_7c+np`eO;HJe!b_69WF7Qw%xD+k+S8rx zeHpb>hfu#V9%HX^-LxOibEjv0q!%4^TU8}znmX{1MBMhT6|i1URgQ%{f40Af{bzDS z@d58!d)qCwCalGu(^GIl8r=BW^Acr`(}+bBg#}K=sO!Q-V^9VN^5?CG?$E2HZ4h<# zZ-`4~%9EI0KYtr>A)ChGFih;jr9VWTK zF&z7tGxCbu!t3kl7~awEw7BVCt8fk{+{SUj?xFcFEFN|{#j2{*?e;R)@9UPwQ+0MU zP1oc;CjN2oA(MRe57MQcsE`O#u_XMG@f*zy`nVA(fOuTByeG?8HIA_f6Q7Tg6U+ax z+LuK3`SuMfpJTG;=*b;Ykq_TH<=?*zpHq-X#wh)UH38-PB426W$s|lO+sD1le*!m( zA6NtbFGbqlzNL~BLz)>hnaC<&)vO5Q;N%3Ba0^qY(?;IFl>3A}ejtI>TXw-{sFzl!*l2HtiIt1l`P9c2&vL12N1vwV2DATp*Gz(_H z3g!G$3qzV49Qs}Ccz6!!Co)(yDb1;fFBs?r8xaGJhdMhoe0Q~E>{)K1C&icG@ z);(jyDWT%)cU8KdRH6F~j3%|N&oopx&qr4&^|FGnMGxW-G9p`PuW7ouNq1_zp@5>J zZgCK^H*1IghN?EOoY|!PkuOsUc zM=H;ro}ucs_v%xvdRvM}AmN8SFfYR&-jAyX7qUAFo`m)|gxgcdV6tWvXr{ifn&e4I zsgB6pfNhBDef%hfj!U5`oIX{z_lWTSget{;++p5yDp=vba&vRJtS4DWdF{x^XVVpX ztt+;dYi@qHs22Oioh}*qD7oJ9?ifZm=oO==X@TEEYuUL9u*tcYDi25oIiDiqQxNze zL%R#A!r^+si`9fS)wD=ZDggcaBQuYMM$5cON+O^oqw_kN532$&4yo^N=kW?lL1#i5 zeenL(r#`u{eXc(0pV?cP{daNz7_gg$;4>Dc7Ahvb(~1Z5H{GHl!)Ah&l!mz8zhX|5 zC zM|t~GlVJxMYtp1OJo)@G(8=6bPf`!k}RV%G0hDJ(} zKkvYmx$_@kVSJV{AW-KOI^j8@Wc)y;G2xdo`JprBIOd%F2kpx|ZFK|d4b4fGJ`BnB)c&7SxNNNF>uGV`>vWzh25mPcS5iz+qdC4^(q09 zuifp$*5;Z>2NRm^`0?8gzd-eWly-^wp064yTZXKpLx*!c>|d%RAbP(o{_5g#O!iHz zY2E^O!tqdnhdF5WKKFRdhYJC5>KeybqP=Tvq}?&Pr7v75n;oZg1j@UUwI~6IDGRX3 zavK)KY{<(4CCFMiii2JZ&e3v?-Qb0wy|*0jPnse&?z7 z$k$YrA!NQ(%?@*lfC5J$NiK6!?@}Ts@|@F?cWqe6)g+D6eBaRY&iDV!I07P<&Y z{e;$kJb~Ee3}w!9k4pQGlaUc1hh%k-51Ov)uLZ8(dG^u}&B3GVUnE*w z;4n~;Y)HaHRIX9$2CQ|A7I08bZ^@$1>|IZet^s*`VLN&Rv9wsauSL`RB~5Z&=Ig+G 
zN0s*yX?7D3cgOSlU(qZTX?n(Dls4=N)#)i(feIUOz=A)+#rH6!a3}jrqt08rd2Wwa z`?t$+yGNl*^KwV+3*p&~dhQzmk}qSW`}wzGaVO+A+NrN>+Yb)=ZW0;(*DUQ$h!_g* zSgOlwgd^-vvH=F(0~9Qy`a?b!+2$+9^WqN1Ght}QfD~~3HE--%M=b16X1w?olk4v1 zW`WzLKlTLqtQy7#$sm=n$&vALb)<(%4NZ483Lkwxvuam1BfqCdTHv7RSVYP0oh-zbt*kuYR3010FSk04;W>`xB0u;WWW61Y7BXs7tnDv1X1KEM*Pjy+ zN!Fuko{Yfqzxqtl-y7x>Won{p%ry3Gut%US@JH} zxF|ZHnM(-U8@K7nJLWgqYY}fuWqL%!{J*>bKiE)STMd@0VfQeT;+7?-uZvp}bbn8S z3%pN0!2yZ3Uj-Q*cbj-qh#wNfV92iY%zag(R_VR%#Rp?Nc-RV1T%9Go{DZ?B0Tyr9 zI#%wGZ0+faHquIm^H3N2biuahy`Ltacb|e5%F?Hv-ZgC z0C?ya88fj5wJz)R6y{tJ4qxO%R5|C)iWBI`e?m7hy60No$D?jw|1SX5M|+fDXF1%; z^bXS8Dv!XijIx%#h74{%@>Bd2Xj!qq&(8a{%iTGpaDcMn)cYy@QO>@p+7b@qaom(W zjzJIsblC47x2f~p2WJCTMrJz9I#;t+xpAkQcRxlj1A{`8SOhZx>C(tlN_8@+?g7{5 zTP#ZU^VC@N<3Np!&-1io3=OHgZ%?OwwUBwckO+3>y+nK6ME3|CeBN^-5Lj~k`?^Y! z>bwTx5_-~LGhsiD`1)FgR2i)G4plQZjy*;9y+pmKYLYs*evoy7OeX@XuP}*RS1!D- z+9LNC{Be>-VpG+x0-);Fa5k90>g0F%sjxI$C1F2|+uB;1T5bDSN^arXQOHJ{$6fD~ zHN42<`D7$xKVM(*#Z&curQ)bez%!w2`zE)cW>4XoFWg$jGDfcV0*n4*BayC-O>9h0 zsj*_Kc*Jw&%m1jlW4YFfp5dxQXJ5T^mpR$?VAA!Tq&b3IDrtLHeytVgAe$1{Qf-Jk1tWJXL-d=^|pyAk7&fgR{cn>ugYbVEkEV z=#Y}<=~bEcbLK@{J7f$qJPbpmqnQgL=*HNexhr7&G3ihGfL2g*7UXusNtRn-wQfZ# za3aYee2oD*lGQ+99e-@)GCtAQ16{dcicuGKw7T8S&tPC?mJ=VTFqH!^;XIbxf=gm+ z)=2mI)Hykf4SecvjD=K9RTkL6Xa;1Tj=6Fr|&B zWLjBENz;<&m(t+(u(ksfRMWVR?#cT13WX|}P)VT=L%uG8gbo-kx&!cRqeKgsI*xDX zXeDk8Df#JxMc|K5qGjW+Zn{w#^(OKAs|%+-=7Gsn&YIgs+}t@yQE}xVP(v9q0!U$5 zysf_9)_RjV>t2&`UVvQ-goo^iN!>P(Jlg@Cf(aGjNi2W|JIkCXULnm~+ zc|kv>Kjc6E;IsnVbO|gdC^xZU5)!C{#0i!u2fr~oXv3KW%YX4k=LF_3Sk2M_g+f>+ z5xMb<-YubkY2WkgE;;sQu8cH|uF`f&+5t**Q*JuGGA*U@TGRpm>WudRi395t6jPqh zN}VVC)oSHlPr?Eu;v9cSJIhZ%RJz`AR|bY-^_^HfZrz)(eAUe3!&5xVlQFq>l{QP~ z0tj9n{g;X(01C)cc4ga}yM1car6j9nxs=5Wz_8Mj?mT^qJ@`_A!Z9%ZQP=*#my`fT z%;qWrBV3!Em+T*M+3D^lYKg$c`xkXCCu3Yy2VGhjbLXY&cUNO!-B0uP$w6fZ$;u`7 z-^b(G;-aiytR?N3Xn)bi@U824o{1JnQ2X3tMcTY89jQd#HN&8}GGHQ-Tb`C0sxm}GJ{K4T1f^RB;DGIYdG zj2tu0`T5ih3|}ES=XqV#WE{C>w%W$e3QZj&krUDLo6f_{FZ61O;9>59z(CQHoLGezLo^k&Pu>coqBNq6Am 
zZT47al)7?|PoCqG`et5;U+OtwRWMM&w~NVID8GxI)EOQem@4{uWZxA8dm-1DxZKj6 zg!mgh#a39R+i%LW3niyq$-F>6=w>L%o${W-Kc_ zyfW(Hu*jp2Bjw;AoA3ESfR0I)>Xh_Px!p@~*>3-6u*lB*$hGPq#P7d`*L%q?Gw&aS z7l{llvr^ccA3hdyd${a)ohU88>vfeQE`{5#g>)lskt`FT?qwO8Pc>|&w5DJ7kALhg zV^_3@9LzbIW;?&wgLv7x1^|6TuRdUeDt3S-;T%hKTXZL9%@GW2bnhzg@GwY_^x56h zTJ-+Z@GSY#Z)MOcQi7GD+2**Tgk zw>|9NQFYXa`c?=r+ak_j=hi4sxTMkrWzuk#cGr6^haprGl+Vz5^CkC zS^9hkEXIy*Cb)lHDc#?02UX57YVgC&9=8a#%gvh9kMC73Odm9*3BJW6xhn*v9T&C5 z93$kwH$-@({{XT7bD*F=%0E)IT5V!N**A}j|9}_KSncMUWdN8E%+BN&v ziL7&V9J>hnZ>HVV&`93+D>#ld?Bx8+)5DamdZ7SpSl{ZB${RjgPj=9n$h>e!Rr&UN zbgn^Hu3CMv@kNZKDpG6lo#t(3s6}|B2N!AX@oWo{WQ5ZK1I?LkzH7wmBe&SyMnk@{ z@uY{R&QXYqN}ECS{hIoBA&CQ!mJ=#Hy}H|V_TOxwX}iam-#3%)!dYLLcFPoZfAhQ^ zi*5zi*`~+X!tFp}qj`*yZmG@q+w!Xj&Gq6?^$NWqCi_+0P9}eoSHP))qtH2)f3Ti3 znas#gk_g(_|B+~bh14#y4A@*4=vzl|q3-XQN!_-w?@lJC{ep1-kP@PJTZR2%$mD#? zYM2zCYIwQk^EwP|?`h;5!F0JkMI1EYz0VV~)2N^cv>eiSF)OgF1Q-;|f)?7d^!39) zKp@d$ciBO0^YN0VA#WM>G`t zYqRgaM4;s*l$66iB@QoEa&;zCxo0xr6F8>rAJ-0mtzcI#1Qt9Un6QG4*`1TxJs@2s zYt?;YEW2-jt#)r>^tOh40VO z)=i+}@t*$UtEfMZaIe_Cm#tk{@_w#(2}cy;VkXsxIDXm+tb0uv^+Z^Kl>-+;`B7Jkk4s!626?hTVI`EF{=8%+fDB6smM4
;BaUAiZ(c`;xbNw{B6b{>E1_*EeY3l&Kpxb zZ11PyzZbtOl*b)6wCYmLg;Y>Kfg-gpcnY$@uBye zi$eg<0>HL3?8de>zj)+~n=@Tu=gZN~Lu-KUv^at@Z5=Pe56X>2q@1G0~C^`|o7X|8m z@c=RL79_eC7+2TS%Houw`T8D$0lyzLCL`;uuA+UDIHa*b7#P-a_A`wTmym z4=KD+WW4@5<~kCx_U^J>cELdI%Ja|HV3A}+=|%IOm!3nC99x_7E zAU*pX^XZ{pE2tgbrQ;BQ7!go2)X2Q<>UpTvQYDpjDy6MPg~C@%Q=dy1=5?dG?$5jP zadTHkzu$XF8)@=~;LjT28z@OfDNNVap~u=QZzc{kU?zh&#>?>rQX&fJ`|^9bpKQMQ zm$^ond=Pm=dZ39@cTPCvHcHe8*iF2;wzLv5i}W1mIJcEm5#t-Q@rV$ z8PT7_-EW@nv-73p_18Q2DFm5MQ|x%z47WW+;>Ng%U^Uj@W)H<=7gCs-cc86CC~;!c zp_sD{u;zY&a4@ad8tRb{ZaQnJ!xJA2f$vDB2;xf_FfcBN{Q-4ZRhwMNZpjzh3?QlQAQgEs`%9V3t&6@-pH22eh|)cF7%yL1>T-7zHZ5%R zUuKm`(l7Ti8<>1kEcdbDoUNQ;D;12Pmkdz4xg_l(?q-k6_;Pl_EmGM2d1cBFlyTI8 zPf`~WVvAw^|B8=)AD$@YfXA1fKQeB{L}IgK$MpTW&J_n1Any{uh!|?;^m2SEez_O` z>AtoFnO(kA3i$PD?dI7eb?VifS;BF{z!F-}Aqw|n_vnVt2fFl?^9?S{-dEfCpT-n; zWhNzF4<}w2hT?a9bk{^RC1=X}dK;wL&bCU6x}JgzSXm1<#&;?>IbJQ(qvWxAbiC-5 zpeY$_*uRUJK+EoACnysCt}mLC>E3Zo`~E4@3k>P!UfE%U+-I;ML=B=#U*SRUG^=D& z#BDRaYHe1(Vxyvutfu^q(i-1dm$sw?+u;5O{U7`E%x4zcl=m0tx%v6ppoXuvtgb9W zS*VnIx%PLJX=bfy@0nXfKco`pkELVt1Hc{+4oz4yI6(&z`F%_*F8EirT&OkJab{YM z{XE1Gb?(Ite?xL>S;?LRr1fj-+wMa`w>vVZ>*L1H3%H*E;}!45=V4w9KKyU_-T>9a zHCIk|ul!pY7RH**SgsaVW4kC4tVKpBSB|;wd(Ng&W}u>^q;yckw$|HRXxAX_=q=7tSTLBridOQ-Cw;T z<=T2LM|N2KQptju;r8@pB`yWVYML3(2j&c08z|n>h=IcMAlO(WH))v0t zYRq)Uw~DINtUO9|UZG=dwa|VY`xPvYfU+opKOOYHMv!?8Q%t+l25*$ zyn4RvEQE-FT?uJBTv5VqHHVB{Vz2nl2lx6(TgHS;y|6T|83ZXhw(rZHw8p>~VT*lB zpF?Ujy7*mjp*IW7?F|_dDlx6TjvLiyW+zpIJT-mD6zjcZ-6w6w~2%OTEv%o;H z!vBVqmD}L%wnH>q<5vBE7m!lKfH0T4$NFP!Vy0?;il*wK()*;tSQJsap_{@}-e-LKNSJ=D`Z)jz9E21n=B6&DN7}8uc6+!W3+mmaJQ32^yw*##Hnn(0 zr=%G#mT*hK!XAxpw4k6O*Q?OlsXEJ7AM5Iq(Zc`6s+-idjq|+If1HJ808QmiS!I!t z`CLfEJgv(E6T08jCll*3k`6mB?~7a^(A-;l%7!Xw$j}s{Mz+#fuSVuXX-eK9 zke~*6O3Uyw5q1aztyuo~`o?YzZPFGtrg`7%A%bvGyPR^@URsGT-m?Akls~~q{!>24 z8*a-_1u9&EBnf~etMuHm|?9I;m)Ss zvM=p+ZNbMvxzS26bD5v-cM$W5d8UJU8pWmx=Bx&TX1eVj98e|>crwgeH9Iz;wO%T| zDPQLD8V^Tc2?V(lL`Z_&GB6kFqHesyzt?^hjC7%ZE)vmGy^~(HO#e>itRQl=MRT=R 
zyL7@rdmpcWCAxsMsTP2N?=NWp=#UJ|iX!mPsQqDR(~u={T8Z(EqsI|>QAF+3qbnbg z8K7@gaA3#()NW|F_%=-7G?`gMtx`HbZ1`DEKSO4*9H+&ZuZl%)B6>kx+XOl*4Gqm+ zwCLT70{I(bAb(rnIcWKPLnS&6>d?p00R66L@Z#+?QZ%2`gB`ujuch|xkDfAw(UIlm_qAlk1a<9=k7Za|t+gF>98Z$FFK79tXIcYnT{z5#xuJ9>#Q$Xz~r379Xfz z@V&i{!BQb-8sD&A%;bOmmrI%-VQ}I5Y@khK3{KGpBlN~ zyEPS`5lU$`+gI!C-+Q++fr`LR$SJ?rnO3i_`oU?kbDVEY_jSpRQYD;sR&vVo%48{5bZV1NxxtzcM|xgo)OxYG!3Mum z#Yu124z(IPTy%&@G^f=xt;gz=CS#DhQyU$8*%*sjZLYB%a+9y-zBmrO*tu|T41N8* zDai#3>UHk(ld1dk5h51`rj8&_(@Y~ehMf_l?s{TFh1^*;Z68~6Ak<;FWdf{RPL9PA zSL8!X5%C=Cy|o7M^tbK;>pOzt zuX#b#I34u-Y9ITe)=w&B7j>Opt7~*=Y3Hwc6OoT^*0T@xdkY9Mt{@%mg_+z^A~azj z>hpti=OP1@jZgNn@Sw*;*S$-XWP-PMwe@|dHh)eQf4J+fAqTV(3(42>LVJHjzT9rP z-p|H99$juK5^v{O##kd>A59en#Gh`aRvszbR*!$Pp zYmY3eL%Dup6ANyBfG(s|D2W7`m+Sp3VuST$y;lUg%i;iZCTy>CvP0)_%Tih2`Sl>IS+gsq3JXC9D=oY0n~kk*$;IZq;DNfQk4Ce| zr_*9$6_z@S2G6CpH6(DSsgb#vzH>oE^a7vr!o1>4DNuL&4Oz(^Qpu? zkpX}M<`90r$XPx9eSEWp4H5{RM=QkV$i)@G=PrP%dHCAEZ{GZcwcT{UjW~~s39NNG zj@;b1mP2&l7Z{9c^WBpQbo7`w9ijS;@w!NNKTnV=~uo*F-BPO`&RV=pC#&#%R&9!Ls{GHm`e669t__WN^AW&tz z-nO$ZwO|o)HlD=?zv*NW-C78{SyS zZj^XkNQ3j`SVPoo;=YD&oBG^opNLp)kF!O{XVFI~y=Sb#`9=I{yg^FMe??T&cIB)O z=cd))&veQ{Cl<2S3`&j#t!?$XX*Q*8&sG;$GFFO*oPD31Q$aum@m(^{94RPzm?GX? 
z@v}#j9J##-osfZO2<~lKYNm@zkkFv~^@INE59XG2Jec!Y$WM+4BuCYkT^=u?Gct$b zHNpcsukp8*xkH}t>+qyj4`)Tesw1>ya%)4?WU3XuIP}ckmrl?n#=&M5_l*kSe3w#B ze_Ktdsh={q1V~lSs~=6gJvm$Kcudid*3q9qG&XB*IHmwS3AAKJ>5Lf_$Dh)!HOSt`rVfzPK_p~G<^-p`%`+0;+ zG0@Z)(bKSV3A3+A?{5g5kcF3XR1DhL>P))oSIeoKSyKj;a3fCnon3Ox1L5+siGyjByY*{0t?oxjfE`fPGCFO{ zm#lo_Czk!}!t!u!JS!`ULJgVk=bTI=&x=p?VL(R}~{L5#1 zAhzF6jsgp6kMIuugm-~B!0tOIPa-N-S!`VC++)>zdU}=uHFt?(PCO_beny4qNiESi z8VN!9Dkdeo+;5t0jlPFS?5;^gd}$`)S;!HXL-5S>eTin_^Po|8T)!{V;x|X^^{gcB z58LQ4Zw2YuMAYwpQn>}70gV^osX zIZN2~H>;cRKBAal&x1I2pU9u3L5NRPf6b^uy3gn}PCa+8`I}?afI%PI{mB`s0U_Ip zb&LK*4eL$uF87&g?k+Ts8Q18lW->?j7dzZ_rlM96I%kg%G_aw{6p2Z=X>%=`(Nt}f zW=8kNBzGDRPB<8GSFvbM8)O49RcqsEyp~;&W;)OQush3rz#LUjq4;%oEvQEXgxI!H ziFDl8=D+AmT07W9o9u#Nv(Ct(bEI3xH8q+zmkg(bfq3@d*i5#zKdk1WP(q79%FDoAqmH1+S^9iH^J&8lcKNmwuo zknrK26ESKIJ!Oe-3JdoTL{t)){sxW+S0KUEHVJMmuY<5=Pf^6h(LYcwmg|wJd3ES< zepgO$RH2@2eEP?l(UOUu5Y@|g7gB^aY{#73ygGk|eYtpopJ5Z?MR8_q+q+W;9L}$% z0#F_Gr{;UTS9I2NP&VKRW?%VbO2sUD@L^_uEyY5cTg;yx>*@+yzZ-MYG}>?)r1=c#3Mu9 z%Vm}nf1MYlmrs`%Xj6h6^;C0t&1Wn7K3g$A04O%<-j&D&)Pi_%D}Us`1J*u}*_;?w z;|-p0v|3GVMc<_n5mZ0hiLHmD#jlJx*mDH|cZpT?$3}V{j8`I=g z&9&xa(>%~IvtkZ8M%NTf*^MQK3v`>*Hl7<4<(YQP>`Q{Z6+UOH(bhzkm3uxE*C%Ir z4Qa^GOFo1%_8384F(1iX81Q}`F_OzcmYxn>l=UsvbU;!HHl`R7v zln7Q>b^I@Z^WtBEVUYw62ZjeSM|=uiqac6Hh0$ZaIRkUntRA`t#+#gU=KE%QcDKk1 z3!GtLKS?qb!<(+QCf4=btFAKX#W!wB6L9fxMNAgob3B#42=uE3A&Zd2Ww>PNf9osT zWvjcf>G2VSuzxlYJRJf2(J_4Gvobzl#^Tge$;w69O#Adjkn_@G-L8qB{(oJU-J+2j&+ePdXng;Q#-LGc=qC#GDrbDJw0;w+ z6-H-cwaW%J>dT`GOiuewyJs@9({>}@7PupYP=))m?i`7D-uAjPz@gNxwpx2Y7{BtF zg;U(oUvF)~8g;$A_vg|QomkzGp`~f+LD2+IjlxHN)3}TA8R@j#bq^b*v3iPra?Ew$DlqPFot6(|`?`fk&@jV~wk%SL4Yc-4xDPOiZ}s-7tKHOn z!dFxMe694njO>krC~cYLF@bur+`iMaGv)i)<@U?jmD_ug<2r<|LUwZLepV#bXjKoCaNXzDP1X43jh`PdFrzd~f3Ye0 zdxP`hJzw+3zE@OoZI+LJ+WF_Qm6^1g#7Q8pCSov} z7U@(~uW-GceGBQm7HVU1P8qVIrGb6bo8G-jpxe2=NREzdGz8*k-%1LZedY9Fd;>Tt z*HuxhUnQ=A3wTiWUcgg{4@@#G_z+Agy1laZj6|;X@{#WQsoj@~Jo3OIfVf&M`H9A; z34-arSrduE^*F!UI_#s?lF{j#R+#0z$xZ>`=Dlr&40zIaJu+yqPdSx*ck*3vUEoZW 
zANUW5EJZLM<)&WBj_(@wrJWOy^n0m7y2Ed!7Y^p z>;!uV`nOkzQqqtoEuVA`Z>eUhnV84x3|atcHIe&!#rJbtgeo_87|(?P9*98YD9Udp zOmSgu3^~7x%n0cArpg^b#xAhx9+Qc6+r?xf@TliW06&(i_>7qK!{%?xX ze=2n2?2n<>k34W)+n=5|+1I{p3W%@F`yeH3VYO=s*Di*({&cku;FBIZl6=El?rcz{ z#d{vteLEjBA>dsy@o1XPd#{Y~xN{e3Y{5;Rf2jF3?h5SB*=fg%+jxy$F{E`lBzYIT zCt6uz&>Vis3BH9~`Hd;>M~-)i<^kE+JL;PMZ3?$G@A&cL{$kAHpy+8}1?$EMHq43r z1{Eymj|?SGUap~B-cIXd+6?}HJqa&=u+MU4EQtw6?P$NXT0S*w7EF_8P0{MoToJyJ z@0%$sY-*CnG6Pdtf@fqMIED8biii7{Z9N*kBAim@o;8NHL{FOW*Y@8J?Cp3<-)Bu+ ztJU<=49_xd-dgW7n@-#!Y`0IR>ou%PNA>vGWYiPXYoMFC-h28JA|8#Vrt^{-8iX0L zbh)OqHcx0Q^=mkWb`Fzgx%r;xGpbyxkHufh_>P*H>YBoTO0DJ()k1&~NHV{SXn+6y ztsfT`H{WHN3|=q03>MRE`Qgp^^VdNx5Ajf>sMS5WzZZ26>OK1v-sWJcREGB%iLUb? zca@JGDMa1n%`_U9fAh9>pJ>`)EQPz;h7%3+gKPqvv%5oJzQN`t!EZAUDaXq(n0aN5F=01fn8+11f(2YDP0l=H%%RtPIj6wW zyKk0eVtC#eXQom(ipVp|j6cp5L>IO+MMGqz3|bOdA0*16gHv^dIIeeIcz7PPhiA$c zw3&fj5pdVj+~gpbQ*`^zJic#&B`gsTJYxhpG`?%rR(LS)-%&tjD@ilP&%5f`Z$pny zEGC;{Em24YyweRw4nl0^G))sdU2Iu)LJPwp$v2kiU!5228{MX#(ts})M@;4xs}1MQ zWN!qs%)(?Ox<75}J|`#akBZ3Mo!5K@-rg$U4-Qrn;IxX$wjoBBa|N-m;}YceRmg@Y zfYb$qk9@RJ`aieIO34;7@5NiXvk}t|$fC;>M;3W#&2gVWi}JCb-=ktZu6Ml%K6Mdc zPeAL>L2{>f7$mgi?(`0!8xN%6gA4wG|*1#2FZIQ<1(~ zW!KwPW3<`EbS@0*v|E@jDzWlZ8B7}O%4!X59!4^U@Co{EWe z3=A}=ooWs2d-QrJQfdR_0j}aL3jYb>E#wLq7ce4QkE)dK>fT zaVrfTIq|ze!Jsi#MIG-~G}mQ!G-iajCrYg^Sb!>UCgP@g28NG{pl+NHtgQa8*nbrH^(>3ds+-Q z>$l>r?s>gzxKo^OXUioJq<1d-Qq7L4ib22L5`S|Wj$3HLsQvQC0x=hfD;6thA9WiNfHVqzW+2>iHB41VriN z(Fcz!)-6J7Nv$>3jJS28exYiN_h$mI*Vsi~G7&UV7F?kgCpQDZ?h3g}M}KH&xszJh zc$TM=I+i^;qjriC{1jEjKP)1>T3e?A+WO7ydAB+AShj>dzg)IciJwjx&=)wMEN2sv zyF^_=mi<;=E0!q6;8TuwuHO|Zj%I{=f9{9LkmXBN?w{sV;O_9TJ^?gZbo58M4_yNP zM3AL671olGU|4x`i@8pbBl=FeC%X%0Ij zos5JnO6%Sj@4`j(A=jzR$vL=)zNH8zcPlhYJO^$s^Rn6uHIJ7Mz!QbR zzrpJr`c~Xq8&T>vG2Ko$h&SHyqAcDSQQOmrpnXO&z&VBMAJvR+(RNwPSYr7)NNVx~ z+TYigWoRBHQeiVw!KRii(l%GA(e>;J5|Y3JQ*)c8Qwp`ql}|TP%?M1O+;)lz>mwN4 z$p0}{IVd~clk+(;nvlvf_2F)a4rt9jor7D}g(-Lgf`n#4+}5`K>@IzVx15IQ?=2E= zKK*$8bn%D!VnhARa=5p9?BSEii8JwI_ppV-IRDNIFRm@TzZMaGP+>^_K^5FKQExlT 
z!!cDp36gM+0cs*~+g+msUYCVlCXkHFGblz<1>CZhuBkhVGyiQ5>A~{|Z`JQ5X`^S#-6i_oQV==hDuH+7#gb&CPQK{V3ZYlxkMT{=w%8>5NU7smwEa4zbz#q zJOG@p{WSlbJh%OqIk(F15kB06bNoW7MO0Ww2`FovFOYx=1~r+<}Ick{VzM7P)j9s2wG`Q77%TH5BO;+jfQ!`|snsLfWVaW=0UKWNO$r3>+ z5uYP*1IqD$KxWVWsyHfJz8x+z0w}+ha&Th$ZdLwL3X}MumPXCJU!4+mxoo(a+>`b9 zzCzH91^7TMAtc>Z5(T_OfpvoRm)0I^i8jlYZbjj>*y>tpuFh1KdxZ= zsixxDss_c;Wain0jwS1n(C-1`Pf8BpBcpIf&i?;6>`uOmFUy^!s~Wq9NAeu=FAE!= zJ8f7^GHWi7V`1(d;y`(o6FqT(tL(`sr9TlrgmJu7y)gI8o15+g`5*rl-VdP5jPwUO zTWBz=D4CtorV7vt&92b+oo6p$3265CqmM*OOM9}BYOj2McN^h$uvmQmksxHZQj_=M zpyzFcMjw8F0nWPm8+| z>k{fxTE`;eO(D(1sSAKkoS34HT`)>n;5R+zs)qZL(Gwv^Qi~z|_7#1^mySFx^T~*c zP!qSi?b1Wb0%zaKMj7t+^<#%sSJ(D|R9SL=Jv75#0DpR~taYKG=>k*7rc$43j>9W`~$n1-!cJ=zZMAcVFEZPk=KU6{j!HX0Ts#s`7 zJ0^z2Jgy@SY5S!bT)C*@eCb&8`eBnC3b`tXO4%rx!HJWurjc*=4|FYw3`ho4@w94 zE#Y1K4x!w-SdGs2JZMe?mOW=d(P|QE12m>C2F2du`O+*y)!TZS()3J6JJ1HuZMv0K>eA+b9*5=_(pt0K22 z><@B#o#|pfm~_ zopS(Z*xP0&d}ckbNZFn*U*9|pe#MNihUJ&69=Eunv$92Z(NbRoc-rM!$0X_ZC-3sUHomHEdH%hZDiArByRblwSAiEbo}5t zcbLVBda~+I8>7)EJi&e-><%0hBtk+$V0agN(UY*ssl~8lPB-P!gFHnrME1yMulWg%w`iTPs8%0L1X&Un4WN^ zO!iMIR<@hQ>Mb_HZFf zW)+2+nvMEHv+CSVTKn0G2G!1MUUB^m<>5V72gk6L@q(y`N;`=!%uV`ga#u|~X1JQs zHCnvvHHEDqd!P|bQ(i}Sb*wxOoi^cT%j1c+ao4!%SHjZE6pk7=wMqRzFJD^Vx=`hJ zRwC5wX}T!r&g~=qkOL#(Xcsmp8>Qn4>m5oq5)yO^-0JQaCV0J}L$(~1I zhU1OI?`~HzF{xV7C~6ue=vGw7`mPtlo%(tMO%@-Gm1Vh1!zlkqt_bCCr@Q=kOP4y- zMetPVfO{Ln9z1 zARU4rL#H%IcXxM#bP0Te@jUo@pX>X3;kk}DbI#s-#eJ`}*WUX)YD9DkoOiH&M!zjZ zhzlC8nvb)foB3iT%Mf#===#ZfP8XU6oorAxC$*_#FhBwwhcRZzuSz`?ymI}{iiV-QQ3XMlVY<-OR(w~z$MAd zst*Egc6UOKc#e1 z^PNLwV!+hpXg`{(Qln?E2>Fl!oYM6 zSg*B|-Tr}42E1hJx7w*EOQ>|m|f#t~E+u9(@wOSRx?^w_Y#@9n%;_xN^VN4J1cy!z8P6mw)?Wlq%x#cfuMNjCt{H zqrm$vlpO6(A@Z>Y3E$rCoF_UpLQgJW|W8&9uFZ`$%oyQs{Mw*}m;u;13#71-W%lPam8mX1M!B7BQ(GFVj9m&4_X+E<(!Qz-ZD@6- zt9Mrq_ezMv+AF2&i4I73%kFcfX%-=md zI2mP2eYcSy%0lKF4E3_qXEnJ6y4E6b>c+ds*v-kQlA1or&8TrWBCing=jyb#|MeJH zd0WyBKFj-M74QnifxMhZ4Sp-4QZce{R2_=J&nqAf7@Gb#@)r0JzAs=MC@5@(0KiXt 
z+~;ou9LX>pRF1r*86)m8OU{mIdz*fqfu25*k(ADst`H1@&68S0Q^44@PH=m9itcBF zk)Pin(DK+*TSYSsYx!FcLm{sI8-ZDp;VQ2Qu1GE8H$|1w3(iVQPzGuhnhWPk!p!P0(DIe<3eyPWl;DZp zF?4TNNWpXm@{TsnfX+u(_Ldk=Qj7W~yo#Z+?8z+DHJ{Tpp%$oY?}VnAJKnIn2x5{z zVIs&Z8YQ#dY3*2J!rlM-{s%Nk4TKq=^lslJMT~reI0^tQtOTxGOMz8791D&BvK#w6 z!cl=b`d^to`8vtDOL*_>`}@w>v-dv^w6RBT-aZ=e<|&qMTnUl5jt7j~9n`dfmnSO9ou#`quS1CDu~ zKc%k=!U9cP5U1hq3a`gJ`q>2}P%euO>gkd^ok3=?{0$Hw;ux;g>xLsP25ii4K&VF| zME)WaO^ZLgks0{>^N$E{%=i^v_vx*+kM#Soy~c6V@c;@K#7LjVP=t>zYDF82!h>!f zeQ8ec5)(v!jLZ{^#klDsr5!5#Dbg6=GeU3~ulk{p$dy=iHK?11h+pHV|_`O;>6m+QCL$z@W$N?t>54L80x7C3Zdzn@9e7yFtTb3 zU!EDVn^Tqp7L&)#$KX6Y=b=6Kl(pdN9ax0v8u{3&J6^xUQ9kD$>o8w<_tt-|phC|q zGMOOD=|&5@Q1l~BH!`!A>X%^u>lzFVefJXIF(+R~e>KAbX1~pGCvQ;P@zJN1P|%Wd zr(6JXV>-~1eo;Z3`#j*Qt5u}Ey6DTw9 zJ3oXtLyvhNEA7z@pUs7}4y=Pj^Nak%)2|u&w`SW>Hr+QlLXQ(;;XScdlNBp@auJ@L zINHA{j`6yzIGrA7zmm4==f^l`cGs&WK{p^Lv_O3?SPEF)r9NUGm2eGQ2_IO< z&vq)%absbpp{-8;G4vIsC~@X?7m~3H5SrrWPms6{DkGbqI6~K-_Ebk*N791G@Yz+bVpZgp$xsQ>*?UROj8Tks5y{99v;6cPBubL{P&8 z=5eq4MF-%NTT6L>X);fLFHP~I|LeQkL{q{@^W}PAcvRnyCY7{U#lhyZrc$^3_SWzl z#?7Z$$#I_DMH&=c2fm}(MF|qZXh^CR)l^#7&3whT4AFsOy10a!@-2pRXhvFD-csAe zQ+3$)3d^nneY@akrVm}z>}%K|`LnYUP3^Fops{xJAZhY@E2W{bc{XkJ{!SDR4ANap z7UF_c(<%x*A-Y}TSmsHIDMo6B?30{fQ(U5YSJR=YxE;?)V0w3Ce7ZPl((yyXc28@oYtdnS6_R?1=roBauzIn3L~! 
zdKlvS!`e%P>D$Itq8U!%^CDH46 zY%n$q;8i(N4#`Mf-8zG>R)BBG5NmVgA#SjJvsR4Rqc45{JgXqzxjWWRFIVq+5#A_A zAb`JwPc*nOIKrFGEF;V)Dz_1#4EHw~JG|Be1fP@sv>F6SAGmzd!|xP7JM{>$l>!Q{ zxP4XMFz3D_P0wKWfppLvP=179awNKe#3+ZX=SW{*0(2bJ*A9d)=SgYNInLA3a2&!G z@?_W!;xJAVbA)jbD_M|LTiyD-@wJXqcOt#N{-C#7>B%cFzHpvxwy0I~2q=1tjm3kG zVbMF6N7R?3RPruTywAqz@zZ=p@DCk($_%R$;g9aG5gkhws;6D{z%7w-V$I%02$<1@H^Df&mcK3a_B#mIq|>-A$Z7s#48PsA^z zo49g#3hEggsTuF#ugj2MFW_dfU8zkyTX!pK;HQ~*@lsPMXPT64xS9>1r5Hq|h(Z z)HwRNS~a*4Y9~vr8itPYr|Ovm!}bg3_}x50iMwMf9BPiugJme1cnoG!xE0fw;OXr1 zMJ3ycn8-xf&wM|Mhp_2i{`RsI7<;fLO?Z2x6I=9zI->J2R@vo7knplYwAj|4|?#cHXN&7V|qNGqn_2 zf4d{O*eml=3u!@-lo^D|>m)pAXM_Ko}VLf4CM5`#`%RHnL$lx>|;NA zd1nXKH@7Uqsy@Kwv)uYj@!vC{TybdU#rgH;fi{OmAI3oMS`h1P-V|fEHSgCVPn@9H zmff{*gUuZ=wrWT=dXGvDX6$1}UJ{UpIAWaGd_(?9 zqi}Ot5iYA=PB+;UUU?4H$+#rRPVrRac-Gv>lzHZ-nxiKBcnLyAd1sXf!a05_M&wS5W^f@*enhg@Mj-JV-pm!`0UOg$3y{kFJFFGFn@{O6FDvwd68xQu26h2cA@2 zpwOU}+UIr|bD{SP#z-a3+MDu1u0rJ9s>X+K~yJVx5TE z=T9XRFAUIim?JoS9dJZqm_83W1=w&aq{z)pG`L!)Y;a=^pM%@oG$w@VdkTGfCxY9- z8Ph*zK-<4z+3X|4OwDmjJe8cGtgO74cb=2~A(y$5(A$-SA^&f~Dfh*yq6|SkIe}rz&kcG|x

@_KO89AqKTpUW9V0cZvDT-P_$l?mhO!ih8aO( zAc2uZ2c&l^mni`aGmiLa)AQfNwI9ovP#FlEH+>bTI^6Pgx*-_k0B9BTFnHM12Dc2+ zT+%uq9Bt&L8r&iMb`+C-kkQ>cB$!0{P1tj#?q4r8UHAN#4==~am4X>J(f#HW{Y!Zu z*QOhThX0FkgP87u#2 zKai&{#}!m4D6nVhV|C%*p2ATS0-}zhiN7&5m5p>Uk0z)SanJ`~r&P%# z38OS4NXGF~-zCzC5Mc3N%KNn^q7llGRvki`INP4{c|kQl;@%!4nq(mIG|S0puKZ&# zbC9>2&)KYN{xc4C7MsBOsw^)&Io}yZkVB9ApN2!06my{sVWhHNOZ;()|2#uS z6zKe=3~75&=ffD{delD>d4%Xssk7q>4^c}`FZzEzF83JPGfyr~^f%}G#r*#B3Pj8j zt`|wiAzmhsefU@fip5+PlLd0TVx_&QmwkQ(YE>$UKL^PNHt#hFT_-j*Y1+&MEU3le zw*vjr;{UozLC;0*l=H20?LPfKZ+ZCw0#||FHkD2p_zjozzwj9dT_Nl%*^L_}zPy+l;H)!En5u)$elph*Es~3 zR8Hb&E>wWJ&;CHh15Nv56TMh+;#}bw>t1Go<+P7q`8Zf9>A&5kAwlQa4DUP9Be3@W zA;K)qt8^K)Zh8|HS3%pcvlE!sU^R<`PfEl(;%0HGLDsi&y88S=O|m^SQ$iaDinKV{ zb(&Uin6H3*xKwG)h5u5uo{_8%95NxxLpwBJc_$Zt9&w_jv{v4C|5(EQQ{1nckFpkG zPp$`K1ow`luD#nylR&T%>3FdD^CM%qg=dV}Q099jhJC+#kI@7BHC&cnQ1dMOw-)~^ zxql3U1P2OGw+ZElE@w;tKU0B}^BdoPeN-63W@#2q;`yP4-?$LnP5E4F{*lf25#|l}*r#Rnk<`yr3de{+NIb;z{p8{LOukpU-W|5u>exKT5xwaO~=8N#? zR#nOF?0EOemFP0K6-xZe^N?FB_A2hta@u+Pou#xN(*2%fFQB z_YmtSL-SDCP&mLj&^kQ*0IkT)g|(s?UK-OAJ%UAGD-&YR^|++v$$LuVBL=G)qk;H%U?GB*ZNY{=_h>KS@f~%e_)d~_# z%HKZ-7Ad$QAwyJhG^@(TGm0{fTB`<&K7OVecQcAa5Jz~duy{7%@h#Jn*!7N^N_^QEfK65^6iyr2xjMI0a}X_~aUS**G;eji@vvuItiz|FwubA?kDrO_)7GpEcfA?D>JJ`1jQe81ujh@Zvq_nA)o(v6z$_mnT+ z0-!fG2C#ka5)#B$vN0*4e=1#P3)%mH-;^Qr2N%Nuw>5z|_bN9bVuRaq_zsrbxaq2U z;fQ&p@#s=tgw*$B&TdM)wTjRDyyso*r%8q_)8VCNYj~=4CL#1{Uptu{P&bPi+2#F| z#`+?m)nPqX#^_sJG4At!;fUXZ-d+Uhn z<1w(pvr{^t(hOubj~T7Wa#hu{r3fr+-Zl{U^BPFxhz}Yrm4CrNS6Fo|?w~*SypXrj z&+jV2SKXr{NJ9KwzcN zeWN(YfiOx*{1_ZT5RWW|xVgXuP~u0#gGTkuTP8(m>k-j2tE2eLL&-v^g-Ig2+e< zGUw$w6)b`bVC!&}regATa23(BrwPW%DvGF4a=2aonAggzh`_)Iy`< zor*iLpu8!NnU>Ym-BCdxpZfJC+u4tt1(xpPusy%xxro~w@1Sr7?-T60G@aIy(>PI9d7WvPW(EzALoL0_>8HLqw{Ub9=|ijp~a@U3ET-= zYg@p*bAO1|_xCoFu%qd^Tn>orXz~MKC86gE{gSU=CkyC)^S^mM=Oxx?eOB+GTT)vb zpJOB7ov!vqpXh;5$)KDQ)ED=()X-l89J1Iu=4!g;!!Jus-xOM?)yZ;@_s^*{rz<{x z@g>M}%zjWd60fWQy#RzGK@x{}A!|PE_CEX`X>hLuC-XGBh9T{76ivss%2L0{mGL1v 
zD%=ismT=m6Y8o9w6+ZL%)u+#r0@>~({S~_9RER93u&Nt%=TPDct3B8-X0m;B-OFZ#uzpDdKqc-Ok`1+MW<+_IW?t#76E z(azL57kPQ_z+cJ|d|tV5D8lGs?9`y-$2PUP{G{*~QvZ4{8EIs=FjfYBIzcDx=0QJG z62Sbd4|1SQis#h{H%v#Bh$-D!U)1s(sL{V5Pv^m?MFKUkOlFBVh~ygIY?F%^+?_$I z9=BMjqFN?5Dr>Z58^w&SOA)4hAqpA|Y2O*-9rS|C*GQzNA`JReq&6K_LWT(dnG!I_ zH6F7k)nu7_f`mKDRdM`jN0fw zA0Pli1OAs^z>r8sXSMQW43vkFU3TF-amtZN5zuJxa7A{}7;_CY&M>&|!ykD`kRTx% zRKAXa(x1VlePP!VsD0L*NbKB`ci<#BzfX2Ygxb=~+^fomdVWPrc7u0-~xy1gVv zr|#oF9vY%ylIer7OgraHl4?v+Zm+PmT(4pZ))G%@kJPi)d!6@QSMMYaIa^MZ=m(*Z zh5Pm%cw8A;l`Hd1XSL@u38e{b8^wWOGgBUQkY?T>mfk$Hr&DPi3cT;sCjO!#Xru@F zCU#aN??JXd%0sd5_SIO!QiI-}jlZ!lUp z1cuT60@^yAq&vF3SM0+@z-owVZ$*MqAK--yfRdKcE<7ST3iRGkA6=dNfJR(~WBlh~ zBVgVt!Q9K~^MN@3RhJ&SUW#83`14x)97N^}%I{7|gCh)zBXsV_A@AWaI@6rvsK0Kc zPQB<*?TKR;k?drVv$pPW)Y-Bq;vn~$0!s(~&X@cvJRexRXP~!3bJrQ9&*rq=)j>8? z+{$L6mPyV{RZ8=zQU9R-klq7PFm1R9hHpD9M98go73SOG8R;75NGSufa=?bs zeuaWd!S+A0^UGiJm*%nTutHQNGs=etKI|O^0JT6oQQ!99y#9Q`Qp75^qw@PW#UKVE zMdrx1u9UJ+8g#-;C)pO_P8OnKRA?WJnYOA!g^d|Vgr(KBW0K-=e7%B(9fWAhm>JXY+Ke*VALM$S>k(<~x_ud`NJ;<(4M2GC+Oh=;W z@ez8GQQhHUc|TL#_is2)8*sOm>T`*gtI`{R-hNts@BSi%zp3FSKA+%!bz)qiaVU9_1I>%oUTSU7aSbp%G#Vng?iITf{1nK>Ah)zu= z_}p1j@>Z37i{Me`EGNQuM%%vuo!<_SHDnOYd{d7B(qj!`(=`T(!8u|2N!v{s#^VNp z`Q1KNB7m~HBed1Mit;CUbNKvTJXG9ECwk{NPjV0* zS!9@-RnRRg;0Sz-F??q>&42nOCui3FAnu2vG6DU3QdVHz_oi*-Fv$mvfx-+<21-V$ z$9dE!uHRbwty(c4)Y&{^bs$>aXu{&7f=a>|yaBV-Sq`NXoljdw22pdA66z5IEU|K_9Cz1jRhJn(bbQXiqy;9 zjj~hbesr@*{>8DLU`a^4i-!PRS9N?EC2mhdv^cS+!ZQXn11)bp%xzWa=hXA41%42p zQwaNGkJxfT46k^4KdM-WVo(PGMO1doMzQMf@UeKZ$W`j7P@vYVwAzUY1+tWOl&k&S z9`69`gI+?ib!0ZhE@}F04b>t#O*I6mP6EPH;yRsI#;xeU=OVUpoD$B{g9To?n-Gb+NS9dI3g_Wbf3)DJEyPm+ZY1SR|8e}we;Gy5FC(YY2tuQ%%I$Ub^3ghy-cycl?&HF2)=qhMc3|# zVU7~4_|Em+L;ZY&sE`0mCYOP$!dvrBDP-nHzryyDr~-15da|>lvbI!9&<9z8miu0} z|0RN-ig_vw+`yXmn-5ZRQNPC#a(^_fQh-14-zU z`>ZNcm}%@>RU}Aa=2}C`urDe}C?{&5;(9wTC!^sq7p7<+w~S#~?^1o4sv!w@NckXn zUGc@R_Y)GwAJCLyH_wN`>54cndC>8KPH+d!Bt&>lp0X43p#23~zaT8v9h!PME|lb^ 
z&nS>v;>6o?s=lpWX1m%_df)f0l+;{u8QT`~!|IV-7U%@YJ_ETPwzbdXX)z-_uTfO3 z<>jMion|ac?bcpZZ|8w282gO_ouTVW7@ON7L{65UMU%;nlR@1rysmUw{DS53)BNSs zVP%Bc?tI*?tb7C`JO?ZoM5rC2d*gm2pV{jAKG4f_HCPA0$riVA*PzmdKUf?>l@T?e z$(81Yb2x_^gZ2X*X1W>%={H?B(dCF*AsJ|CNdDnVB3JWiz>FL!E)t&-jF zW;KG%>gi>nQ3D{tZV@z9S0@Rw7LbYr)?=82)ynuX9yhwrG^pBjTk>a#PULwnUQe|A zs-XDAj~*d@gvDf894$L!vD2#!^>ft%gu=DgjIFBUa19rDShbQ2Ck3Yk$ON%#Ag3T{ zLV5m~SANJbM$tfLxo3@oa*f-LzIX2?l=+ygJolF?YLC$DWJb`@V*WJ&Ic?Zw!o%ANgH8O zA7CZ)3#3L6&5Dss>rb)a&U$`U|y$U57l6* z1;h@@Xrj(_svs87W#UsB=mQ+ynkLV_O3W^2gKMBweCFC7LQ{j_CsFgroM4(6>F})_ zT`oC0%oTN(mJiZ@I5$utxy`dYH^S~Sr7f`^@ygnsrzOv*Kh*J>AJ@92J~TiG;OCZ@ z)!~KViGgj~zt-Uu<`LMWA9OJZI+09Zb!U~oZd|^6pH*J2@QTpNHon?MGo<5d#i;(S z*|;fQM;v~8Vqh7OzA7LYFl~S{NJf}*xdvCdCRRbDuO9KFAnq-UYTq%=SmIvZ7nKce zDw$-OPvQ}A_||oA=Dn^h3)FOH5w@IX=rXn-)Mu0f>R3AWyrcp~_I?5=d+|;O>_jOQ z1DQaU+fmSDVIAK!^Y9gc)1G|;ryWg>*mY>c@sP+v;XnDVjcADitNZ=4$6}60g}p1c zY^t$iIhxWIt*Itr!Q~n_Gn{V)2b^B=YTDkS(D$r0RdKk6mSwpeoadg!C%4Zuc$Ts3U(A`j9f0cDvu$Usr{goKX{{e z=2BGKT7dik=K;A9%d^JU5p09VtX?ObhMwt`8Jzp(Hxt#-;YS-Rv zw4!1uX8F7(Q?!gl^~$+5Uhn(%+g4eTEeMmSDy~D8w=Vb(o)jcb@;JU8MRU`+`t~-_ zXF-2EcAJgn6v9cn6*i9LS9wT->+YjCP~d zdu>m|bK0n$KY&cRcv#3rLD@Kd479xP5z5M%ZTaA&vB|NOxv`DDvDz9{WqSPf;`VOi zfjlG4p?tb@ER=)7`SOzK>e(J}^{{FWUSOTdVKM?!5hC-lw7*c%`~X?*2JvaT8x>s9 zD89L17PUx+up6i_OaQG0nl z>DQzH8-95Cmht2C%YOspKQIspbb&T2zd{@xR#$lbSPNlg>@;{oxIP_e>4IaUXWy6M z==l8r-Rkj!20$8<84kQ-vCOtypuhY+5NdEBw#zCwOrJRPZp}*&^xydEvABOwkudmrUhO-TO^JotG2LC@u2&4wtAo&WzY?YxD>2Qi! 
zOYzfK8fi*h6^efCEY;qKdv|$wQ5Q&0e@5~DWIWaon>99)Y?C+{oNb{tbY8htA(X_nVv9 zX3NtCy+Z3Go$zXf&mK%+3SdOAjk=D^&~qkTh9=jPSEFg$Cu3RBBZ&X7Z5#D9@}S*{ z@NXw5_Z+tJ&IvewHzLgawG6V-(F^y6|KvkL^G8^;m?lz<_~m*fe#~j7+jkH*mIkRU zgH#KKOj6rG*Ib~DD%2(>B)nj%KZ#C;>;dhjLi#~6A%FI~jDCt=ies%HlK-(f*W_Ce z2Kqzm%}fC{8d2xtjPDIY?Gfx1zSO1?`g}g79W}lXxkCzvnTrI052b)-J->@>fEM?K zCl-9-XJqW4Ae$?#Ml$r4oxFXWjDA+Q>$Mr$FK{Jho!cyC3}-l%anC}*`7bqsrPwXI zlQw7>o^Y$XUM3LQ!@TmZxkF@(U$#~Id=>vu>332KN++8TvrQOWL1vD z{N0N1RArs7=h9BP4YtB;u}N_iK!U^!MI{9=xusIjy%4~N>ZO1BqD(sjVKyvA7TCTJ zO2qHh`QgReEcGg@xCyRS(z!Bt++k?5!9?X!S+u8nQ6BoI$51b92tPnOrcDbHeCf#;EfgI@GCe)3(^PNiMTV!2oM;wM069f zCpk^MB!}?xf4}ha9yPXUW%D9szJEt!rjHq7GcJsy^GVv^s1BX-+3yPBV*G(QW;%Id zxohoN*u%`sfbchS*@Z{!D01QYMNoO;MeVtX8^SfC&RWO(6JI!$%ffQ%X5ON0G=EaM z9_W5Z7c>2UpQt454%x)Ja=dR8<75|fIj&eKriC_u*Z0w%%|}>^n-xE}{Gdw(S6oE) zN6)6L9o5u>d)yfMbMChI<|C3q(ijaTD;jzFgiX-vF z4Ff2XrlR8V)|0lYG;}10k`ibN`L&eZ@F{4PENr+jK!4cG_BYg! zu+<}zUJM#F49I$QO~pPwb5IIdm^^W~=!gEU<&B{C0_JkGsG@eps0w94JJ?Deqj$0! z9!4(}#2U_@s^162)-pNH8LVXGqVA;yKhAf#o}Ng3hElraga1&?ed-Z+`N~qCfU3R2 z*3Y0k<7;Auov4l-TKFNevyd{5f0=MFA+#&yHQ@;7?Riw1vqzMpL+HqR(SeiN^7bqD zYeyPFH=_T6S&$0#cV-@!s;U^XI-aX=Z1k{%hRbmCCDaP9I{j82+p}MFbiY^w$vVOb zcSVW@`=U&O%aCJqT3XSKIsBFt+@}mx2qS5Idhhb~Y>zq|$6QJrPM(-VP4ls`{6It^zbLR4?NPDF~O?bo$eO_XF_q3xuca)+}ft>R8p~L|B zQ;GV>yhECY0)K?~k`9Xgv)$9`qk~p|3nC|auE#fWqkmnEebj8 z4$SvslM-D|m%3Bm)?8m7>sPew{;G=nL!U1Rq zW&><+S4a>JYy-&_5?Jmv#Xum0h;kqX*g~4Fp5g^lF~rDeoy`)zOBenIYbaK-!CJ&15t#I^pgFe z48O1HSQq?i&S_NvZNVM&4L@$B;`S~1n!B>NTqEPMT75) z&8(2a?72@z-x@;ZnG$26zrdi*316~7VjDeQA2&Z;1nH*nXPp^I{bdvwEKsLkuNVw) ziYj+g8K3jbD8EYF{cs~%Yf^{DBKGXztQMs1Jg|a@dMhNnuW$i{eEhk1I355EH|=9W zup@q-_>cDq4?q3W;#5d%Sa@{zxiV%7!oBq`VK$mwPBT?D39kQk`xVk8yw%yd*#ITqeJT*VOl64X9 zF_^yzETr+lcF;NLH$}qh@1ZATwQd)2rOtaB_`0v#elH>WO?ZGR(2%kd+jh!Vz8Msf z!9mLkA<;PTWFd!LM5HNC(YfDJQZ__oagAiZ(Tih$)fv5;6~`5iccewg;}D6Qs&6i5q$ih8r=&PU)c657_yQ&EZqa<;vVVnvZ`FTYQ)26M-HQNk>o2YX|q0 zcWDLzVp!16V=rKNjFpJ~rbHNFK4^R?gJ)#8^JgBtqiSB>M8}X+W5beNHKQBkII6MW 
zQw?j)?EuAamAfT=qkqcw|Hx!WLgx`)LChNBe7oSu`ccp|;}q{sx5+Nb%@iE?W@Ah$trI9=l8H7;;C<_&`ejfjI?~Y zD(H_=+PP(S*$ap4=k;-N@`mzg``xV4m4KDj3-io+ge-dg7L&z72N?@3cktms0zQxm z2DAGTU^8Mud>QC5szG!>?x7i0!Izou>#SGJ0+4DLWBODaDw=ZEpnkUfc~zAm9SyB=h{^-^p8ukY8ef zWK4FAiBQ5RG#3O zT;ZC#p|AoL*vA!CWB`lo;gHjZhqycv-ipY5G{O_ha?9U3(wFtBiz1%@ zNv^VpkA&BYm&Ua=H0!%gh`1}GEq1(^VcMjLJJxrPD7MCaSLs*bBHqXHY~fh#cTj8L zjBZDx;5_ON+WM;s-4pU^aK@+J$+FUZZ$o;;Aafbt`PG+q=S9xSyM{VMeP|gO(TA6_< z-hUC91Qd*@Vm-Bjdvdto+fg?6o*QV;UeouUxj;Ft|@$M zF}1+;L_j9Us^=43E5ECNpQ3*3*Y`t{92#}@8#2#7rh=aJ`?`tLoHl!>r$pjavi=iq z{l4*;L)?7%&0rJ3TI9LFb)V+Ios-Uf$Nn&;4D000R*B~?mR(6D_u(K~vCO&4n$4+m zJ-ZrG4W_qqR%%NDN$-|I%6=eO3sFnW>C>LVxPnk_bUmDMz5Z{ez4{b5^Tzx{X8gza zMhyuY37?oZI2{^mAbwj~Wp7zw?ng#?a^)w#M%GU)^WF+Q#QpzP>25v2I`-)aOEh-j9}_s-PP9 zUR>p_+3<9iDNfjB{4JAv+ojbDhKh<$SuL9^Or%h!`P9*)oiDn!5TnDmDB0d5I=zmG zn`6x~%@JFEy_)v4K?V;E#QGyeo7+5x{?Y@SpHwS8OWAz4-?3C5lMqc4j!!9x>8xbQ z#S{%UJ>nNYS<2L?a`a9k7GZbBLqY*L$0BYtA9jnQE&zoqr-1z+6aTRSgHErOh9TuL zraMj;Sg4?XjG`@DC#c#$zk_Rl(e8yr&MQ`@_fi~Aky7!ayC?}K?IL!kUwN}~izRsH zajuYtF9S~@?CvG%&&qy<=0;Ng#yHCRr4_70IHz^fs`z7-)^TB%QraOa@#<~Oe*k2R zDzdiVk@}?$Z<^v{zj1`s#!57qz7(;=hF=!~9A>a0G|yIKBxVF6{A->pcPir2=`4w! zeca(p^BNZ1CwT`%Fv>W!SA6AeD0-+&`C2QJK&B$IU@(zZ_~b^{SnafGxWSmugvw!l zN!*xy4fM48u5?nzdbi2xMcx*YSbgBcM5hve(00!GEFliY%q~Fh2GLbI14Tkvj2Ole zGX48^ChUGDs9*Hc{vk*2*jL+?(0NF}vf+en(G590;>**TqrFKZ^-9x;X+G7%J~Y*v z7RDBriqXC=66yjp9C=y@xg#k?%zAr#Ruo!MET4p(l&ZkXnMKn zr|ix-d(HCprA61>v9QP50e56`O^0o6BBqNrw4v!a1yrC$QD2UXEe zBGLD?C=64-bdGPV2tB4poQwt{ zsbv$(cbM+foNvoJT`kP;I2e+Pj3Inj?r7aVy)3nCgMuub_$+cQl|@t3s-NYrIHZB9 z2}q?UB!pcq&KfDhn-1X;l4rvTQyeUC(*Zch;Gr&kT45=TtpKJk0z*nz-PWpj4UsF4 z-}v2E#e=$GOVx64W-d=4K;?krv*(fN02RAbGX7m2tbXP40jEg)iUL>tnxZ`?m~?Kt zLj^a*EdTvD_Rx#NaEi7m33)8mtT)0=s(EZH&^RSlj|;XUO$m94P3Pu=${KtIAGPdl z9%IdWdHY`^!*%*-{!ZzF0>u_B9WFeqa~x-t>QC72YmZMvTfBUR>rm?uV|HMoQqw!= z?7ibA;^wF!iA#QiVv@+lDv5RPVpT|1K&wmcMX?LyPy~YUc?3(!)U3|3adc-QN>(!CKH3#L>MP5gn-3wp9MV?W+g8)@2WmTI*is4mk`QOkay;TVx`L^$+}>4!m? 
z$Ki$DosqX6Ooj8xYPMi(V7$`Ax6k$?3R%dz?|d5Fz6s(&7T*}RVxCc@9~_^P>O+_7 zcf2<&YrN|}TpzDBv|i$HubQPQ=$W&VQ0ko|rE8oqbQ!aM?jBb-ndNE$hd4?Hap&N>@dIT#GH#so0;yEvBrX2>K};zI5$iHbb2?pS*~r%v+1&=R zNjA!KDUM@di;WdoN&a#t-iGQE;t?ga(@|7tJR=F)&#@3Hnz=x@aOTl=PArJ@Hi`(w zc6ahaXS+it(Rb!-EoyR6bpGH1brcW|AKw#X zo*KP(x#QvbI(YNPH`%*69Sc-KuBynWWD=2DKs*$8I4ZjP4KI6CT*8a!WE@$C`?`*& z&uR^?Z=qGc>K6Sd^@>>1JP1F<``*u0X)GyWg|kp#)L1G8`8)nHy~Inqv0cz_3r-v|jly!$DW1u+T`k`N9y|zpdPfS{ zB>hg4cqMMhSb6dyh1c0iW1~OCaPL<1Xt>Dv9pt=W1u8j*PRdY$DYBko#}+3GctTo= z=1>(UwM>I-DkNo*AhI_5p;tgnSU~em(XfC|F2?o6i5AYj#n|>=H0(1R zQ!MjB_Z)Mr-}Ci#Viw*%tKVYp>uE7DjV3=dc@GsL*IOnvTFuq$*xT7PwAn^0x{pOV zD(>@5B{qtkJ++9PT~5JyoBtJ1I_6js=S)+jVkA)2&8%^1wPEJ)CR|Az-GX`pwL2=x zQ%y=`4xMHK{c9apM7q9-AVUFsc2|WKnn?s4+s7=MOAJP3;YJD$<(^i7(C>Sdz>BKepa6I`ZgS6Yh@94m!4N+qP}n?5JWJ z9ox2zj_ssl+j^_-`=42N=1#s;s@BSfU*(*Ap8a5-mj}g>-{dd2%&WCf&)Buaxo2qP zZMK6#yv$6&WWYGljE~3C78sr2-PQ~l*YBzNllc7Jiid!0@^0UeSlZ#UF~v=WMU|)0J(in(zXPBCjl58$O9ymcfKfX zR-gw^p|0bbW5AD8FJt$-q$KoR%(@N|9@=pdmS{?lRV-q+y&H4DHVV-qYwYU4KNN^N4hm=5iWk@R;ErlYO;iMt!N zqX9V$1WGM+Kz|VF?U=B%>11q&uPn|+TUjT=jmF75-FoT>(DWrRiR^Lr*5tIF>1?{( zM^vCBo;7-1(SeD;YIRBXSSg8n+v#|rv)Z=Cj##i&HU;2oGLKv>*CN{y`Z$c>1cvyT z&ey85Au5|>(;k-50|-k!AWSL){Z5KfP5ussCto&^`X~FeeWGc0`TGSu4xdsvO(5D= zCgyy#2ui_B@XD0CFV4R;#ovs$Bq@HZOJS*r|C3u(NaA8`3-~o_HMg=Gr&l*m?yk!mi~Rcqy6IC^IY14I?-}y`1Dy^UU>ik6w>|jF6TcEh^TmmUDJQF6A%hqG;i7fRa_q`bBbfQ>pNjoJVz=8ib4C)gRCIs$pOs~0; zTzY-aw>v5$UE6U15I6D}JoqfwrWpXxi*a!~h$?TvWWk|PufBw@LB)+wmFgR^Zt8!7 zH%7fh{!;y+^h(|bdo!>8sJ{SOyJ`0oP}yzEMIkg?XD zDaQMo<~>);nrfj}!z;(2NGb5IJ`YuRJ=xe=vC83w>mS^EXPQnUs!!vezS%hr;S6ev zws}{a9rV1_Y_00B+&Y~7q6tAzPeH*Q92*xRDgL0x2N&1T4Q>(e?nA#3HDTn<#pFl*GURg zS@{LSUCEZ@_<5Zylqv5al`m>ah^uRC{r^QI*9y_<_wQ`^*klb`%Y&raZ>x~Is$N?e1VL0u z;~;A)r)9q;SC-6MatN(+=CutokMR=HPAjdFfj%);p`G3-q5l%=*Xy0U>*imZQ_Sh{ zX>c=3mL;)0P*E%_z>4WsNZ32s#Mv&#o?LyAI^APinFFd5Gc9pSHn8!j@_oQ z?w@>n`254}HF}K6j8lR3GvlY#dh`FEAs|7D0)7w_Z`h3s#SqZ}Aj#lIvP! 
z39oTL4k*(?f~pX2XPT!l!YNNW1LDgkC`w)q+YAuF0bREP)@S=2sW;A3*7yvK*36#B z+QLsZiD>6c|Cm;l#9sUL#|c#iiC4P=Iel5-nq!gyzhy}r1h3|MLBo7KsLG5k)H}FH zUx2YOM04VD4X50kGSbrIAXGkGnia1K6Uk^-A$eGM4k<0y3 zr{171F!1d!#oS(b66vfQWkf8WSq*<9VU~Qbs znc$QbubJ0`%Q|HEd`-&UUl-l<)$K+ud1Lh%7^tExbuN`&T%D_3`iU+9Rwf7DaZPC} zuDJ?)`i+`ucKinCMoVPes&c1`QUDiQy-4@y$?{F$p;6LuH1Xy8SWy$iD{nhr#XGCK_g|CF^Uj>p&HpB&A4F_&}ND9lT?eY2f&ps%mK)#Rh{ z)qJ}(NlY^P6MvRAyTA3u*8Wqc?3|k&Aym*tuE@tMPJkew%b}IUYmS|1z2lm&LA74z zIkteLqUkj>ax>RIHbs{Dt2!cE54sDdgV$^t;22^SU%f8Bwem~8T@PZ+lg~4j_Kc*X;w4!Z8liCJQH-D7X&Ofx(#W(EHdV}?E9X$ z*E>D$j`Y9vBGG$(%z4qUOnd*KVEcEZu;oyD|F1J9@88$)12?;Z9^`e;8rr1tP*y@R z?fyP1lgx&@k*ouXyMZwY)uBV`VDBw;)PCEmlfRgh^MKStx62?bpt{o_snF&9aqg%m zQ5P~2ZsMT9K;!eO9c#q;F;3K09qOVxJS6GzJ)BV!xWx8_lcGI4laYS|Xm-YxT zxxTlK$~lNBJAK$yzy%t;554D3W!{jD1$Pzu=l=Wm z^PO{UgS4R$fwz&PBx!u~(OmlJDD88Pv2|HHD?Vd_NGRQg0*)Ymww_$r6BeLa{iudHu&guRI*Jnf~ zPQVxG`QSsA$#xEAyUL!UtwW~WBaVb6scOQP_lX2ZAACc?TjC6OSQ$1y=QkS#v7sy? zWaUPt&V!U{kc&R;q<8E`W(oK)_rEz8ki!ZAvzY z{wAwZOr^Q;biYvx%^-SP#m*{oi6-&M7L#y>d9lb~!?G^^7}HhCpF{BQ8^_zjhmoX}4Cli-2ym zQC`mToO<;;Vha`*-n_{0XHyOdiRj@b<3_`-Ckm9f!J9p#$?XGwWX0$n>E6u^=)qKj zS7--~*>n%Tdr;Qf{h!*8Ot)o9C#S0=!wGO^gO8mQWTz>`EOR*6IB*{@7satvdAZb+ z2_8L3Z8!PU$S>DM5|WXvBT-<^q5Altbic}Zbf+(3F5&^F9UJ~*uAi% z`_fGAIxadkCHCiDqhaRx2FYFCT3fV@_5exlwQ@7MUYJ3~4F z3Kt~281G#wG8Jgv`r`kSbdu@(1m5mW@2sf_whpO{Z{*{(aUv!CDT|Dp_qE7ok~YrO z`EfMq4Sk|jJobn9k10@q*YE4B4=9`!O^U0{wi)xz*Xved?8Y0bx(QnLE^PvYe0F~pJe-_xiakvPbD8`5>txN7crV01f3ydB)BRjV#$jP9 zd^_cO9hrN;IY*-&0{T_RhzeHC=k=>r&P_QgzslnEHDjeuh5VkXF8s&*sqXFYIqM3O zS?KwOKGIF(?jyTDTl4KKtV=<&jC1{Kb`Ye=s}AUOD># zR3(eO+sBs)NONX-_yuSs@b9?af6+IBUVez6dv*gw6A zxt{LN$y>(bqd%(jUlyKIRLlzk&8nV<@xbFeNuqZpzunYDxm$+lV6 z`1lN~{Pb&)@o(SAZRJ}V^d9C5NDyL=Qpb?k!izN@kPdZA;Ez4Qc*8}U9r*fam1`E| zxcZpS*sLc45%#C%zWs!zz62qyj{8foI3UcSb*YZV-huDE{CJHnp2M>7ZnJ5B$u0L-^0cw>U^+^941 z=@U%>Q-ja-K!1bVV7~X=DbKfuPOh^MRxvayhNmU?k%^(_8-<)c9eCdTzUlpX|LK&y zuiKEu)*D>TpVY$aM%3vJg6hJM5YDXkTRb^uPrXDu2m}VzfTb!Qmb8jI;?;5SiOh;T 
zch8o>S)LB?4y^bSZ-wp1V&sZ$@rN#85AP~!uz0T9-6!pf`}qXe+fT^*eRlfT+yPXw z?*9In;h)kU35={swX0HFzhH>dtwOl|%SE~~s1`EpeWx+~)SST@okb7 za`I{OZa2givF+B#9)Hn_FEK=!ZDON^hkw?k;;H6~L#e%Kywa?S)!9=_D&K^JTtlrH zKL%$(fI>w=vUaQSF0c=`NPYcWA@w#3KfvZ(2wn0RJhsa{dsQ0XVri!718~NYmRUfH0h zTq^@#y5V@9a;{w1?`HZ=?d6p~ddh=~ha2*sN=xqK?f7S(J}jsF*+<*kyoGd)I-Fm- zfpUO!k@kYc+{A+NGIEksItv8L$k3Fi$U4wd-krJPC6Jy^OYSf-o>6^5_eU2u>&E@ zV){))J24VIhIywrtd!2}=QOtI*Q1cuF}Ee+5a&x1ep2s%1_b$lOfk`veN>Y*y8X6i5kYf)#{Mr z^Rf@KCxIc{<2?_hXdmka^YdE7J-2qSUUG6Sk>+mlk4VYqekG=h+`%ez5VhGY1xbLz zuu9qW^W(I1Y&><$mC-p8hx;TQ=V8D!)#A%~GidVdc>*%L9#Tht*u#gmTwHvtzv&-d z8E>l)mdiLYPEhQ1aN>1jVz8z6R%vvf#?@n4ZI1)B*qGf1aDRQPb0a?@;QLa;mlS=YpY-NBlz9yA= zH=l?S6R~z2ilp~n97mMc8~+l}2x*q2oybI=5okwAprZ;dGlhh49%!pdz|0|%)U)K` z)LjxUiZl3@=4KOJnS=d8JiUyNDjwZGz}Xdzl4#4L&`9Vv`kp z-G96W;*0?pGj6{JQxHJ0Qh{`NOpRhsXq)k(;LCm=Y5q3bk3aioF%2QW(hJl(ZCTSy zssVg=qs@L@Woi3X#}PaTPo-29dDA@g(LPV>axace=H3)v-ltX;9ayzmRT2x$_=~g8 z^Sj&8UXroA?LjwNojd88N`R;GZgYCz$n3^rt&ggd(<0%{vJ2>AHomJ5!@Q&)DOk2$ zUs--m@yi_r7ufpJ@H|-;KK-&YA_D?$w7VuvO#%c6^rim@4z}}8p~I?mVr~CT`kc=P z^`9RG+gh#Ko2u#iGRY}rxJA}W-#CF$^MrVdmsKBiP0iJ>YfnCC+Kz$sc4j1fsIG(m zhwW>T@EVSeO~-wh8e_mV+u-;w)e-9kqI(J}Zp?>;ToUu^p=Mbu(y;|@xr9DHJ@qHB zRmB?Hy#=pR!RK|YNzRp71ACF_l-rG9LZ^cXeu3_fFWg+#Ny0? 
zxsDa)Z!&DL;%pyQ4%r77Q%Bv-ffL27pWbj<(LR_hTyWMYr;jA{XX3GVw5d8y;WF7g ztDQSKBy@l+7G}&uV7mkLDP7E`WjyWeCpt5Hsa_9zDE?yLl^h% zfiX#yqeaa(*Lkij(j};|=VZ^-VJ7A0LS>d)hbyRl)Ds0s7Du;4u zZ#N3#S>J~$A?q|g0`y_b_~wk8!I{zJbPGGhY!Q7-St--oxP-U_avBtYYP6%Y_va?J zQDK3q%1uWcFZ@fjnn@G{(@4tO1iFYsvbv}p3}t0yp)S|x*8nO`e@YJU=hoyNugyzn zPyH|F$A36P19FhP*)-HHcq+eR|K&D>c6G-~h8<|%mnSqQ1M$3g@5Qicr^)b5P^pzY z242oq!!5`=w$aIw41P-L7WUcx>p4{|vZ6RT8q=^S0j0nujK-KrtT7CK4$bxte(&Y) z+hXxuIo?;{xa;A2_gmhf_aMNF3w{T0X&-lm>ctk|ZuBXBE)<{&!NiGSopiac*P*UE zw$QwMZB>hDRjJjR)LbOr>-lA-{|5fNsI4WNcRzLAgM|V{71bx&3rDQcu=D?ueY!k~Ayf=>C4x8UZ(-w^J|NkF~7&LI$>P_JG*``|7U>yAUd`um@c zE(RNTeX%KViiRyjFtzW_P_}@n`C(Z0kB4v(UcZZi8tg9Z)uN)!xPN(qG}Da+WGBuk z4s}SaeZx}!$;&|jmaH*QNur*&MD+vcgi6vL;;WXGDAdY3{P+I1P-*LUuvWT+wW_V? zkeZS`srOzqy8-aVI*0dX$ABH|w7drhb5eT6AZ34n-kEptPvQl9J|iB0fXJ^x&J$9i z)mCk8NYcg#l#VPSko_CA1bWmYI+56FaYe&vi{Y)ANq_Mb?Dm(dfaCAJ;w|{pf%Izl zN!=ayxY-X%{ElhLVgeXoOTNCC2$d=1A#W|R0DpaMp;R~DHe`gOy$N^v>(}gTOxk#j zP$P>js|IlNo^EW2*ki;KTWupKyo#gx$wW<=$6%>EPYMx4Pv(h40hU?eL_Q6`(W>y1 z2KlebJfCA6&Ll*+_Zcw7Fdjychs+fKE{DU86$wB;cKHL9U5k@sHA~$gGa4J4xEG26 zB_iypYtQhEz1z3YEUXng%J{@M@Jun1D$JyMmj{)t#tDDRRQ?z>X!RD?tHX#!Q#wyNkp1_o(;TgP#s*iDgz)JP0g7APY8tRFw@29IQg zj2VYxJ4#q)+>e33f?8>!A)!WtGqVim;W_cOTChDmP1szqR+2(aT8pmq4GS5LlLM9B z9j6fC6xg_Akv3UhGYlSkhxQ`LCVytO!AbH*hM`zsLE!z@SfyKD@}mTLiLdEJ1f1s2 z87fU}a~h)GLT#Kk%HX$gLp~~C3ZdS;#lbc_ zg=>6VB7`aMxe~kbaBt?lW4kNvf0GtmG5DsgVt(&JqTo=7@OQY3&xq^~-oC zhh^_jRB)A_htTjOhyRgEWbH!&5fr@58QS-<>PvGn%c5agUi!6a)IU~kmBms1iwZ7B zQ&kxPjg@rHtX?G${EIb@HxjiM^4jZ5O(&5ok&NPzbK-w2ny6pwP&^UILx20LfgX%U ztT?u^G2X|{69?txDvD}ANX=yuo%DXDyd5Suqn$*)>?Z%i!$uWnTD;2r5|Jm(>751t z23kN9^ym+*kDEm%_57b?B!p$Q?sAMYYZ(P}0FU>m=<}>Y6}u!5ePuxsrMZ#kP57qz zZS<~g*zwBpbg{mqw_R;0jkt~G<7nMxxgP&X2R-ZB{JIMZLOmC2@j8%%!9wt?FtDM4hB2Q~;KH70FKW%7r?hkVqPRm{xjy4ajU-Th! 
z8|MmLGpcCtp>rGR?9N$p`qQW1VQT(tLma5GHt%VrD;KHO8a+X;_&g^q8BQ`0Wp=+$ zupN6%w;*{Qkk8yyfAww1FG~>#WRSN~l8}d&(bEMEp794&a(4svvH3#(X!itX^697^ zd2F(rwS+=6N(XS0yQdDTyVYZ=5w@!?Da; zeE5CF_p#`LU!NST`E~3rciU)>0J~4`-1C6*O*R+^o?Fy}V4691PHzJylBO6nwLKo< z9afMg%+vFdl9s?Wt!xUv@4^8tqBRz5M73Pr!l-mOQ=$C0VqylDNqEm@ z2OK{4her(O5pI)n2L>cP2};z+#->eb-W2$C^r3fzAhe6&NpsgoQp5psVQ~~P1%*jM zuL_v>M0;W(!-lw-;yySC*Ey}a;p;9N5&_BQx*J}9)#SKs{?CH-U++A^1lX23bQgZJ z+i$ffDk?sS3vwT4GvPGn=ujenyR_fyzvb48kd^RobYZ}qP<;MiCZoB`?^cZ6)@p>k zU^&9QzW;*E3$PI zqPcMUq&lp(Fu$aE8RPuh0Z}7|Ute4^Qn4V#6883Mv-uA+k$_b4lio+#LW$^DzXs&; zaVvw94sXNp4<^a6#Je^G;#?7fffjL~%P(EUp)#?Oro0lh)KmQo{-a)UL$SG_ zq_KpN(#Kme1p04&qpxM)Y3>P3kEM>qE4eliDZ9S(FcKJ7Lns#ozqpf`Cd z$@nM6*^HRj-CSc=hY5S7fFQ|(_pym2>Bh~N)-y_hoDvCRy>w_xP;51Rxbs*wD+KIB zxxsrwPv5JsTmM-%D(N*`b&Zz~m5W4V>RQEF4`lZ7^&Io6&I_R7r>Vqw$Q!cVXHgZ z(taZ={rJ=G4%FC3i=P*s*~z%R9S#)HJs6n*2c*&Aqx^WW)OrnasEJ_;|1e*FGCASs z2Xc~3@-1SYIo&ZLuhxWB%;X}u`|`se_G@_h8sUPjY+ziY4+&LxohRU%x3D)rS$Qe{ zipJOd>a)jn&ON<*KDCQ?^Z-9fc9p$5<%+2Gq{KL`LpnPxZGjrpE`4Rv9kRf9nlmdl z#=uA!XG&b3XR}F63ZG?l`K=E>7n7V-{nMTQJ}Uvikv8+wosqIx>yN5I|4SB(`|s+b zy4pCXFnH-ejBW06O;XSNIZ`nF2~_Yxu<@9YP*p4TjG2Qf_YSLX@fmPr(g=DNbh4DtqJz#Hf`tJ zY>ZqKoOmWX%;h0V0=i`!t5XE?H)NEc!;e~Ug#AlTlA_2v~(bpzf0RCZ2$kG^NeuT2roq=?` zP2+b-LGk!!DX?Pf7PU4U|e2tO8AuWe0U(%V%%l3Lr>7tDL^8rH_szdw;H%ihM9 zndv~1i^wz{1Ku}_B<$}uj2eiqp9F2u7P~cJK&lt^V?Rh+_`s<5rbBDsE8#J#$2efkD$i&F8p)856g+kcS~alRK+Uwn(yO9cPx zko()Q1HXm*-EAg-wf2?qD-)V{`To=F6iGh?xBxbY1n6NX3+12iHf0Lv$m&Sh0(zUb zopx82j4a(nb7f|hwBdjj)7pgl`N$o8VA*HsO_VaWu`oNMxW!tO7mZ=0{ znTgXYBsse}^Bx9Ux}PR6y7DM_Kz&2Uj=5#GZP~TA7Fy?&hxSFBrX2&ldSn6O3W0op&9^5LVorIB9r4 zCsFk;eGlkuZ0hp++R~=g&Yk}~$p5{d-Jxcv-iwxC4n-r;j$eX)FJT&p-0z9fexZR!wY7zrXQN=$!HP9!HAwjMa^X@WR! 
zX@3{_%V?-!%S2>t$E`+!%;L@e%uBVfDuLWwZ1-Zp+8IWepng7<`r}3~yGNI(agC~V zG%jK52V5x4XoX6&M47yQi{>3%`PbiR?L1h35g#bDsJjuSS^E-31!ADWW(Ve2EwkPi zZrFZ#sOSz(;hGE;xEc(iZZWc#mw+2goEC+q@U^PG{`BK%MPTV-Nij`<#g&2K) zt~I%|Za&?K&o&TZPxlbkN%ea3F0W0J2UAxgqpYZ_Gg1E|C!q5$Oj;O;3ah7T*3;78 z-$Z9gbKZs*-)%u^OJ%OsM!s4U&#j8BpD=n+x7%xo<$Ms)Oq^9kv>j>U<}1Y91G_cr zCDy9><#Yc!mFW-6es!|NErq+>+dO_d zbIEK{x`4d;;-<+Lru1oh3u&K%4cjZ0Vt&7AmeGNNg${hG`H4_CWolwl2AVVdqY_HrfWbE@0G#pi`)Y1U~K$9R-ITb9a#1N%-p``+v5dB6egol#%pkCoJQK>2_I&nC!q~* z8(d7%d2nL#NM39Ex1+Q2g~bm$|80G62M^&qmTE@bd4t29WdrRQZy7TVwA<;?$4?b74&1#nJU+Xs{5LQ()jkiU^vXSsvLx(4o zH|EJuz-*~5Z#Y$+RRKPZZ>5O<7kZG@vOf%hhDV z(a3}N&Tb7`X$R3oi(CXiks`n0QQ&(VQNvusxNPl%1@gTrlHHJFl{*ckUF=yrU3qWx zb2qn(lD1(A6|GS6vXaZE0=3^f)(~oGMtQTi!p8?btoyeIY0RzxeSwjTQHKjwWhW%X za6eE-@~O*Z2r9*c6b1}13ME+b{%h*5FViH_~VDN8uoHKtwC>3%YVE&&Gx=%iVZ)tvlqK@8j06>?^6$Bpnpe`Y3;defkVMR}+)p3Zi^hUJY+UqVh`C ztCz6;?GjT7H0dt9EZ*#wwmX!b2;#z-DxuZ!$fdbIZtW(`t%6(>LTKnTsa}&c>eh^u zywNWF$CT7!Hz}*PTU62_zEI!Og1DHVtg>EMP{LE*_@iN0 z?Mm`VS+l9~qMEaE)UV&y!|gk?5R7@se&SOaPE)w$c2nz?OH$Pyp{5pMfYLy5ewXZVLFHvq>%iWQEI?ILe4o2y zWPSK?J;nI?Vwc56ibbTJXa#JS{Rn|6U`4T;io9D z6+5))T-MWIHN{65zm$pdbSU4c@nBhgTvTFi4CVqyZ7LX!2T8Cn^J11N@q$~AhQsvU zwaV+ep`YJlky~-BrEGcz2M05N^Deg@H*x1qX^!J&FjDZpa(<{aE~xA~7JMqz73QL- z`MW7#$w}D+-w(`V3>P1i63s)e=KVjsDVtK-D`@0Q{S)9XHMb{xqoy3lQ(QQNS z*GX>QbRU@s>A)^;J&7GNKlN!qRlc4hX;kJ-98R9ndp=@Sgifww+xNfFY$hMgtPVpR z6&&RU>h1bW;ierh*wyXHa^$dAV-m?ZOLf5?-BHk8HVQGZW#R@%b}<`j<;Xn$LdOY*tacKkS%iy zoIvXFe*%=KfNy0rQf$CfbVzBh)GTgi{q>FnRBSFM*Rr8smz6?+lPqn#X7~ox9G2y_ zjx?p6N{lB_msZ@h)EldYUEbbYH>?!3ve1#TFXv{{CjriCqr&N; z%DZ<%PlQSStnR=M%PLM10!s^rQ1n`XJP)thT)3?-W5f-X?ux3bYN$%B@nV-bQ2lCm zqV<{AKWC}%DM^o)AMf^QJ*#4YaYVLVznow7H@L*w4{a0M`|@o^c1PgFZGCh9lu$o) zkkq0;E6e`La}L{Q#uKFvW(C}4TdXH0E#WMv8UHSL$EQS~a!_3IhDv@E3N*d75x!Av zmi7d05ixMZO+@1Jrzt7GNhuo;OR>!9%p#4W0mx&6%Z-+(kG5;%i1qUAyfakLvT z$IN5~0WT#fXY*oTFcl&Du$mMM*H{I%0o9xZs3a9rcV>U1RC*R47yfi3Twy3N*rsBg zi5T$4U3s=XBU!U(z%#;AFxz 
z2p|{qkVv9~Ny)dLOs@WTXIM~~IJ=K6I8srP+2jw-tf(13Yrh4kE#>ie3ac-#_)$oN z>dKOxoWOrP1td0C$FVoA1+zD{>$F`(v)==OA2k z=^p$=Ij=nr%HoH`rSB`;y}vBCqX){+TE0 z)Dg9gPOFuR_;J+fsIbHA-oTj{n#`iUWSvBy#J-uiaKrrKXZ1O?OnAr3mHmH!I9-}^|D!afU&O> zqaMg#t06w~pZ|9Y`wLlt$N~K_40w4T|L3zY{ed9yfpAbi0TBt7kq{~Y8x8c|ev~5C zXpTQ40A)T+x|U)k-0{8kP#u0~qhp*Qbkr1rRKv^#JC;lF&|zb%Z6k)SiMI+Hg^kcb zaDAq#Qu`ke>eN)T^V_z72&*@rPIhYq7d%mCWo*iI5=<>bsdSTM<35gL4*8vJ8HDlAxF8N-&sN4 z)G-7cV`QeXcfC+8w7y@MGMwT2 z_ui|c3$Jtc43BC}?D9saf4JgPwGT`CYgHJbb&E`PL?^%%NDm2(w$i^mPIVqK96WqFl_SKv=V zk0#mFCgVt;TZ|#-Mp{(kEH3_|57Yr0)QZCc`bwd^gNI)8QS40Y6`xlGHh0A&zgl)IdrL(Dg+&@)HW|3N+O+(Ge>h%uOs zCoNJ^QlbuZcQm4lrpg>jl{W7HD67a7^z@{2S;43hml5|_!L_srL{+!|o|3r9F%zb@ z-B}ppWlmC3Wh!>+>O~(~w1sRsyD_%1ki6qw?sn@6h{mP+600gsq|MM_ z#(ZUIjXLt}B}$m|_tyQln5Jj56ExI-<1OzeE&O^@)IF*X*HV#XQ1WPp@6-wdD@pX-boP z$YJSFSE&%i-^zwR6NVpaxS(I&r1MPQH1=GRyiG8{ny04g9uAhmV}b?N1HMgX^;<;5 z*dzUnGpV>kszr`=rELB}4k|K#_Nz_?gJ_ zP6jeU>WAr`Vk>HRqxcP%l`@xBX{_Z%CM~*Tl$3AOb2G)CQv92hwgvV#^Z6&z_#rwsuMUKiPZ@WX#AD z1?%qjaQ_Wv7voP93RbkgYG9_tMx~{cAyUp|NotwY4EiyJ;V|KiSQHcq?J2e1#V;wZ zR1ezLlX>GSu2;djZVckKAemr|ZEW7T>& zoCVq$paIELKvG=LpvaeK*{9V8-_*9*LmQ!d__6synG8d+n)-=@dO^1u*)=@r=~Ip3 ziM9DQTUgvtTsj~*%T9Myf<#G=fn?oHL*A!MvFjh&nq{K^H9O|+ws}G+Gr!-&_m(yX zeN5;u0Ry9w&or2JdU;_c;0A||mlQir*?my-X*VHUMwCzF-pybY`n>t#(!#yLu@DM~}NP{>)bqV-utA=@Jg9G=pz7XQxS zJecqx_=}afyeM}Z=9h8vdC#y1Q3kOUG#v9?!F(JD2O17y@yrGq$Z=1TY+? 
zU|6u<$!giq>o@3#a}hxhjhPdC7JwFMT4wVpmx(D76iZdvpDMG32}MG3q_GyO%Wb_0CS_6O70{qb9R@xM7W1DbQbAN1a7CB zdOY&CNb2%D7*$q_zI+r6Mzt&IU#KM)2#EIY&7gi++~}{n`@b3N$?7w`6axlc!aDZ# zPr!7pPh-bv&k%nH@=o05b=G3#C-#3>$6*z$>a|*}qy+N2-&--7vEtzwRS5+zxjQtp zNor4U{trnYpjWiFxSQ*?lP~0J!j}zI=k5dqrARADa{aZUvUZt?4n&oA{TT;A%Nl{D zM3}8e-a7MdAtvTz3*Y0U*z*xi`g$eMaj9fL8d$bA?B=dUG9l%t*0|x)I*HFnfFMmU}_WcxQ-$f?#kR50yuw|s<>sBeU|xXajo$TS--BjSmmP>c)2 z2xtkFyo$>XKIF8Z^Y$5T4l;sH=Ql24Z>d$SMjJ|igS@lf-B90+cM(+L)SqVRV96CF zmy{cJ>_a{IZ5&Itz|mxnkc3d7LDa!arJw;NNNA+gw&0W$9ov2PBh1#)A*TS;|#BMK$f`8RS{qD*; z;9gu9Bz-sv>dIRT!?e!<7E)aQ&yuP+_w4Ss&&yr2&W;^zUKFdJh8(*d4{37Ui!WrS zw4ry7U>O519DjE!yzi@9fps(hoSe{d*AR(|=effWb2@kBdf$^{YQ*7fkxMTly%V>k zr0Ol}84rX9ao77P%KOiMljVQKBNMn0QPw7?BDOQpg0ciHj-!D*dUtQbo(!tZ0>EZQG6bV|%#YMedV5&C@ zY#M?I$Rj8Pzj37(kpqUGB*c{xjMEOJNH(hlP|i4b$j&pHLPeHRVlERPsuE2@xU>n) zu!KG3SN(K7t5_k`k96G4%(c@j6vcscQue>hmJup@1vLcXAajYR(}Y zPG&+T@V{)XKHKPXr*;hekTeA4pH`M0i@h~o)^}lQ|sJ*#^ z0ayC3^=s}1fQhh4+WVWu#QPrLss<0&25lape$Pv8WJm(*T0esAs^2HyD5v3nU~Z&V zAm#?}c^JjMhwW=IPLf4%ih?Eii|Yjt9{bMzC2Dew9Ix7qfNZw^o{C_9#n6alvbFJ$ zE@K6~3?*5q=S=Wdwv6hX(KlCXm~*<%GTdW^D|Aljx-;P79LDi)ni6Z0L=Dh|h8k*f zLRwz8f>6p-*y)Y|h`J0pgS+e(o3{+KD6;CW*n}c+^fc0)-}}9w{xGb5^=P33uPAsA zmRej{Kw@wEB1%TdcJTkP^_Edlwfp}t3`2K!cPZW7Al(wu(%sF_-67r5-AK0}odS{~ zB_ZAXx99gfYn^lMvlj2>4YT((-}uyZeXlnnTTs+a6mOrhOiXw4EeMZjT3|P!=+Ng= z9>JblLkMgs!vJj+L{y~st)KIRRphMdbIuYyc0i{s2}`@Dke$DWK(&uwWe8SV+ca6U zgy&^MmGP7>bIw>5>HN-?tw9W*CzQ0TXET+bW#o$hne&E5ijMqZ=CAUv$!0mZeB~tm z3M7g#UJB0|t5+{T68kmvwr_0c?O2Duoh`$-0KfEZH1!AuiClZ;-xzU-KwO*L=E@u#nka_w7m=Z}NIRsscrGe$p1uRpQH_gG zk@lOCX5P=XM2UW1>Ii)$Cp;A~GJy4Mej5+V+4_w#Q%AO{i?o4@Re3xq(B5GvLP$QJ z@%b;mu4K~t-yRZTtWgb6bniM4AVOF#Pq5>f{M{M~8P9(@iExYqSURJKy^?1fSkt3W zeso%gG!$UfDIHZ1OWa*@e%p3#aJ{6BF+c3QnG@h@@E~t;=zU+O_){xO(`V&>f$3m6 zkg(HAV=7jV_lkQagLQ?eGMMfSS~1VEuJQDRUq!X$vD87nl7vQ&<=;LXojgty*-+Uu zmAy!=wT>np_SKGft1dfXur50ul5Qn~TUp4&`s!BdqBBlUDKGo3p;by+yL4R7BE{FT zFY7rz6oVFW{n(oj^nUn`vXK>2;iz#$bHmmB1fj~crfOGH8Lb@vK~|$eOL%*H+GPE2 
zN1Styh`;-^$OBxUeElCM%HM{W=-i8V2&kTT3G5dH?T&FVeGu+;JbQn8<>R$Z=eNJ$ zYb;BL`qGP~(Wn{gcm{9GhQ&5{8{bQ=7E&ER-=XE7C}c<9bq=oRH-eLRL%V+a@Yn790ZXfu7;dgv>V0v1SjkRme#dX7CAuD{;dioohpCyiC?=(NseTe^ zgo#H2(dTnW!ob6V-u@IGu|-#^h5+O;0GQAfn;D9j3{_idvT z+)VF?m|u8dv~@KDg1V| zk=+7#MK-Om&?!O~`#{U@%y)9i@BXBAeo(g#)~3LAUZvPdHMg3|$>gR7Uy_dGpC{fooZb(+N(vHG;z@xLuL7 zDAa_o)p7Q>i>0m-4J5gNR-1aVB6xI|>L(Ub%B4OohE&z#-s__t#A_d?au5x_c!U9h z$>?`KLNWt)wBG=94nO!n{3tJIK338cWXl;mo$(QDKq1ydtrKZBXxVo8wET&W5lWq^ zE=hVjq4yHr@}0NQV?uXf?iqovq~L4eg6f(3)0a`l*q&qFz-B#lcwv6BdFsTNu{-nO4ra^Aqb+q7a()k_Q3xMtxtLk_yq zRcK*`%SUxCw@JA^(dHBE#|>q3-r*t%jquCDM3K{PhJ5+->?H{ob+Ul81WkV54BOX2 zZaZ_#)n8PZEni-wKfsi$VUed6{>qLW39eK*ovsQ;vY1z-qDCZsO=5jaF5t#zJ+F+E z=f4C}L4T(q29Ci!(*?=>W|P)nD9N-_BMY6^D~n*P$;p|lD0RcMa0Re-sj!pB+dHp& z5k9xZZ`xhY?Icud+QyTbg#tBmMT5EfYn7TbI%>Tbh~+jDGYD^mOU#hvV$nj1Tzo~N zzFqE58N`k+BBk0?7j*-QIQ;jMnvQ$IlQHcK3skJ`4!Ca_k@3>C`bkqdCCTxnSiAo$ zei0^WBxmK9^K`;WPA0qmSe#mB35~5w%Wx+u1(iFz-Dh+EJcAf@cB1~wJD2i-_esbN z)Pb4{pL)5ra5FA^@3i+xdCubRYB^tdiEn%nlK+QUERbyX3AV{fcaEB~HXz?R}|G%zxL>$rXcG$VlG zXruT{fYBzPQkxP>ghJvrcZx`h|2}$JzA*ceD1^KTGBl9;C|29IJ_vmb!tjM#VgM zGrk!Dv(~Gpy+Vz(w}pm7x#$`9OLY;vG>_|H$`HpwguAklBMhDju#nG%snN#NoyRWD zol;xI#EoY_Xe?rZAuU{#37kkcOM*74O4W{PSMaZsX%Q(MHUkUl5xZDS7Iu~Weis^k z^g+Q6+~smeVZlAwwQ+a!1Hi|Laiu23iK?BTn6_K{9wp;nu2={x>tCEbv(O<={-vrP zXhcQ*zjqWeVt!MMhfv;;8HUkll|Fagf09D@T>mpwM}PSWTyRiw`c0#sO#MV#_BnK@9hTtcoql-^fFc}F4TTWy1mKIf^4D_I?Rm>d?>1J)0bKOp6 zbg~y87(cgXxixM>OkHczgTH&$Z#7a9DY*?pD&|xTsitzL&vD;_+>#FSG7;>Pi?_!r zTy)RZ>Qq&|6|<_!?q7_D!}Y2!M55KeyP`&IF>iYa$O507SK6jO>#7{1*6nKV9-HmH z5Tio}U@iKqq@tM9VJ*esFIy3S2)!N_i=wt&SN~Wl2NlWf!4SdlfL%L;aV2E*lN(}nFdgq^B|-XOMyUp9oDHIQN#`Z0hp_N3d%`4}wHtQhaPNbzQerA%?Wt&D^d|`jHE&!KJ%# zrCs#3C-Ov?M^A)vS&z#$@BYdNHnKb*d9R>Dw7-;LJF^A04d(@oHVh&xC}LKlL3URrE3QYGrMYeWtSB8Wd;^4LvyTgXy2Zbmb#| z1fv5AxQjuv^P_Q3>AO?3^Yg7blnpb(FP_#PK5S@MTU*07EZW&lc$z$rucL^Ho1DC* z|HlSA;{^U}ihWX%S${qgeY0zp8Mg%cMUT{TLjC&GF!}_0=v9?aQ z%f-2RmCZsXWpm`~f0l0p7CRf}ifrZtvl5oQ+6g~VO^7yf9K&cm$)a`}dE2XYVfcV# 
zFvLllh%qyQLhec#PX>+d$P>6L5tv|O0FuUuvI{U^c-hGq=@VKuV;F8$$9GV~lJD~=GJw~^hJAUU-`#~T zf5SdGhfa#<@xiA}h3vhJ>pp5tZPkY79%@ZBeOFlxlo-wF5hktR5$a3Ijy`!On?!>@kKz`^*yo)K)9?JoYUJ0wtzfx^kBclEDdbz zTsv)DedN<@M2$~B4bExwPt*y(Kw^p?sHikxsGW*X*Pzr~NHrcCKq{}`rbJ$tIm??_ zSXj_^KCfdIJB+`#j`pA8_%EwjY|tyUQw+gQJ>d^Zp-Q!4@yK`g=2>4RS_#lYR@=_M zvNH(Qbo^31t?pCUZyr&dMpr&775c$wWbHy{*sRhb2pfZalkbGmrg%&u=t_dftC^8% z)KZN)73DS|=OmyRW|Ehd9`zI89Xhln>{#}MEy$0?ZEN^bH9s8*OnMmnEsoQhOC$() z;wL9GG*q+=?4VGw(>M1`iLvAJ-X4`L`=V)Pane6!7kbXIe{L99=f>Ohpw9@uF2V=I>@Eb;0j&1FdatWBsxHt$l+_JyMB z#UEpt2Y2)h;ga(A)Kq-i8>wLbtL!-CQKM0jHU&3>s)A?EFM>VffvaBNXxv`tZ+OTC ze@;18d={MTU`wY;Ov_?iASDzm#u(G^4QCS-UkzXZN{pfUTgZANjMKrD$BI@v-m*qO zM6XLvL@9kw(CIu$05$sXpSh6{goEfHPFIEHU(ZGCl6A@tumeH}kUrc`NxJXl31?eK zBib}ywXv>20+W)CZR1$qiM_CVq#imOc&{k=Djw8?FuT_si8dEKY?b)LuV2W0?H-T5 z_4|!Rv0wk`yjje%)XeWLJeH{OPSCJAV7(i~6-GYDOx&3CHf{3u9n*0{1E*rg3<|c` z#lN}x$qfk5{nl`lzdD>z16Uw(LLnO3St^r~6kfgn>g$J>*wb;se(`?w$UI||HEgD^ z6SeU3q)RGI^so!H@Y6yUq($nm>;6O$6LFUVvemK~VKfuJna9lWF&KeZp@NWN8kGa5 zt6(&jEWMT=!OY}-=l!+7?F+sn?}pyFy!jQ_xVVM#51r&4Cuj&GhXRi8CHW<3Z%dH? 
za}-7x7YwE%1p;Hzt^A0vPPpOiLCmPBn!99AHX#%2DH2tyZ%F3Q()3+PqI(Vep=8{Y zThjb*Z%tIsT}v(~N%h3awEwgU9HC4;o~l-St2j1@l_K|~p(?e><TE;k%Wix*udg|L(jE|O{k-~Fj`Ko_0=d~!`jxQ8 zWYH47aOn+;JN$rC5;345`|CJUG`>dTpu_Qrz+b*WgH~5auHO8%Bre|n z;l271h&lK^ah4N;Sl#1xOH^04lOXNmJ4c=bfKh$he&#sb zEZfPay}6)n+aJbf0M)E1>3Uy=9mUO*Dgo>sTn_9eQSG#123{^tzrRd(z#>+kZQq?X z#Q#xLwEpOt;hvBEkL~u{2>fQWP@W|Qja1<0RPK{a)ot4kdPb)Ev&3Uj4os>}%VQdN zkm9qx-3BKzmjow1)57vX_i=I_6r)^^8nRNPPl-QjMH+HKXn`KG6z;?tbADRJG=bS z%#}@^c2ahi-VR4JCWdhQGq-@`3sQ410pypMvCxN9310~}x8}W^cXYh6NMN#;$0jhUn48FD&Qc0hx%xa^3~^AokE zHN1I00qFE+Jr|h;BXPGd$k3RlHU2gvG^TRoJkuC}pn^l0WNNf&I_B*nQGu!qQ1Fe8 zdg=XZcyz>^wIIz1-MwC%c8t16RW1RcVG$z1XScoHo`9E3w1IR_@z6c5dBQyl75c{5 zj_2Th;NQo`J5rGAP~@+v+-#{S2j1%dQW?t+P?!e@qaS6r+&<81;(Q(dq!)_stF$@R z-QL&E zV%K*}LORBa#75m~u4TaosLC=`%uA#U7y7deF2(K%kMlzaIQ!ZwwQ^E{JZG2`ru%@& zQ7oKkp}Vp4uO(1zJJ0&GM$}4&L94X^u;K2#Ni4ch)qJX*_|WB5pv3Rzmw-}s)V)e0 z-dyAuHM>zG*wXKr-br3!k4RFD`dEF{ZA6HGI4?Rz5)85Z}<8D>3MfYxTS)~s;iazXoIC*Qf>r4Ox zy&MQjP#h?85Vrk(Oa?0IamE8*1?rrH z#vdbknlpj`ZP$dbPkqCPyY1&Ad>(=mE$3Zb>m)MXNtHAGq$Ioj)sa_x*|#@DMMpp& zN7Wx~zgTM0JulUDXrBUO6Ss`Xb+C8+W?U%bL9}GFIU+O??~XTBX!BQKVmp~y zVQ|S(m3z@~w1~^k{1^|Lo2%(uZ>O#qZpyR5!*0@_nG126nu1jkQ@L|65?V=M#-~5! 
zWr46Y3@+%o^Tc{X9j~#NQ570`T@M$n&(3nq6%);oV-ns83x@KG@rpHFWeypsw+hU{ z^tsWO33*H*uayYBZq-C-cU{&hKoPW?&P6Kf>&MriszPKI`bhsannM}*xw<%Ed@lXX z67PIO2C2qtsDDagWa~;X9M;cq-Uwo8x0CX1(6_9N|G=mWT;5)N{;+WWY|yi8HFdZ6 z4!-{+g~pz}A`_6g7ua?_KTF;|&1cJq*Y5~t#F@=D@>NMxJrFRqz43O!@(bM0WIGEE zIPYDX4ti>P6RhGuf@W)PFi{gFJY>f~Eq)J$c}vv2ToI~MOR5}sw$ zKZD0Uzoxb#VD>55L2wd50hvlIH)S_V`i`j4C{Yk%-(I+0@_SL;q)S%KrBuT5FO$o- zoZ--8n?_9TVqOwFMvg=Qd*NP^Z*{y=7=0!GB~u&tqAeecxiRBtAVQE{DC;H(NYR8s z(zzFiJDV%vUjD41>CYwWW-o0gv6tuacY`7eW3!F9yk?$t!}ifFYdE6Ee(NGFr>+D4CY|`dG>n%BDE?Y` z9_O8*ouMHqc*MrU0=9!3KDkvXVGElIZpqRPlFDxu?OGIvwGs+S=?dR;AYZ8*7u$`@ zYFkeytm8_FzCzj;=X&I)%xr44_MOQmzIlC9KYloKw&*2d4iwN4SGB>M*UhkQ^mzP8 zJGfLlb(Amlz2v~YiCwB8UFkFh@iuo6JMYoDYRQBTUtR`Spuf|9yct#)J$-3rTZ<9B0W7eJ{c+hsxG>9K_C^7&5De*5~8PJ%Eb zZ>TDKiEW@lwz9FkpHu?+V!h$E66yMt@s8}MU|W&B-XBSE_;&AYc6`*Y61Puq{5qNx zqp^q2O(jaH9STvuAD7KEw$tv{Xk;S?=_e?7s8Hx`#)B*qq1L`5({2%uxI+p_^#@!< z9iVq`5|O+V48TkSt)bTb`h)y1t5BGB-@9bRFmw+UA4mzacOE=rZfl5NV?``2nTAUb z;CH;%12_YwVOk>;?D_C}RpUZXv30w@;E!h?|Hw4R#&nA7BN3u$7?VjLBVA~*)v2IU zqKTXC1dTNJV4&q}pvb=SkQCozXQ&(?CtRd2{H7U;EpGu$tMnlVOdIn)p!Vh#CRiPi zRMz5r{7UR|^e(1vk$5DH_X@uXZM7@rP?D~zuoOM#NYt^bu+2&_Qq2|hI$YaB{sgs# zre0n6A!PrAIl|}J?8_TRE0w^ml-tIzdQKva|IA16O=$D#%}Qz$6vMqMb;ynZ;K8U4GD5E!)IM=^J|qT zDiRR6a%-D1oxJm=sfy;HDpk*$qI9X`b7(at)*h(L*>ScdwUMP7R{+1yWO?^fH>IOd zeynDkzqA@>Ghf4p2=PtT&EDWf7gpI9104iA^{T3Y-rQbY_dXMcgYbsjq=Owm%8x7* zX^Xq)HHA$eKa!R^%Z=KTFNh~+IE7u@A*>#&MO-nL8$v^&5<9~2=RPdUmZ7BU76=sL zO1t&=HO3U6H}0Efka0BAiGb;`SVZaeTUDHqmMj}U<`?sLnIg&1m!NP-<>ZmBm0;ZB zzRaK?Fl?gZAm!DVAcTV9VQT#6w2gep5bI9#M*1D~<0Ur^k)jR)QImBZo0XRAl5w=B zB^rN`3;y&RxAG%*ZwwDy7N8X30>e@F&!^22{8fB(L3ZA~KKP@PhzB2rNj7a)WIh|@ z?znqPxbX=@T&1)g5ph2`d^A1EgqW=hZeaMsny-U zl#*gj?KgU+_XC6&+MvU4ClQ~we-A2pMUtK5D2k70+~F!xv!r~y^ zil-2wA_Sp+!DxG|p{D=(;Lv_j|3B+}NZ)rp9BW)*5s{wXv44uql(otJf?5vJ$qaBc z^qO;YjMd3=O!mzmDC(%?eyXXzPg}y7A*|2DRVRL2Vn9@QVJlm}TU^sF^jW(sFL|gF z^^jESK$KAZarz6f5i46z$hv1Y4QT!o87QGzzL&HSGaCfz{^l86o@M>JoDTz1>Yb17 
zf-J*Uj`YgKMDL}_ad5+MZpQdmG%lD752BMuw_IsvnW%ebyYGo4nVBW|zZ`3tF`RXL z$-5x9PD+J)?Qo)JSxtvwR=uAxZ|(d3yxJElZl9(lJboNkM|R>}9DFcKfvPSqudXRV zj4~&DDuYc;Ap*my8keHU^&K$CF~-dOsz^$%OwXz;+pjuKGG)Ouvo7w?al|oSv|nsd z3Dx7-rKDIQDYq^|R5NFhaV1DWTEkEN7;e75^EyN&N_%$k8Y}jUxz%R> zV`Xv4FwB+>bS-1dlE>H)DF)kik-)&IxD?Z1@{G`6g$E8J&~P2Zmuv zSlbJAxFgNn!vkea!jehTq}Q&?RKF^BQsFr0ASnNKt_J~hI@j;K$jG4_>Fo(M7+l-CjY_BACCNwC3q;5D$HI*CH7{&kmu{l zUo?gdK(?oyY^L;{YQ^53A8AmMv-qACxeBa5!EeNH*4*xzDrWZCz=r%c($c4bLE*8f z$)5f@$@xQ+MdT%jwT6L5UQ#*cHlE~=nU8x^=)L4v6=*rO#0q6%UHSKt;G({Fod~Hl zg0t4KkIz<>U0ctIHxL5R^*QOX2CIk$ROQ7(&{4PEHX_}p$rv#je=`7G;Osk8!wIxN zOpHiv299M-qPIoP8paE)fgD3+(!r+w0X!cv5UZ7-dv2F8is&}-zoS^PK^6sP{EvS5 z@N(7uKtUMV<+hAGp33BuHUIIsBwE45ud;^HPZ4KIc2$*hFYXkiRtr_ z|9k0{kS-_M#A%;uUIH_D2LI_PsR<7ZV^&Vt(+QBXzGrH8GqkyaqmEN z=Z-D;{*}6O?Mz-Leb%R6Lp<5x-=;QPZprT0d#oFV%kMi;v>p8Z32pG??Xk!|iK?wf z1jPYLIp7{%XUt#KJ@VHXeOLZ0LS&Gy3PKdH+W)8e7-|v|A|^=clNR%XE4qKiXeS4e z@dHwWpyUOIN+d;)t*R5oRaP=Iv&i~ejH851wDuEh*c=YfwRq4gRTVM!;Kad_Fu2x4 z75vepw?ylD$)3~3tWz8O?0n_Qz=8JQDf7-CDu122jTb=RzoA>MA$vTOTkeXRGc8!#$$O1d*m z6lZRlhGH1c07#~nO~bC^vaK^_$i%>r5*67i+^^LS}DUkmW?DoP(rD6yP_QW|IP(R0rAJb&-b3h7^FShniGAKF&!k zBnI&RQ6yp%SVCj!4mVLIj!c=kO99igH^)~@{0%cOh!i^FYYEhK_~+@MVNFxmzlEKg~XppHgBU0`jkfj2k zqZqF8+)L+wV&%4|jD1CQu<0fbaarH%FH>8J%_yagl`K0s{O97 zQeR`66E-04hB0Sxkg~D(@2^Gx#AyZvtkKV5P!$cbe23A{4Okp}WPH)aDd9#}*|h=M z#a0#ZslCUQchJU<0e%Ni4nS;{yVaUV5#NqCdO z&5W)dC*OgWM#%#W?!^nP!!6Tbb9iw;;jhkvU~NhN5AB@Lp?&wJ)#W|oKeeD`NGm8R z*u^W*3hvmbF(?osEg@b!;IVq|PecfxDp}MkD7bAkx2gXApJL#mfs??~CC!QHD6t@j zP>CQtYH)weXYo<6i(fj+K``>0Pr)7c?YZ>${alKNj+lcg2D*i<}A>g$SL)Dvc(5vT;;egrTGgfnhgO<<^0Qa}l zBY1)TRqH`yiBND9G%N()P8?z<^ zougU=`H_iMD7)A&vxR@!hsEvq!Y`ua!hVjq>EtbNL9Xx|g8Q=K@1fgk2IJy@XY`+i z#}6BFvtp05%=rFGKO@yd1v~nKP~o)F<%Id~#4>HGcF_HZU$h{dLVS zEa1VwCJ{BkO4Qg4T?IS=@dtNKhaYrXkHMd8NuRgK?>|zboGoC9beP~Yzl-A(lIpeR zhhz504+3_T(jK(q6)cC`l}Vlt!tko9>WFQA%|tw80zvo^S-wW{HliQ8u>(ZT zYikXuS~LrbnD%IgwW@+EIo>kXBzIPJ&FX>n_B5+mb>}e*%=*l>o0p6Y9Si*U(j$rq 
z;F4^PZA%6o7Bp0%=H->BvD?gwf;43Mh{2zLOenp0$8F#;QPjVQN}tVO4_Ptt@emJx zU>#LXkJcd63*iLf01p*tarrK!b3{-XfI*5)bAkd?^~u=IDMVxg%2(p?4z<}MA76qf zfIMwFw0_K(!nB+uT^4Pgx~o9ii~KurmQe1V655~Qv3J-~Q`_FJu(L6Zw{VQc|r4(=TBfioq0_n-%fz1m&|Yd8vgvd z>*mgI8Md?@1-6>;r3`IncdspOJnr&?klv7PVgzXdB_`dL<{f>1z>& z+m`P*iR5h!yAwbM%8CgghtgN~CS;866G16Mx{GJt@@7L?j%^c)}K8haS^EM zMI0vo2h`dV(I+*#_q$)dE1b6PoDKIfdc2ohSSYQEn-Zi5vXQ8Vc~TFqCKySgfO$#* z?mJ?gw^B~yp^dA)H-2@w7WcCJ>Yxv=z+`L3RN5-Rn_1+7of8(SL?zW4o8YF=bdN?V zto7O3SzT^1yU&Mc|40YG}eymu714Z=Lfflk&SqG$3XSd}p6zlMUSYOYibY^Dok{)E;w7!HSGUin^d z{w3B6FH|oL>&YQVh#~a$pzRzn;oh!tyv^`RW6>5RE5{mn-Rm_90sL$tYZw(2AW4iEm@a2+0lxW;12gHmOM3Q7v*y46dG+_ZSL zTx0nt+)Ih1MW4K`1g1)LVFh`MwABU&oL{3^Z50g)M>UNXl}d4o+IaELSi$9S0n-db zL(}buAltxiWxMGi#QK+11v!c~8+qr8{}~112MevYOUx`FXs&4VO&b^{g~fnj@g1`g z*}^ZTNeEO*P!`N}U-!qT>-Q9SsQ()4iprC5DIhIHop6h9p0l;**;#A*d7rIJ`2Bic z*bH1{_TkzVQ&Cau@{1n#T%wu!NHthEz6tJ4{oo(&Ox76=3X~i+{5$Xoy-Q$-ocY5V zC%<%KM}t}?U9y~$}$H4O4g2Y~re~+WHQ+JxolpuXnTgVO`1d49~x|uLg_- zuu}C6Tw96fTdO?&Sl&ru8NfzzW4mAaJ z7rKKNWwVha;o%z_nOZiW91nmTU)ZzPj!{G;S9>4>q#udk zJ1y>N(Y`Ik-#3I}BQZhQ`xI%ijev#}KC|WIof28Ijq&jZI&ysCIAdue8vsHmdoz~K zh^P_l&?_K)$Br@y7`x;b;38wFMlTh=Q!yEQxIVr;4@V`I!X(WJUwiyp#s4#06gYZ> z;BZrw-HJ976O;6^x38Y(b5GMI?x!kZ3JP}a)EL=Vlv-3pQQu9)_lM|Q-&B8bH5l}8 zTxVV2K#UY+@S#+VakUvu3X;WJlL+8BSmbHj^+iAcF<<8l)OgC@o<-`&vu~6AC?U&? zVykhaV_9<|dBR<(c2`^aHQI znbSxgem4+nc5m;i{YkvZRF7D_6W&HX2Wp*aJ2aaaOy*O2_pi7W6-)qrD3fy*mJuTd z(?xxZ>>?VcWB`+pm6r4=Xq)VOebc zwocouF`pcdiN1~>k*W-f@_|}~xm%DXU*C23S_W#dP!E&w{ZIzvq2?b2=80Tf zzsY1ZFyMjhad)u5@8bJeBKyOy1!TH<0r8zyF>lu&+CM{LgOI^=(HFIYtC1{aWKg4ggo?3cUBxA#^hNDrmkPwM4ROUN zbPt=l6`wSUZlVf?z)K?%oi8UGh~+OUe=;{H`$}le&M$6P5s@sU_bdmEMoBS`1WNn? 
zvHz3XG9STeL#h28yU<$hLoO`C|**D*9XgE?gSjk*k^YP3mlow!3BY??I1DO1d7}X6DFs=ANC8C=~ zT$g=&Z~GPQczs0iBdL^+#VFpSnXkl%Uj$3xE{)u&y_dzmt0x9uX*bv+!3q+gRjqV% zRKSU7hD}5H;3a;YhJ*1;|FU#|z{kRq!y*l0_IbRoSiC+pQ?WX8=mK7-+coe)1GZwW zRX?13i0jdp0mU9JjSI3p)wh09EGlHo#}44rtP4-o=DX5FZ9917p0UT(o%r`7ZQuhGLJ@f{NO|&j&X8a9Wy-pY_LDtw1{Z zrW*IPYB9hGnHJLWHa?LIjzjl_*MLlo8kqQ&Iw3VRUyzoK!9o(oprzyhLJM>Az5~dv zNu)O)i|`Gt-Ix(5EUkl#8*3~Lt2&TvE+ctiRG@_LR@RAPgT7nqH<5mS{3&MD%Ka&Q z+G(zrc#LJpS$w$fhAMbrW2U?5=~Lf$-VEw<`^uDKtTcvAS3`H9CrO>}fSXUC_CuL( zKUiC$|GR=>7-d3_r^Zvta(Af^ADri>zDlVQ@5>!a<;$Dkx~cyF!HmtpKF0fYcF87# zb_Poy4_(>7l3q?F&D`(ckGyBK#H+Tx$Xfiq$!DmrWcJfDqXb^ah(!9@qQsV1&og0P z{EQv~toD1CY%2srbU4=**d=nIy<6b&h zf+BzF7(H=Hz)P3>SH}9l`qI3B*IJ2#Shk=qu!^nJHiU~QbWR)b_5G;ut40GK^qe$c z>1qg$EU5|Bqgh_71mlvgjwr@@UDQ4 z)L>{e{~QpmhNG-Wl8p2Iol+RV_&%2=kfX()qrxBlhjElm1PBL0+mkf$wXsD7ge#Iu zv|2Cya-m>=l?PQh#2o-b$qJGOmy>qG{K*Zkx1^$ijmsFSzxefI|4gpE@%{>SCj^Q~ z*|Az0qf^SvM^e;3K(u{}Jh1(Gr&Skp%v-e6DqxO|o2u}b88 zG+sZv#Si84YvZqfrjtPLQX8ijoUC_@W?=37|5*T>%~s;jTd~$Hhrm3Z<|X*jxR zNEIILnrd$tE0b55R?^fh(zI?LlPj6>Xcx5hG&>x{BfQjIxp!BQ()0_ss=vXlj!1^J z3Rwd5Q%X>*GdZXfpBQ~jKAN@B3}mq)q>PUMj(~%~TwtKNc?emxDSo4T{dETce}GWW zR1!zrb#{rQ%y~pMynk%UT6s@~kzzY)q?NS1w$nu=AUET?a~A@fr`3B)VMpe~yJ_rA zhdH5>4&+Bk1rq5m7QjWg#AvzAQOqqz0}{3hB%D4pVoq`xy5Cw`Dw4c%5UUjuv*f%U=qePw2$@cnZ0IPEt0}5^00l>}@7x+TyrKkQ{NtSkqdXTE-@< zZiBi>eHL>rd;`SFh|`4K6|xD;nPZWrT)M?`$#W7&g-=v-$J$&DMSdlXd>;x7A$<41T=tKOsMJ`+5e%)Kcg_#cO(#5Pj^4)CfvWOlEWBeiWUbEHs`6@*EZJDs@i}w zSZgU&Gu=rcO9bDR+@bVz>g$MS1VVH<>p#y4K<`Bn*eI0>C^dcDV%zdLrTHi$KHGq# zn@|Dg*i=6y2l z_eR#+56-28if4tAfZI|`0s(`9^g>1A2)-mSG*S7WFf4h-_Fx2dcf&W0wMy4NyT|pT z$4ASUqp6$Q2j09)K~D$okDe{QXTDPvj_$KlgRGhwc|BzKq%13OsX`x&bZ^c+{3wXI zi^YGN%NzXr#{xAPJ{ni6nm47d?U@XVZFN=IE@TN8{+TIiKDMXi`Z3Ft%DDS_)?#7x z5b=1R!EEAi0(mrikC-#mR!FL6gh%7VYdF5oQyM`xBtK7hO#ycoIdxj`H4QuX-4+Vi zhaL*r_AJ&d%Ii#84Fv9_rD%v9wbl8OtvXkG>+aZQo?`bkb;aSku{`Og67`|FtQjrL zQJbZ?EGTW^jIZ@#-?zHr?DY>euaqp>v!74Dr&#~aT+h(;Zip#`7xeycZko|qr?+b< 
zW+JV8ga#Jo4Fja2Xi*Wv@gG%I^(6;nlBJ<28P?Xkb^U=NiGrI;<*eNg?wK z)R%HvW}Pd=c({h&TvO=1n+2KgRPxqeiHf=;|CEgHs18f$H3XsQzKgMF zIdaw-DsicP|L(HmqZ(`af~mB9b6z~gg)C;l#WU81Y>kFgc@~2fc9qS3rI;3OwT*Y9 z&ftTUsdmSk?9o%&m^i}(-%i~EO>$bybq6|#u1`Zu8oVH(XxB>S{r}_YEra6Rwsqkk z2@rw@cXxMpcX#*3p>cPI;O-XOAvnQZgIn;%-Rr}nf)qg0Ud(1JP@yMJs z5f1vRmH|E#MA?cyE0i`iEE!EMp8Vd$RhG5H0*P>GK)G@=IQmx@6NDVN)R{gC92=`P zL%Id?L?lR!0+igQ9yI4ct`$537FL(#8COv>BPITU51|`2*tUqd0@5tMe*YeV3Yhed#D4F}i2|@ocG@&9y z2vp=AODLVMG(&QhA|isW_y*bR&OuNdeHbTY!?fZC-`o~N>fI*FSNHy0;)=rWL!hcE z(+^`fw&He22>mdJ;F(@aA@6$@Ugb%CIS z9fbsXmAk4v!aa7>7NT-|w*;3^2Vg5A*nM>i$L1^cKr$b$^@A>}_`|MBy``tKIYZ!a zTU$*7IUTNUeHvw7qeo#z-!g20aw!&S6A~1vZR>*RSV#{Ac};Q zn}0SjR)&u0V8?-4!yN<=+VOvbIO%1|^uiPqO26Q^`@9f7Ca~%(#i-7WXmAI_!uC@V z;K_52KKru>D%I4ukoCqveWO?FrI`|GYDy_8dXM^Cl`uzdsql5I`zEy6cxNFW(r0dc zyWr%#Eu<^2<7n+r4z8;zFH2oB=B|=%WTX0F#l!JLJ4t%TNRA|vlXcXES)%68F@Z|9 zs9>8%sT+ z*peeiA@qB7xHcs}v>x~YPMnTXc@mp(G@IhP%W28>GSILjM%Cu-_b<4sR#MXTGVzr7 zJRi`y+IQsDTT5atl4%bmRy|{4C%+YNf-yV%h!s7IJuk*B1IQv0QQx@|OQD9A`mWDh zlWQO~AabixXXZFrs|w~sx&pbXBO#5VsVgA=w2nPp(T%1GkWIB(#hCs;BtCIBdPWdde#p?h_X5YED z0oL&ExB9}Da+DS*ahDkIRJ9WAy%9@QTKTsm0nJu!>j&H*idIOgv~C)ogw z&t9Mu*YFv$+g`8ve%bPQG~_0L`LM?K8FY9+lj^Ge(p>e7I-?oaueWkVtJz{29t*Q_ zVoiRw#u}|!UOrsmUY_oTANJ->4nG5232R-V>W4s4pwBlQ=?~L+hWJAYEPfpxi0FH_ zr_@cnl-T{UH@7@CS*u64yoN|>XrepP|3tWObj?ekZ^Io>J^_eDHA(EW~l4wVNe zXQ3-1`!=g38h+0>Y+NFR;g-Q*gr0EeodrPGLpO!5OUHfg+rEL6gqy74GG!z5Gx>LKNye$N#!>`?KG40}tUfV;Of#g#BKo*%I8>}% zQ%KjZ@KPHE9NFM#=bxo?L|+!hIvbU9dKRZ(+IyZ7N{n+|N^0iIzJU*wk3DT(g|z(D zyD(O#kfB$y^}>Fi+;JjR=||voIkXC8{qZJ^Rwr`{i{f1l@)~&tcPhbtl{N4pJjYh7!NBJHVDD)kA;IQkQq6uIkWP~Q3gY9jz z5b{Z|v3uE^P7dwMrYcq5%<*7zcgCn3lG_}kM?LG!(LhU$JA&YGOdedl9eOh*Z(!jV z9uAw{o68H1?u5H5c^C0YKmx@as?H4nyZi>KJ}genkc7TKblCiWB;pVLnTV9t#{hgl zgqEmkDZG~8@&tTb=OdAIv2h}IvTA9r<(l)63)%%yIC+`b`Fh(>h6*!&cvhO6ud&fO z(I?3NJmq3WnTspGP3M--vN?u4PT2z!Bdj)MT>3#CmocRX5dm|C(BCIdNHHF|8?W-u z)t$e09uGn~$t-a@Gr()TUFTs6Ze-1w9#!A0++TWHT=llckZ$^JGhRq``m%W!cO5S) 
zKR3Ijh39H_%L*~A+03=z_}$(j)wK8iFi^i(YMzo@U0cgpERx?1Id5!- zrg&2W#mA;ZhNlS`-=UW0SAJf?mVB%^Dz#xu`ZHeHT(_Zn_f?FvXoEHxC*a2~Y=(YS zh|o+pQ4J1e{Ar|hd+?}?@g4tN#Gfj?A8oQ;TrD4+b`E3$)|qKjt!4JdF?>Ia| z6bzHVkS~D?KA5|`PO`S&=WeQdcIGKMkOgk1JItt~y~lU++%4Tsn!Za5YF`?PH+kI^ z=gspmq?sa!^=5IiN;UH2=6Ng72zuZ0cS9SA{NcTr>0$5M3PdGBmJ@0XW5CLmp9*rM zP5ryWbrLBw_)EmXswmC^DY9g3Lv!l9oU3LqU8n-AM~8^tm|#8^Ep6Ycws+2=~5Gngq-qe7y&4jS@()a$EaThCpBt z1R27mc#!KA+T1>$C&Q>wlz6J@AoQ)5V(p7iI_J_dpLc=R&*g`_fC=g@-#nc2+mwZuqgU;^vlBsamvc}wEEVuvLQKd|hl`^Ekt_?=%0j;P z#8eHcf3gdeX^J#98|l0Ya=Z?bTO1LGr!C^X+OBYE@w}}40Fj0k7Q?U<9u#NcD!ym7<=Vf_I-)BoAN|BQ}yQM5#lil3Dkb%O`oK1I1L8d&&;EXKYw4 zCYUy9Djv%_#r+C!WOuNj(?3C1y?J8~TwmyXcm>UsixC+i@Z&7b%nEQV(0fzc`E;Z% zp4ySs4^`7>@Slt8a-LrZ2Ih)9Nnu%=&VfNYF(k`caL%nNxQb{Xg8~*FzoYrnU3hw4 z$W~X@GUU5bdnth-u~I)nZm ziFqdCusHKp&N!PoQ?CV`a5W4dCgaTVpKAsJ;zd?0pSKvaZIj8u5Jf4_sfRNj^R$!? zSE9?wI)W{Bi>6xGCnM@F3g$3%s9e_f`II8b{(D+38dojW!6n$~m5kocXKPWR>i3>!Wyvv#mva3zVw-Qm;cYVGgH5oM5#$Pkr4ZM_DO-ppU&m|EWonIvZ; zPt-Z-MSE8fu*`x3BB5^`VAJJT!Ye|8&><>|HMR2{?;=?&!@oN~`ois_(Ll0{N9)t^ zEyD?V)JH#5{`@!_^)`@&g|ANNH6Ug3k$_vl!?^zCbDO7lKXZl&@(*ezv94F-khKZ~ zl(IKnEhVoY(V;{Wv4m;qh#p1;u#-u9XJihHnBV1oF@I!tNnDx{{$|N`0(w)0FjrTt4UC{=}X#Ga6oLysnP;yG{f z5)-Y4=;=g^JRaZk>)WT;fKRahc?LoQuyUxWdAB4!n;#v0R=_podq7jHE-v|46%UVd z`++5q$6CabJ6S|e@d@R}Gm8}Xc+`fjnJqo{fP2mg<8jJdm2xf~=@x-P!AMFbCuQ=k zs}A`+SN8a^)I#Fhv5`mbG-xyYss-UQ4z+-lLaQ9_W*M9O;c#m$fP{UnyJ&ZzuhApA8 zJ&p)ca{=C_*QW9yCFS8XWdT&L&Y#BT3hx0}=xpRsXRLm|`_duoNPB|#CXbGeuBNRm z4TE&N<8HT6(4$`9^92>w@N7Z|w(A>r->?It`(GAVXtWv1C(03Z zjbw2oKttFh@?im?{xw~+6EjJSGh$bSl`Krv$H(>lX$+%+)F|ICcxVys#lfF~*g!z} z5k;>rE(GPJJn%@dQAyCsx?km(pdXVZc}FeMt8G?zrW|iW~t^ubteV++E&y*j(*6~S5aewC>E3y{SQK^7Ys}<=3EMoc8f1J z{#`-O^g(MYKJYxd*e~q+xHY8nywev@wwNEypS^xNGyG>85u;z`D%t`GjAI|BKS{<& zl9W)Uo)*(L|0XcFXy3tF_52kIo-S0E>8q`2jC>!1&)V0{`OG~b=&eAN(y6=)m#)AzM19uECNof3u(GALzeBgq>vo_jMC(-B2h$Z;i_Yl+{ zjshe`DP$rodHOuGP2lS!B(Hl{H}ZFk;ppwMtPye){^5ug7iSXQ-CVs`mY^!-sHC0u zbjnQ(Fz~I`2$0K<3)R$rfxX(Xg=+Ev6;V2aD%xmet75;7F^d5t>ZZBL>H{^Mn3WdP 
zq!_a@-HF3Xp0%FZ>i6$HCH;{vjQ#@=D3Wext0i~&AFQRTIm_Tt>pzVH{AM7ym#R8p zhq!H(_?`lpiZDeHefR56Hy747%YAzQz(rXZIxrSaz97mTyulO)No%4gamMo-uh8S$ zd$~Q?m-u>qXvWdXX&KHqUZbUZxup=`-y+r86$|=X3tzjp=kZT)z%qEZyNakAi(OQn zF(8G0y)^eN>?HD`XWYVOPz3>5Gr7Eu+D1aw$lt7O zm=kY!Es}X9A8xUV_a5Irf~NiIYpOV7_?chZ5Mdpo*2#7((pvRbGRwOayD;MGbI^rr z6{|RJBlRY}Vwb!mEF3f2w11$7f0%>(@iTzoSKAhIznB*T(Pm6Bb33o9@W6_Jh0g&yh9!U=r{U@JOJ(4ru&Y;7vAtE~xR4T^n+daI%8uJSL3_4wfxW zH<*vNu`f9wM$4$givd5<`@i!N)LALA6<2&HtAOL0U3HIVnQ@ZVA9Rc*i|)$OVG-t`~=~$dliAq z>;xLz4vPwDDB1Br`7m2sGVXeO!8B2G<7i$S;awN>+-(!4OL;#fn!Zu{JX+PksJH1Q z4jYeGZ{Km?4piu{5Am^{%pJ!MwgYj<+emrL26n+xR8;1}D^; z`96(BDn}PMs&hlW&}LR2>MuNBW4a{>Nn-G_mDh^=fuP2BD%`s&=r^0H@IPNS<{2$% z%Y%(^!HNew0=@#@dAaow9Jt24I1CK${}@>mu72xmeAodu5+#AQ781l=RgqA< zAxVUNRf*v26t#n@Qr0Q1qFy4S6q(_xFJLLe1FVAlH%WmiY?Qocb@k&Gb;@d2X3t;| zfAi@^;~z+qm9{i{R~8pf)hTU8INxj+5df*kfiEr;I7>d9uyj~t*wbln6vIW7=`7(j zNk|E^e|sD{m??fEpetJY=2c{}ciExIIm5^xTMKgNs^*OdpZ-{HkFZ!wmhkxnp33^K zJ1QFs*!l3gy`gcthicr-XYrQWLI+Mc*iOQWc zOX9uHW5>S0`muABCk!RE2_y*I&!C=_em)~#>SL{WVcP{aT(QP9^+6R1i z`+gUL^ybSoHh5~-KtYz3LaCE^KBGOESM1Fc12^+!TktXl4-X7i4PkLsKEHEKSm^Ps z&-25VUBbw&!kBEmoJxr!xC~H+I9wO0*8(rwz(>5Dnt9L@J-;he1np3qC-Dhb<*G|K^Kv7qMrq!u7owPAzYz z|A!mE#&I@pb6sjn(18YhZ#k*8|Lrtmo`B%U1}{8TFg!FFp*jb6gPmmc93i~1n0W%* z(=(6u^dc>)`zydz%Bh!Wc%r=tG5m#fq2`+pBg$`*lG#?U&`KpRp?S)ON@27yVW52B z=cKRY!cy~6lPhQxoYgHS;%B{$)%7lQf?}K_6CCqVvo3n_P^lnFnPVU=o)WlZiSv!I zR1|9K)Wx$^j2yBXU)A^^GWh+V(pV!b|>C!1l0Pgev$9ELg4m zr2BXW+hX3@bo{k1${h+s_@YlEfBz0D_zy_C6J0o+>85l(BnahGLaL)hS&i(GtoGDI z8oUA-!St|HQdntxdNpsQd8{-aeL;zQV|Bs&Z&aOM1LHV6b6X5goxrQT{C*4pYbsya zVc^9H?p>Y3upUcxgGoV%f}|r~Y1}$#uqP$MCz)4Kyn`)MnE%yc^`Ipj-ERrKFef|0 z004j%(ROLrl<1$+iZTs%(q?a+pQ0QNxUSa+*KR+?m(I7`6SJl_Y)=pFT^-qAIWKrq zej=NEIt)clJuuJ~rOjKb6!Y(RDQQNo8#?0ugrbx!RD>PctIzl-ZdCP(TAL|FG=mAr zfk-HDIJbQ}W!S1>nw8}@=udk7%frIZUOtj#1qHgQU$b=FZuDVMmXj3nlqz)yDyZjw3372><+43>V^Nw zh@Kyl0X^~Ym$lKI*cFUZA&b-Ilf8a*#e7cN6&1p)OKm)HBXfa^;dNr#Pv!b9+xm}` zWZz?igMJ|9rt4roo}sVX9(J8jhBFGNVuk8jCSRj{1p~D`e(+Mgx!#n74BUjBFw_&B 
z_VG_4@##JZZNBZ*jK4TbO>QHw4kA)coq?PLuUjpj?YKjd2&Tvs@Mejh>C6pOE&j7T zg$H4yTNWrf`STWV!j~zc5@s4OZD-*D+~`Rg*a1xp}^N3mmv0gYoj0!kT4i6JBFcO&VYe6?U>vJD82<*6gxVBL|0ko+p4|P1xv^H$azwp@a z`VOjdvt1?2=zKg2>oj)+<{H*#F>pz^``FM$nWBLe1v3&kwFOcLCl8`c1af(<@*tT$ z{=qvBK?Vzszzj_F`QeXMY5YloLEHm&%Ge_CCTa~qzGeZ3D_Pw3Q77^qV}eMZ*H5zS zmYcSy(h>ck(1^vP#@7z?{&m?n_BU_=yzsjR`kW-(PY`vo-ZprvFxsC7n1RQ8#KNub z-bV~xc4zPTeB9Kdt}`2&5{G{)D=`)8%M|fNNWUpNRx=|*h9l7Z#(wDO2o>1B2uK`a z#FH~8Mfv=zCtmEmg-aTIvGC`BqyNkZ)s}x?+LohUFatV=!mRZ!xx`#do{ua4fiEJXGKp}rvuA4JpUz+6n$%Cy*NamKW z&u9MoJG2*7@wwF4y}$)3i8I;Z!RX@_(Md1mD={7KCVpFl?cM6|JC@bom&n9*8LrZ} z7BHt3c7QT{6uwcwZe$bT0Lhl{Dfl=;yMTl_&UXj!h>eIE1$dAR`JF|mU6J$U%-{2g zj)oEVWk19Bx@{NuLS1sKRkfAAacFgHb8rVD3RVSw?LnN!)n=$1s&T@|H&5Ft*4Mz#t5MKVXwLhuXN ziii4P-5`#5g&o+b-wn|eDyd`9Y^uh3wzsMe!qa+d?cc#)?N#j2dhRf*PcDs*wF%a` z*H!^ma{20I^fM!xIcrTe`b;HtRi`To*%=A*>FHRW`cH5{i;WcSx_Fp{q~NUt`0D7s z1-Je%M6v22#QrzO$BSG&j`8$mk_rye50ce02e5zRU2ZQ)`(`tW*bWx`n~ZFaB@ttq z&WTLA-;0%lZ-T+>;l;fuy)fkBkOpQCQdVCDzq`fda~`;KOf718fvpA%cIbzz)&N&H zdHhoQq%e%x0ph`^~@ImEHNS2}SrnN5_mu5vZ*Z@8b=EJZqJB#A%6+VEnt z`rW_MVCcJ3>1Vk-!O~!7X2opCJqv_7!n;vt-*zchA`X#&%qKp9cOurNpIFdRtz^h3 zlOqZ3tQv7M8RFt`&gRjns!CFX8Eh&c)yI1YEX>97$XA<{dN+z+s8fgV`XoipA1UJo z=D7IQ<2i`Qyy^Mkw|e_ zW3|h)n*|D+rwmK(E6;%5IT~chB{&U1`Nw=B!(7JTsW|SfI~Oyp=3-^l!Hr&&8bibs zNl7f|;mFH=Zd3M;frm4jBhX?Yx%74UA0{dw6HGIJx$8b+l*Jj(l>|Xd-jz>8XmgAo zjbVr!PgK=_LEk~wRSjIDIzgCphgD)y)J5`X=7f_ zuG7^6j8wACJyt!oO1Z{Bx6O@#wOo^bV{^2ao$*a2TcRZ$^oBxFiW2Poe$*ZD``qJRwWQUpEL7ncr}MaE zq)4{iUZ+YMcr3qrbfqhx{OmTlC0L9u$RD4Lc*9>b>~fi~h@ZLQDda!5eVuJ*| zalJRp3;N{*5qRN3>()3$1dh34x!9W5<*!e)7qd4mfw_h>O$=`9BmBd;I%J22q1n`H z_sD4i-o*E-DGgJOw=@5&qt&EgXF>I@mBaT}g03cjp%V7)Xb|&$6xTv}8GdUA-)&ke z=X}Kmf)2*U+m6ojpm`%`jd<49w*X1BCm2-7P1=_&m{D@Y#FoSsPV0tOu+`mQaHMEZ z9vt`PBea#omLr!Cx0hy)>m7CXg%knLUG@{?qWvrgGsa&)RVJq@{u4|jiBb`}o`!~m zf~745hYEMGqR>P|lrejyHQW*@npqS&r(z;@bw@MbV1+KqX}k3zC(`VWr2~a#S(e}H 
zDUHrV&FLU6o{BB(En5cL4-9+_czxd;EFM?qx-0ok{$=Sc){W>5p&WmR#Dt_DmFDVXw|s1|7a9*GrEh9+q(AT1Yq!da0wb1kS5FJRN3l2^0~N3ejKC^jvvY+`ODeW2m$NDx3(1A4Fq&CAKx?Z zmqJQsnC_f6z%?TPt{H(%H?0Z;8Itxh>COuD?)?dAW5~sQy4>#$<%V3wF)FA1^x;iA zC=WWfV-c97W?ou9pL2#aXNn63fpNqn6@znK(15G@_8yKd2?x#~&B!JiljYpwd0Oc{ zC~ZaU*E08%h`70~q*_ABIK7dhFV8$qxqJ|EJ_ zm2+5Xbd-$LKi+2Z|MxX&xy0znqEtp2ef8T+z!lz`Ma|I`>ZTS7M_%#sF1MZft} z^`XD>W$tl#+dtWR{q3@ov5cb0>D*)@;pPc5C}s;}?PgE7ud*+0`GWa&zJLHcpD+fF z?fr3O`(^BJx_o10e~|~X7=*{G&*jYpCYKPqYQi`$@n4eCKo5CYvKLo4@;_&TxOm!q z{9+9Zc$pd*${XkLkrx1Vcy^ZYH+((zmX=g+(ib;eW^@Hd>lq6L+9=bMnrSmD^WkBh zT5s=Yc{AdTl0Ra6<;002e=eNf(%ugMzSn*IkSeDW%)*dg+-LAY(d0Zqv3cN_tF*bF z)t-MD5~^jAM(iHn*^GsWqv^sP-npf=EI#+<#{LmStR;o2XaS}@gd+Q`W!{WJ0@x92 zVc8Pup@BA5MFGtlYlT{(t@Ywf%-Qi6W0G*&{l-4)V5$jje^*`f;irK#c{Y+ag}TVY zPh%gkDw>^XImH{yrI`xMdu3UD%8i%jFVCygyZs^vW;~>sc5Lprf{J{0&Yr_EGyAD@5qUJMqrb|n74L2 zsfNxq0eFTBMCA(Ye_Hpyll(Kt(~3**ny!$kl`%)Wl?bk;+~|zOnQ}0Zx*MOy-qd6+ zM~~MhO(98I5-zx`z?m(;JXg{*;%%|Q-~~8hFsBP1-kZO@2`e4^g~}driYogvC1T(- zD1coV3HmJPzd3RtGsx#mQ3E)VAL0^b_BCRmJP$Ccpjq5h>KhqDZzDK-K|dyS^ux?G zr7ufgP?R3~6lv+bhQ)fw|i}GSc1NYyn%Wwkq-gSMZ?2G*e zFb^l_EzXOuy5;w~$$rUZqRM8Y+cB_)OH!+=EHDmR8|P2irouo*>KZOqUUJ%qCuPi1 zPsN?a=MAUX7$%4li1*I+*G+d&i9?=DnLIWs8pMf(fj>J1%D99~uvRaiIV4(l<24h+ z5Lr~OCdcU8p0-rXNuxha>pG*oCB5aE;=!M{OOY!aKf_|`QV45cH_^)sRyqO#z_Di^z?5YrR}iUf6lYlLqz*!6nC!m6c!?IlLmZ! 
z<1bp$otKWQ^k`I-;YyO$te0Ad11s1(su_gi2TGj(8s7qX17JZF5uL)wb%+svKiOC0 zYE*^bet@>|Wtx66zTZK49qE>toEd+jBi(>RQ6i%$mZXUGL2vUVCsHQtxot%PIX2Q% zWV#Q+;s~&Ikz(AXAdF>*8r=ptXFg>Bbe`4?R@Y`EFgI^7x}M%xsZ_qRyXeHo)8pQ= z*Y{VAxYimm=EJX0OBv2rWvaCA?qCC7RB64t%)ut?_OsXWja1gg8b!B7N!!EV$?#`- zqy6~QIw_wKsVwL{d!Kmm)1J-66$u2%mo z>hP3;HFk_Ay*bGNQuXW&{XiB&*LzOX?qVcwD$B zvK0T_o2RwjZ}hg>TBRVDyLB+sR@~zAb`FaX5a^dnQxZt9GH<}_km5tlpuEtByJN5h zC6gpRaG=79$^I>_jVr(>BrG89&^f|J#J-#H);+qRjEQVn{k4?`HgrPv(b<618bea+ zhsSYrR_7AIWH*>r(&ml*<)7S-yXkZ_5wWDgtr~-O{JOY)Q-demUn<+=U2Fxrhxt7$ zb6&I;2SI5a{$Cr=0oso{>J5G^H@}|mz>N@1;f8H2DkAE7X2x_$xj=7Pj7xvyx;ISKJZ`F^gFK}OqS z4cpAe0eK14$?OeBr3p_wLb$bDV^_3OjJEKf3-BaN0AWyX`Ww|3v*5Rq%7m zYj{{w@WV(f-UN%B3`Cp8|szR@`mT~luct*r?Z->3^i(P*UkT1s_z zCJ-H_UM6*c$z9)MGyT;OVzVp#Hx!z;2daBGqH~;WKPOu<7DB!d(Tp*aU1tMu)Y*b7 zZibKEsHmw6!oKj2;9|VEvN-WG0q{trE_PSb3~+-3nl;6!K0!iJXZ)O9KZ74rj+dM&M@V~l=5cm&BziZ#l;D?!w+If5h$U8 zh$QkD)n`2_ut`%HPf+jz%js-!hcW(_8AsHtB=8+GJ`XUKbA;S6&$x6G-)oCeY2G_C z`u`bazOcm+8nOi|{vm=JtI3={+@%zbMn-b&>kE-``(m2e`M6-4BAnl!LG6V5C&sX!>FA3&idk8e;Z7 zS^GYFtce**8fi;&1Rj9r23;g)3Q7?(S*p^_wCf@c!rk_zO zMKVXHN1{jMJfn=V(8jEZdHbaI z$xe=1=y-b+PR&}mmiuelTd(RP6wJXfA==?%0bOd!;W3Lzitxd5Ter&g>6U{Z!JK96 zX$^L5oztpg_o1rh<|WF#I_qiPN)(M?nQ-;xM$rr1`9f4E5*h`osyW(G04H)8>@9P# zo<-O_S}?Rzs*1$;IO2c4l(bCpNf3#O`=|Ec$P-2lPDQlIt~)pOi9M$QsR7ppsSj+Z zu<7FY&OXFBm;1B-!`7>I=+;;1UGFxQlQFu|i6((P=($PQ{|D>j4x4~Hv_xffhi)e( zDd(}>yA@6zII$b=OzV9V%5J~tx{inBl2!6YiyvuuL>xRtncp%8E0%>Wq^Vj~0?h4y zf>S9_-|+s8cuc6S@z$GJ27frtcarp1fQvM9aP1L*`qGAMopNX`-U*qjo zizI@S@kLSUG-UO)z7WOOl7=QEuWti=c9$z@NDwA0@-6>jE|fnF{S}5+dw?gHpv@24 zlgIbgpUxKODD`xa&08|c)`{C$ByT zz86#|=@0dko>_FV*!Pz{PQ`+)#@&dXuCCB~JZtsN(RVfk(hloghm+fxryft$t8_Zg zlwJ7mq~UZQCmV_8%3}E~bn1`RlMM;v<6}Wev=?u~7y=YijaEy24`aT13ip9Lufs9| z(efn*7-=-#D0wp4HL4wwaqP+fqWHm|QA+HN@>3=8qsdWGQ862EJj@1QoMZ)E_!3>T zkhtj`DcHRzIDqNlZ{UT5S;QI{(Ahe$pHi6F2}+x{UWaSXT_~ndl2?SzeO;7U?JicJ zc7E!_@p=sl! 
zQYcAOz8MEK7*592h^!+(qQFKxER=OOCc61lc~$e7tB`kH=gfN{sG=2AgqXMSZAPTd z`1c%WC0^1)NwbvOgpoHby0P8)K3wgi&=-8TuVp}G%6WM%Y!bEo7rPh2cquDo;S61Kc{=>nOCHJDOx8HLb?o4#& z&1~jhnNc!D@&1xO*-InT5!x>z8k+MMaQPTz^#(O>*{WsIIj(~C9r8*;ZeuM8XEdgv z+tDEIeA2izJ3P)v)u4fx(WOz`wyY{XuyiY4=~JC2u7iPrO&(+NyH-=?P&Y)_c?3l% zK$5zr4bj_EBn|2?M3$O7eU>D9!T`%Dph-}DF$TmFZ|YP6vo7yCaH@8z*qvjM2C1~J zPde{q9FK-dwQEztOQ*t7eV{U-b><|{MgC94CMI${;Lb`!U0_MP(wSbRd(YZL^TSn~ zkCsG3KvS#^(Cwj<(qiT-wB?dZjyyp;$0R?^do#g$1h%6t9Q%SVwfdt_7mo`OiQ_l3 zrNe(AySsS(2`4C^+1*M4;v`z{b$!!87!H|;IoNuEOV4SVt|BAnPu^!8&3TJ0oh@l3 zpC?m*z)PbuHkOjw7$@d!sNNIW@A{PHd6qSg1|1{Wa(>|>UD7jHWtg_qlT1mK4;I+s z1=>DIRoOtiQ@qnuAdvKbnhWw^v-85W&=$ev#i%B4s%PLPkn;8Sb4juWM@QZw@A(p< ziDvKl)h`4mwoBqnq0;D^C&!Qv^aQdM+L(#D+ucBXy6G7 z5c2c?rvFspz_w@VhY{4(?eg_!KsC$~zbhZQ=DtY<$!`NpL~^L&$+6O99>qwrH+NO^{GsKcNxY(wZuuZ z7xbrjOAf6%qWdbhI~-Y1f&$@GY<}CRqqbj6>pv_(A)P8$=k2JzNb>4!kjGRb zKT^N(i{iHpvYSEL4|Z7X_#dvYW$f|UEqr732dXt)KmI{(gC8+R*W-+3lzBcX<6ap^ zQ;YwUQ(!V`9Z9L%x&fvYRX^+=_OOj47a?ifFTCi>v&0UIIZ{JinR7(KxAagaw6O1U z<~I^UC2v4mvK)~oF)x9I;A^Wv#p#p9=#4=a;{x2sENoB3hC?CGc$o%X`Y~_J9d*`` z!nku-v%{6-rIp|%Ke@A5`{$FjtfyfujE$Gjr;Uxorl;Ak%Sg65tG>r{^)1)37|NAI zX7R7PNilD7X#)5FK$+??d?$qgzKb3<{qr-IY8WM?3QAZ7i4x_7CuA6<$(|mC*`F>i zefUw+eI-7KXp4-$vsF3ddGwhp71|v&;aDoy#Z_MV(3L6-K#}2OVR2BD za(K+8L|#INlA=Ei0Iqxq9BKu7`-)qWABvm!&en;6&(&x%83geGhV5t%tWUO6C%e^)PA)M0Wz& zRTFE+hx?utoRf|}O-(pOb8##gh&|n#(9|In_MNf->IpbFl&$!FGsB9JAkziLsX=-s zVg2S2{NiCWcv4UcEk@p^Py1Q|b2$HZ_VX(By%Sm8Qk(71r@}&HJH(#%T z&&g9hv7eLtU;Fs9L@3{9^I?eck$;+B9TysQ%7D=i^&!~92&(Y5Od{occS{%E_7saa zM%pb3-A2_I90v zBCp3l9$s$i^XXb+&_}N*T{^7bw~QZx#*lBHM*4q_LGJ0U_6A&WxB`!@x5zdj)S zoGjsSpu)|N)O@|i8e^O&tACvaerA9EHRZHgHk@h}dd_und zme=zu;PNTC7bdt=H~{X{Ox6nSspYCyIaG2#M5KL(vk?`X9z*o#=rp@jsJNw0_=rqb zzatnYFj>*9(udn9^87KW-1jIYMF_V`y6xj^`!tqnLtDt#62bsXrZJB({VR?iy{)Hz z#ccK$`Cmen5K#ei&?27g%24eE+?l2XCqu559U?rvC$NlV*b^QelzQ>AsK@t7Ys|?= zpQ^|Z56edn7FdUM)b8&mhSc37@8B@C-+z>ho5b3Xv?+~e1`W>Tje~CdGb*`s0P9-{ 
zxXV)hQ%HvSh6ZlmB3Wa##5BiY>#)dRZxe)kWhs4649Ac1F6T`kPAZbcC(|()HA;I(aEL#rx8ckc z_>YRW;Ej{qhoOs+<2Mcs)<9h^IE$cR2O@j_GJgi%2_DS)`!}Sv_^L@x;uExQ7sP`p z{FYh6m1X1Ox(m6R_9cF|suJ5JM{{1cB|8m#QMY#4cMZ6q>?{cY;#$@{a$m(b4UH92*uTSfp@ z%Tx$k{ofy_E$nx$FA40lR?1u)2nCA*DCvIZN;3XBH&->OFA>RU7AH`AampYTF78PPc>aS_%Iph(d+=LOfv-ymYCc3YOtFwD@{kn z3_8f^S2TugkUvUl1?xhEMEswt3^J zPp2iMrp#Zz=SS3y(XBD0i5dhXKdP0j>ukzHmuPL(W^EepeyJ1NVFSuDH=+=cGIRw6 z#w_MVo+Q|(g}z{KUD}0U2YgjDEG5)qitYA3S8IUzn2}NAoMnJ5rVr$o)H(9Xt@GWU z?|R0v)aQ#4?T+2Oa1chre|E)7m&>QMdM5!5?4>H<{}-zQruK!Bp}PfN48`zR>n1nE zCeObBwyRU(*SKUv!^=WjHI6S^lqnq;9C$C*M$b}_4ZD~klyFF}~ zrwTqW^S=j7m9$zrF8M{uY;d~Lrn#dXIMHgfov3)$6$Bv zhM-_!&i}{OR{+Jeby-8xcyJ9K+#$i;oj`C6PVfN1H8=!!2=4Cg?gV#tC%83)@ZWqh zZ{}s{uWG7p6-9H;+3RdsTaHut2yN}Pwc|Qk;big-x+FscsfR4;*xFIP#@Vv)iJm~# z{a0D1w>=$4>~>I~eN6O;)%*79n9m-^SX2Rtv&&cD~_`3yKer%LtxxRQ$-=Y)C zQiJdu%rfSAcdsGReXO4-ZIgm2sG70gmP%nXw!}LqCZ*oj4D+0~>h7xDS$4ReDqXex zu2D{^ftZ}DM+z)g*JxBe#edOzX^$No)LlP_YjCqJv#A=dBW2H(BhWR4E>zJ4vQX}+5bqzAA zdu{Ke-(cHr#}6b^)};Q5z~^u2N{-m!#?-vs@VdE5KdHx+Ad9X{Hz4H#sjbHDYsvRS zQrENgrs+4@8>dDGQQMUg{$lF<@l=a7jm(~6`RA}2&9yb#^VryGO9U@F&nRgS>xgkF z^XZ5dZtu#K2!u;Nmv79-zIM!Cj165RC#y7i-kKRUp# zT@Af+-|hYR7?IE6=6K0cIFn`ga9J+aEi#Sqa06+-nJ49nD}8?}YP4lL78XTV?)t+A z$8lPK*^w3&F+}a=tHxP&CVy*+`YC@-RF zOA3F@9mxX?Xv;`^l>d&^g{Luqfa!=tM%jh-6KFn4ifyynxuTY$90wx3>N3sR1RK`jnnwR;5;&FbCDPtrKgr_qZ4 z(-~A;ui_+4vA{@VVZRSM=eVsg5Y0Uz4DwZ_+Pr-P7y$=(VV z?>vf`+)Cr7ev(&9e%g0N%|8a;Dfx>t%3jf`Pg?1rx-Kz{P80wPUSJ0tgL#{{Fd03F z$r|UG2G#4!0I! 
z-BxX$)Df+;<`afy>IZ=mxJs3fLbYZooP?4S+<`B^Tp*l0*^lJ1b0d(bz17UhZz}bV z@$F@S=t)4M)v-WqWdd+NPa-7WH0rz{!J6^sXEeRk;faPRzv{hE)}!PU|1C;eN#jQEfsRHsCcW27J9*-Z=NVe}(lR@SqxP@DE zZr=rMDkbTDCNf{jFPubFisl5XzZsQ&c(lO!M|RM5w_6H^`F{O=kUCyHemFik%G<_8 zg&kb=$ui_piwk^L-H1L~47Y6y?Hux_I8VeNZs9+e2;f;gutf#UKiEfU$pKa3uWYn6 zR?VQT$Z2WgRaId^QKU4or{w@LUA6S&5p0b*Jl`1<2Kj+qq+6A+4ZG5 zEsPDt%Uds?h@Wf1;X(SDV$y#0Jy3@)ce24R9o$%d<5~-}z{ajx(~s20Q6YDPxCaD= z(aCfTI!L^?VU)K#$wO_d9DExe@grBJP>o(s!y&XvZ-IhQSKbYjUQV)n_1kdy#+m8o z$I&TW!ZkV}>Q_);BN5^Gbg7G!>pm5F^Tiv?8qG^DI^49}Vh5P72VTFWfB_t?(o;~x zi|s(_Ib3IYog>4eVQQl_$?dE2^c^;_95qOW!T`s%MT^-fo{YHv=GlZ7V!#;Lg5TJ2z^N%|Zu4*4j0PqqHq<7$GdLYm6@YS+mO z1s1P|3)F^yb&0dct|-%LJwB_)54bcDdDoxRRWfEs!uFMD5P$+2^#-3fRzXG2Hh(?6 z`Y2wW({G7=^zNu0GkkXr6vK%q6VL>Brz%lCnj*HlGPzL?w*A@VTVbzMO2ZQD1Z#n6 zli=IeI!Z;1?@SD1-rSOKx)b3*bG@JdZIz|>vkcjuiMB$b+Z;OvDplqQt|dl2!#IwN z!PmZOj;cfNn<0}8pITm!<-(ojE{2(n;UbL{fD*f83Q3qhoo;^`>>a^m2Jc&>VrPk` z7l^Dm37gkGFR!&_G_ZS%!fQ7wiJ38Rv>q3at%2<=l!YYg9i*Harw1A>|G0f_8O6;~ zwjOk>M2;9>c`Acy(nfJfb<^fI-szh>$mMlZ!BpidqTq=D>+uhb%+MX7y%GY8J%)|<#8O!f8 ze|ez2KwZ#^wjHRSzFGRhV?E&qFzCU7GMM8B__%aHq05Iz7}9(%!|-=UB3WyDTXup! 
zWoa{U_@X9(X-SOoyz(|g`-925gMDRT!F1S*w zW*X2=Ze(&eJP2hPo;KD>(3E;0*n8aO=pMI{+@!?FsMGPn`n?eBcr%8cx$gjnKgr4{ zOgTE*JxB1%CGE+HUF$K(`+rFL|9YSW-$diML7H!Z)fQ%cCw38m@$+$gr?=c3SLbr$ z$3}y)ocOelHV1MYU8Rugxgh;AjLNYamV%rmk|1A43bLNOQ%|+%-QD)GH}Mf8D&jK0 z=&#`sy^ufSenOBK(ndJX`ntQI{{V+hLb#4I$g$5Zy!QbENI}Q_mV#B)xb!g0%Kt%b z@Qdt&WJjSXg^i7_Oz?_}iWt0(SnEk~LU}e-IGXYmdNk=Uxk>u{5`i}0#MS+-_YXBq zz4-ukx2+H=vK}A_qb;VK5CPPF!1Y_dZu@0)0PGPq%$DX(ux=U=?84RVsgCYc?}cn> zO%JxMBq=3_$Re}Ppmug#LmI?ZNSffq6vPIVU+TQpI4`6QE_Sdj# zum$%*7)$c9Rl*Q5zQvu-O)e4OM!o`!gHS=$M)c)5Nc-RQnbJm}K=+Nb&mkI4_{Coh z_HF}1t6@hHA9K4A{)hki^WZ~UnH&4odh)}p75t*VunRbg@hDQBOz22$$hTsf_8Y=s zSQKtp;F)emG65eNmD2rW!a738#pkm_#$N%Fk)%1e1B*M=Nv6W#BL z#t&F~AUE=88Z+N~NXCPEAAtJ*%N~J{UWlp!tt##&5v#fM|B`tQ>#h`TAC-KAAK#E8&)tgTZ46!6!o0(^wPiy5TA3>1{+L( zo5rSAYz*f{D1mOe7b}YG;7ODYCpvZjEZoE`JaBVMzls1bkR&p zJ6F9U|KOwvcrH4ewmc1!U>oCw(FM9X` z!7H77O1v`&hv!Tf-f!~JpfQf$q$xh~D~5yriPHXqjvf}=VcADS6n?(HIvIw@K+tGq zZjV=Da44i=u>am{VE!dp&xf#n$$7@52hF7wS6ix5A^k`jt-CD#dj5^vr0VJ_)#IvC zCyT+hB#&}jjt%PDYx@r*$jUaq`7sJdoG?o0J%$h37(>hb=>c!>r9Q=4ps#Y(#Dn}$ zu?2lnph~+8Bhi1xG#(%R|L_ANm_xOA(;+={s+{qMTG}>5@ZNp+7XN25Bc>wInD$Am zE9Ifkm*n4@pGOHT!B0;jg%e|TsK+1uC=g5U!DDt==t@2}u}5+#Pw!FR=KOJ70%+z} z*Qy%XR^HRZ$bF^{#s(yu{Cgqh*`6jWTb^n2{9lf@Jdx-o)a3&sMO5PjlE2tk zVox7%3gRs@g3GeSw>=`=LLROpt^V5_{;`Ni;8~XQRh3kVhV@uwXt>9}OKf4@Y@S@j zj;>T#!!&P%doXI@c50|~x~TPsl_2OolV~_~m%17zyxZoOJGbZ9)xGQ(aobi?YpbHq z&`2=u#8cN2GB7MpBt>5feMsceQ>p6fBD4yDpXX(3%8o2TpnOj8XRZmeOm2D&;5#0Q zzbXi`o98d-CKNl}OQg{IsGAyR#3jr~S#BRb68oF`BHnmVx@LjP< z-OW7p_u2Nf<4oOc!~%RRD?nIqm1?hmv-3}mO_;14;m`{8^=1`aISCOQ%wN@9lAsP| zQb>#L?p<^E5+Yxs6KZbUD|o&^yQ(CuIjtb>k)d2`9p*jF>m@D8jBR|12|am88xF55 zaMxRLdwY6HYHEbmQv?lnB#`;iKJfLa|5_OabTvK`63C{?3unrY)NGm>ztd6ucXw#? z7PzLS=GS~*$&gAdNrP!1l90q9o%&!67n10l!`gCkd3Zg#Flv68KG}$4?$RDht;*6Y}wccK+L*#BA}uu`%P@C|U;y@=2Quf9Ty3L6GN3;=^km{jbPs0! 
zH}-vmk+2SrkI7w#nBuAOq9hPEjMeTLKh5?2vgg1nv`U~ySQXuFxdR*e^ zONf#D|6=zKd}G17>(0wgj_oz=)lVYyeC!O4L{Fk@b|fw!nM$u&31@pLe>f4cc0|{@ zEjL}kpXeTceKV9VHf2`e%~a=u}31?SOX6tA?j8D&Xc13E%z!aZhEJ zHn%3tl78ZKK~zp{t%}e-43DqRoHTgw@Aq*9nj3H)=lX+4Kd zHXbm~iV8E+;hD?CGbOA72SGD;C%9E4ix2dORD}ncKf`}R`nO2Cz#zTM;AJnS*)xkH z2OA(Um!j;v_sJzgmjn6s4|XXru86Ju^^U-{P1=IWZ<%)l zq#p8mlYNcRL&O1rORM@_hktf_%Y^;|&TI{9Qpjh^x86g#3G~+~A&bTbF`Y?jf7yi9 zc6(sDnv39Y&KYy{`@eJ1^2iLP+?BS=^0Fbu#Y-O*%PS-?S)~4u<$rymUx9(Hhl`Gs zS7?DlMPBleB!M>|&^$UBH!O47X0Bp_<@+qS1egf?_t4}+1V{H}^Q8*B z@Psay(ipf%fK~X{X&*)iA4(VHl7GovFLhRA`{t`%YMqfw^ga7KFMs*$kxee@Isl(_ z-l4sni2Kj_g&l65%!1A_r*mWF3t=#S|1Thq!Qwj~LHzfX`lnBhb>_;8H>C3@Gg6#eum-2shK@#+Mf0LMqYb?08 zh!kJWZ>IU-x3EIX;@hZ&18SEfxh0Q>#hE?zsAms=-0ZJND@?n7n`t%pI|8B?=FPs_<0GO5fdnq6NmUcGLZ+0M&%%P2xO9WFqOb%y~g4_z;^~evk zrFzw&pRd`gB^<+QzNyuH!XVb8`trZK{$CR4@&si>W<}>-bXf`-&HjzeDa^^v!eAuL z-10@&uehc$W7}keJRxNR48;zs{p9NJ4+$tg<6JU)i&htJ+_q2bZ%#ST!rieXF;8g! zayD^|?85Q3l71a$tGpBSs}{zkzU+h2& z{y!L32kUl&<_%mFEI5>3!Tk%MP+;XmpWg!Q^&I9R+;3o}+iM;JQ&O5#=1H-$3%0cq z7RY{)s7tT{ojf?_rQ7e-uG>m7tc(fN^NyQz&7>8a0o%y|~GY}TtWFnEknlsbRp z(-`d%vl7*NXZ~-qKn8>0x+ZA3S_mH%jbS_FV@Pv?9zige=dz9nZB^3|K3Sx(A<@=Lz0&&7rz@!?e$O5!?s;<4aHO<>UHIB_jLP9M zG%g&~l!=VhY{CJOQv1q3Sb0q=s2j^-#qShSk@$+aytwwo%#YY_i@H}zjz6k_^WS7!QDR5FYt9S(-~?R~%i@-T zuEwW6S`7KZh&%Gz%6rs&tVWBj*KLGHho1sq%l~a5^Kg9|g2=>b#ER!UZgBs#LMVV3 z^EvZPfGxxHUw6)OBDAs8m3-1^P&PJWB6cFqR(HV)Q(k)w4fg+cdcb6RC$i4`m-B24N$nV9B+>zLNy!$kJ zB=muQ;l^XYwczMp{w+DWv>ND9)^27>vc>i`qOQMZ%^3MZL7}F zVK(Zq^=)kFSZ`{Ji;Jrp6K(EW!wa%#OmNL2Uc;INl~hGUV^@b=RUE*dOl55~t173< zsi#s=4a6O#_Z>JCbb?raJCgfM9P|l{QSjYnGEF18>c3yjjZTbE0(v(WVE;^tti~t( z{BkCKBu`-%56}2%-1+rGL3`=Zc+lv05I38t4;;P~Xt)+kP4>0}+Pj)8 zi-nP$lU>_|bWK{T=3P|$4LAk0VP<^qw$4gIfOye0p5VSwC5h{3Au29|ffoT(EJ`+- zA^}WpQ01tOmi$2=PVos%d0Zv`aUB$BSQILA;?fJ#*@_c~XQYVRgVl)$VY;jb#>$gaLbmbGz@sRZU#XwY z@3X)N2)-0;bfEdWO<2Q8nKvMkBt?i4UAE`3OXJj3FR?p5X~4BiHvG2=KPaq2PF?Y4 z6~sL@bJq;a@xgsz?o3tJ?#*i+9N;!G1R$pl;H<+wRE-W&KHZPJ8T5D?XfH;Bx+4d5 
zjefaDJTYIfJurT5<{zl*&1oBT!TFsFqlKIb51M~r*PZ0k4cx@+t5AUO7VfeJl^hT2 zTgiFRfXhB%Mb>omuIQNm9DTC*X$4<&8^T~@lM9bGxtrb3ZYcdGqLMxbu`Qy4HYV*Me*;cV;DxJKRe zUGAtfPN&go+z%D)K3CR(@#pyBux&telK|JJHNfTkYidz5I%UokdeELG`o;ma$|tx3??Re_Hl(GWtt)k@oL|eCu)_GD>An{$$#zExKL^eOV?a2|U-je;92P^1pom7*7 zz8riUtEPSvo=p`r6S@c4!WfHmkGUcwxUpT)K+6^-+)=0FKme7TV>^jPZ`@O%FJ*qTKKw&^b5mEAT^?Erq8!q?>4v%y4b z*s?l?V^p$%vGqlBZL}Fdu3T1-^l4&_zazMEsfI}Q^>D4$gC-q?8!B#I&{^#>WNCy$ zqRJBTa+(gCMx-{~WwpjZ*cj2;3gx>=gt==cbmr6hlW%wP%V77Gb8y@ydK$VYCb@7x z(`Wux8BVRSmZ&q0IRSwgftlPDwiPy=DE(`_PVdL_^^uzsceUhT+0vvdHUH*>0bekwLg|IH(l@(C`|W z=&O(i_sZ?bbSX-E$&i5;OlnJQQs@8g$)83R^d#EKY`582CP*As_5_AG^V_ zJ}-Nlf3GJLAH2FpfR2oMF@~H{&@vBzlYP>Cgs$9Hq}1$Y1T?zbk}?o4?GcM~Ly?M# zJP9JCQ<;vVm z)rC~H4kFa^&kaT2lKokA~Qd%!KSy&T;HF+{!5v%2zXA0^`4tSJFM?V*F5qx50 zf>pNOXs;9J#+)%SJhWIdz;3xd;(4b+-%}j~))cfth)XpB8Oo9=@76XsXzPZ=Ix8Xb z72C8HSU}5p-PbkMYI_Z`H(zC^Pfs&jgkF*@2>OMQI$ry~ZNAH1Tj>Z;>(|-sN4o4I zpV(x_G61Du;D2MXsHR_2ti`Yw(?;KXDuI>os_(v6)xY~bB5a-m$GTtwSsZe-iEmFH`BcbH*59Evke$w|ZpO`WA+ z;FxLc`n$y1bw3ob%F*oYkC&h5o~_O3jECOMkD6G&9%eslvI&tYw75Zq^S1`OQ>2xW zbzN!s`YQssL5TLDfq0f>;qR~ClK(pY?q+wHUc0#)Hn4)D4r^87cSTQEB^z2EABh{U zMyIH1?xc$suMgX=q3oQ`tNfZHUqj~2d;x9P1|GPyoO5rHHlnNuF64btgKcK-COv&6 zNtgWZ`AOWY<@vexGYW<(OOM`W4r($QJ5}Cd1~XXoZxS8@pK@`WnV5qjkRy>;zK$Ee zUgSVgAP%bV3~~upnaW3Ne%R~+B)W@9CUL|3;G!@%>zj^)#Z+n&q;@zStP3ey)(r}E^7 zx%aKLRE91AH?@Zf8ykBlc!V!!kDYrATt}5Phd0P=6Wm{39|N_qzzLc)bm}PNZKLC- z1Pv8j4cn;R;5vT+yrn=Sf3-F z31|S}7=Sv^L%u}@ISk%mMvCP-UEH9xZsyiQ{NS6HV{tWac0VsdjNhiYy?huNet(-k zjH(f|=Mdbw$8SiDjtR=xb$}%hbAPhk=#GW$JeCwhMyl#@K~}ZyKYx?AD!ajR=mFcm zEdsgmq9o}I`#{pa?Sl0#9D@*xg6aHpg(n& zb&w#IZy>!09ibE@>erXjFYr8Ji00xxWq63))>Wb^^%$|W+xd)<~IA*S_r3=7;aQ~B~t9dI2yZvQB5L|&FtEe$xkSfUstKI4!giD=Ir;z1ogdR=3+kQrJO{!vq&2%0&6HF$beYJ!J(|i ztfuO?oUuC9n{AXI;E7JbKj4tdo(@^dULFuMVS5`9NN_Z4c}>xO{Y0b4)=y&h&{AN9 z2B5GkYCuNc)9Et$%GGlm{{fA@rsMuZL?ou(PKV=1tD!)J!(Pt~;iHHy=xZ?9IAi9@ zRh|7u;O~CDK}Wq>TDF8P?FUhldEsB`GzdA|)H72=|!m*|51@=(T+sJ%t%NPI!g)Y{l+KJX@V`pe49Mil`t?2Aw$Z*jdb& 
znQ%}0x>Mv=_J)n!K-Fk=O^tGkD;;%5q5}74JP=IPf>=X_%=z1e#Cg*`ZJl*nWQHnt zvp%X>#GAg;o#}iY#mJyHwJoxRMDX9cEdL#|fd=*y4Ud z(hNZIKy|8UR$c*O#)~~yTm3N!C_>Tj!R`b8!>EPW zrbyz%JLNPJKxiL8jG1e36ekrtG6LCO2k`OdjwpxTlfr)g{(XOWd0A7ZoPm{E0tq#n z1~A~+W+cZX6WyqIjnyhw8Y?Jh#)K!i(Yn=_A8ZpFv>*^digQ-nXtP10=&6fymFDq{}+dGLE z&8xb3KS5Rt;H9M!v;dl{yZc?B8eoy7dq!L5&0 zBZ}Yd4_BrZ#3xQo!Qf}6T|`#jDhfaaJ~5q;uwkEN+@-W_n(!}CPHZ|3Ayk2^hCVdv zNWETkfF@b8!SB}5yr1WsJt$em0;D4_bN4ys+`?nSH*!vhp~|`Jsl4BsrAM+Of#7lU zfX#>T(ta}0XV}2kbmBblqfo*kVTZY>t?R-55%KmD$c_7rL^GaU`wY)+7UsEB(W2`e ze$7UY$OQN}_5H=dIi4|Q(;$s}9c&SsL-c+gv~m8-|? z`czIFSGQr3rhBXfs&JMwZdm5j*p-5YTg_&q1dSJdTVl`l;z_|a3^OOZaEm?~nMkM8 zB=UFNSKs#>DESu|GaV?lIO+F=uOKYf-ocfeG2U7DyeXaqS~L58{fc3oq&qu1QLiH; zr|WknJHS1%zV*hxQY=pASfJ80Gxk;y(>pgNmNYJM=5=Tdu|VOJfItBkBV=e?DiB-6X2B-Mo%Z2R2_$LH9=*Jaa z-@#^Aa1Dvnt!+cSF9v-2A15=8Fnf#jA;S`IXRl;8-!Z=JQMVKvK#jnzRBIN7rxNab z^Eqi>@Qb97mG*?@T^%j-6xru8&K@(D&%S;r38Gf^ZM$xmKcx4$Lxse2>=8zXP;mwXQ41|uczQoJ= zoz9k->eVmTkI=t-b|@8fdfkIM&B~(|aJzOktbhVt6TBQTp5Ou zO@E)$7);81Booxiyq96PV$6ir6s+|u=%h)U(3ZjZxs0*r@HseP0&|#YgUG)I=C-G} z-v|PYbr<2H{5n!h!N-|=&GaN8zDN%hg7E8*wdU*a19Z<|P8<3r5bk`UiH<7P1zzTpbO;5QQ{Mff)4;{S* zshf#VK1y`1`s%b$EF4zr;@yOCWv|pwJOD;M+d!JBJcnnqC4!o2I$VnZjheC74xG)C z@5h)yh&mc$_iCR!q)2&9wh#(VOb-62WML1`b@lMAtFG@LC4bA-)UdPb%6Cbm{npFP zEFqAm{;iARGB0^Yp$*m0?RUV#$Ax}e7iQS(0jX>pYr#gn_i#06uO>KtuQq8=;tOYt z!k&5(nA<;%04Il4mz>&VZZqJ>hJRGmkcCM|U!#R$(2(NCF<)iXSdjM`ceNF#B#H~s z_&h!R74l^4PMyfdp3ls_sL9ye2ic`x6n(MHm_041%6>obux67!$2*~z^xpWP3TH%gv9VdWOf#2^U)qlbP97a({Hr&HQ!+X1a zxt%v`q9_(so^T@F8GCG_W-^{to{7?j3}+!%THf%H=VdXO6$zPkEo%rvI9P`Q$_u_Y znoeFGu67v0KGr4^1#YuA+6IzQJWU}1VKPlwvms$J5&OJ`Z-$LHq`FjB%#ctL;4~ki zK7ME_Ul#Of9VbGX&=thM+_|lfTM!gR!8#q65=;KMsH(FnQO4&A8hCjQu5vuM-W$mE zTgj1jH~=R9xhc8?zvxuG*8A9?_)u1L_S|ddDTShY5v3HpOqWfZ*)ZuecJ+M?5^!dL ztfvSlHh=g=V(m|mQ=~N+U#BE1#Bszo&=4bKgx0-^W#2Bn(`&>8?0=wSAQu z@#*0NzHVW}!<5SL5U-K{POK$vM(#PFQG@scZKW*5)5evTVP#iJ z0w&A*L1TVU`B(;515Meweud?j<6MUF8hw?dL~*C2;%U;3{dkO>{guyn)$Q>078*+# 
z`kBsqdp6Gi*|MZgZoLZxJVb~1{Vrf$`Cjb}J5-gYa~0s38e|u$O2tlmIUB2&ETYHe z@?8P~V>Q%}89@vKSoQN{$I8G{FC^U@V#H`rDSCeb6tKfY{~ zCah&2zU;t{lZm)13g)5xYm+^Z=E%%sh{zK0G|c2vIkBG5GMd~qOswB5EVhQK4NT7Y z!?AkgM!j^hvqg1PN2Ejj-5P)Rgwfoh>}Wx_9KYageIEQM;SasvR)dMV&POfLM!>=`2X7Ps8Cqmb#ssvxa(aKZd-5<%fy}wp( zA+$AL`4F3Vg)N;0t!Es)QJOKq#HBay&T(iF#8ZpXDAvhd_pU=CfA5>{8Ha8%&Z=4^ z$k{)2T?@JXp)EG&`H9OG9XE#eUh&pWs`mwR)NCppZS>&cW61JK=qQ(OWqL5eyW{*Ay>iI{Rcl4Rt0}8H(bj~9mM1HY zlxs@;VkXxPt1p!4p8q6}*pUZatCPD>@KV5bXm-E3Hpqyp&_ zaYy@s>RNdO{Lb&872T@e2t=jIv?N0;t$CdrUtXDyCADR6!*jU@9=(}dyc#~e` zLbG5too@g(F)0K%zRq++yv`b=#54M!0eTA0FN2@9p2VWIUSqI|K_yOX+n7L3UDLD# z`85CFa8jp)V#v&FU&3uSUGLQ;qP=8pcb{G!B^l9^ybq41mt;TecVS`hDN>oL$3%tz ztMS0Aq#*Moc47Zly#H0nAx7BuqZthw^$c&g`WI2VnFLZ&s!(=AQ8qX3`P z`Hk?%UftD+=Ce(bpP{qD86)?a)=@Vok4})UE9WN#WneiDZ%)u})!=x8sZaZgp^@4am zILl>k*DNg~o&WxV{@eaiQ$jGkjL)uzs?tH*?+hE6B{dBh&8wr|v`hug*?AbbF&vCG zVMn9tsq6S+?I?DhY(_;Ou7uoP6E|1m6jS)gcP@3if*G!2i!2t)Xe;>XQ?HY4dC-Is z;@YGE?)-kGmocWvJ)*6IUgAU~eBfTKKkTR(JTeev;JV98)Of1zczJ3>2=ECon7T+< zM0DtbPIDt30z6yM$_PG*o=b=G`RJiDu{Bp<6y#Nj`uZLksw7(TB)Ejk zhvypPma7T1zd(zJmNcZ#HW1In5|Sx%5+E=2A^nPhw9~ zHM$?O>UrUr^*lq(K%5}~oB8`JykPTBlS65i0st778og)aM2w3c1N;Lsl=Cz@?=BrS zkGZ`4E4grxkhe3}+F|$kdEnsW!x&v8A)OjsjZr_LYDGL*3?Ziw850+)49MXYeXGt) z??;4neBM|9C5xIi0Rt(x5Dl${t`fM=>*U90ot1xP0sLXc06X>{Iwjes1Hn2Dg~od@ zl9&UFMnUy7w!)mw&wnm8yJ(9{l~6W&T@x0t-{x<*HnsV`$|XKb@lE-8G>WQ3m}H8n#&8#n1#QZz zb#s5Z=d1v`?5P+8ziY@o&b2Bgs#|xA{kWBu#P7=c>a3z}vKdkp>#OP}?egVS_5}LR z@!3kjV8S}3{(ed14e7XTW5eIJ!CB4o|8W2pI(S|y^HG7zHw-v`TrG(t?bRxCR(m@0 z6EbEJGv1_xibGB#Z!gnPVdqP(TFzG!c08#!zd-?fz#ks|kaW zCoapEZDCfYw1^nNm;1JWKe&|W%XedN?ItSvg|0p8xS;X;HP8UHGlJ6D? zroDhR)2hR~9p0WAeWUwV;61rL92Mt01x#w@=mtNAL`M3#e9nNQIo39qy4(4nGfh6! 
zRQj6>12eW)jyPbicBex*6p*8D0`Lg=kEd~P+l(S&i%f-2au!You!*ta4N zKRF1Tg?<)K65Jh;(v?OWNAJy@>#g{C4ncwd*1`$NYu1Rnk)clXo z2C>`o+T3Q?57ROb6umoDKJ?LicXpE`x_O;cKKSN|pBm6Ms(+BD`Hy`vrU$|hn)UoA z9N?9z$`04NRH!;|=|OF%81&PtX#YBmJyBhTL|-^awAgQ`O%7>*~1rg2dDx!~1*GQrlIE}66T;;1~)jU4V#QoVgAlt9y0!cbMB z)g$mh8F}iNK5VQ7<%;w`pcR0$CWk-5 zqwT+1>h@oQaBCj&;8+}+x23j zUrZmRU~Zql*!2703h`puNyT7bleuJ%cE)n<`QCA^nou5>TYU*(E@F39ymiv3uB%k; zzGrt34{;3gQ7q(*^1hFd@Cy$8FD zds5kAq@WNIC~-d#)V($x81~1c3`|6};Lcj0j+!kH_FgR|d}HBZ%+wNmx=IVEk#8C` zQ!I--FR9j7n2=@ai<9(#qghTB*8F+*?T3wNw+FcAcpnZ8rty@<_q{SCC@ARA>*-Ep zLqx!E{Y(m6S~|@UxrSB5WD~@4wgDF;IYm3WW`^n?%!2>3Lo`6&&;{d^*$;@;UPyU{ zt7?{UT}_wtL>gkkTkpzcqNkYX%BTmgwA@<%@DZONWxrAp25Y5bCOWEER<&gG%kx;2 zI-it-NB_&5{jY7yLI&yBV;4?IU!?X<;sguY7JsIjDg5gEar4V7zp{!7_vfr=5lOO+ z_9xYDo!DIQK{=fG1u?n`72aTs_IEg1&%A|nt)YfzR23Ol-iz+9I#!esa8(_N z<|$wlL`%zN(&IIU*B~$e-^s#7@D=iE*n6zcH}njb6zL#{GzpLFY;1TC$Ui)xO$Llic6K-+xJQlo zk@T|pO{O}(ukmy?9sUEEMxw+C#>{pBil1%*x_h;JpX}cv{>eooco26|5Ll+-HZx2> zioTw-GHYdr=21P9QArvIGc|MrIj5y}P( z`3(qVPeT(JN{pnS0Y{;kD~OuS^Zr4hDy4tIn?;}PwIp0o*I|BXRZH2YwJ2)gd5b;6 zsRrXK6%ThYZ&>VH(NAcqeeobV^p6kIvUsH#E*WpJ^(3syl*#htd1-&uRLXO&Yr8?s z=V;UrsrJMLk38Zw9qhcnfUU?Uuu&JU%veVgd>EA{TWHJZ8Cj#Un;K=v?vP-Qts$D& zc-|vpeClN5S5s-fyYOSW_>se()Qq#h-b=`E*Q#g1 z(sLkdyy};&8RGos;gqBP$}(kuAJ}UkDh?oRBn@ULyiKZU8$h(5@&o0zWepF5PLRV~ z#3<;;c!Lp}(+!MXh&(v8eA4R~n0W{DHWx~qGDPyU;4 z>}qK(Zn^{V^G&94j*(rj3_po|1OANCB|4tJl%t>BH@7 zH4aF7d0{S53UbrGzU;`SDo zuA_7j+?^iOA#-7#GCWKTbmp9?*jHWso}Li9X_F$TPvatJTgeMWS~(V6m&e^?=SkWG ziTCwxPDcvl%ZyE-?oJW*v<{zeP+^e17m9A^xrk9p?I>`o6H7B$M%4Ap|9C@rlGxco zI__=PRPYQgeG5R=L`$&s-6d7X(k>;{A`eTY zpHTQoKJEF@Zsx|B&r8--N#O^(9B7gkcfL07UR8;=+8Q*Y`6l(MB=b4>bN;4ItZim+ z^dZ0D&iMJKNIfF0rVeg9*rb6&|eLdDnMyt3z`^Wf};JlMp}Hqzf0$xlx?%hDn5 zv;<52KS6$6Y5g2FfRss6wOys{U5_{FY-ZzCN{k*=7x`oO+Y^!C=l0O;Ek&8rML?1x z^R4pYW%mND^0H)|`ZRw;_2LC>+B^)6j`~keD<8o{Ydxw>BM!EZp|_=y1{?vII`phB zaq?Z?pb(FlT+?JBshrl;sK00MCsss?$>0}TbFdp&^3IQ-cD+pj-%3~~;k<{Kp*8H{ z1r=~QfjHqs1*?UCaz?r#!p$VR3aa(_iTsdHkT7WM47WuDJc%`GreV&Q`SH8!s3EYD 
zr4_d6jD=hJ%)s=?CYTgwNj_)MnMiXBuDT{H-Z#{*V~te)A6xGjpVzx}3%6<5*tXfY zv29z8ZQE&M+qP{qZfqxwZR@Q3_xqf)_kP|_`K7r(++Ei-=Nx0qG3N>*#$0;8gKTb>;fUb|;{G|2s*12Q94`k>MJUo1>+^OC#SXP)f}6 zgo6VSc4Mh_kgti3Ns9xq+29(ps|hHA%el(ez*tnS9@g)=xp>T*WHe>p!L6#%mVa@$FW4JRAVh^L< z2%D-PfImz0pE7u1w>jGDKFpN&(2vN5^%(liu@BK!uis9I&?g@v7c#;K9Ji@TjW(E| zsW3MyJ2s%llH$GEQ>U}Gb4uB|(-r?rw5kD9!F2pS{M{xU+>n&Ls~ztuF}^$b+C$)+ zLET!SNtSig}qoyq$fI5CT zmDbzp4Z0)DFDTfZTUbb1VPfk6eIfbqaQO{4^#&z4*WQ=d{X#%m{OJYtwY(pKI`@wV z_3weUPPSlMuGaI?!eS_IcjGAfag0@w$HP|K5F7bcV_?3x4*YMpxjXB z`R3;Q4F+=tzjYIW_{Tz>Cq#YqjQZ-O)PmTQQn;Ba^QkYIivV<(R4<%qd!(D406PQP z+QZLEPl~M7TREE6OT>wWW!et=U5UK#TMqTqxc>&sfe?Nhw#oh=7FI@H??M)>1*ENX zWL&{`%kqu6yQ4o!CJBHSR(YT{j zSgDSNWfS)k_`b9@$ts&!07`=1KEw3AwMX-I0IY9REYa_Np`OQWcR^@(pJy}Cn6)FC zip$vEX8Yo`7vQXC$W1+0^w=+`5o$!X(%`MynPlzx@uml+U_HqKdjKz87EHxn$#`jUCnf#$9zid9U)lF5|*u|KD5 zj4I-^Mq@vkn)XGjVe~@x_erdal!-SQEn;=o?e-Fch>1!(pv5)e;fwu}w z2-EdV9$zN8qaJ9p^73-6HgFHHLA-dQ2cdf1JYl>HqT!gs{q`@XOcB0B82%JtppYJ5 zhz%k}+!pGg7YV)4?!#7W0imX<@;<8I(zQ_qk?AsAaiS^B0LlUt+PEP|Dl!3>3ZdGc z`Qm9Wr%4OP!ubB0uYSVs>#zf4mk6MN=xOs)GFdelJFb35tk3M*mN>`X@W_y@Pu96Rw+K8*ANG9vFxlWH-Hq z;65Q*KfkfX;})ZHbbd;JoxnYQ#u7OY#eTxGnEYCzF(e^wJ1i3#74$rTONe8*#>_g9 zvqJ$;iT+wDb6zh%k1A{U6&&XS z2TgWA;rFh^qf`CIJ-@d@zr1}tGEcc05mjNAkdPIN_5B*2e%cuIHs9wJ)B=tzJkp9? 
z;V`$m)R@7s=ZSbykA5@_ZF)cI7|dpa8CM$$qSCau@IeAiO}ypqscF9XMjZ1?Yktq% z%=1bW3-;Ir0JiQNC@(=PjdZyMX+#GwgI_nA2;S*#PZRU}Z&xcZ;f#!FS90x&FXIOW zYXZ74^W3{hVca*EwY$49~+y;zl#K;=>vR zR@VQCjNZ5P^to{?BVH@cU`BYt`Z9xnGY8vIdN`;jC5`}hLQ+!l$F@it7K+mz{orzA zWlN(oD9Afu$r^^IwfO5;@BWciG=NI*p~{nzI9m%OyhqH)LEI!mw;sl^2=8|YwYL~n zPlx$QHlx}g?K!t`4s?s}oB?`HcG%9n_831;;WICO=9yn3Ub8d4zvnXF;PJo2Y?u*a4ZNeJZH;)`$?34hf2jdkz7+m!;q*KHvtxoSKJ?uI5;jm zAOFF&_cG`;R3IC7xU;O@z9d{|zm;7G_Ppy|#x(sHp1Rj_+X|_^dLX~Bm_n~Ee6Uc{ zZAD;Bcm>szeSzVm^7dOxu+Y=J1!q&M1A|*Q4eWjDkXvqw96MZzFsPGk?K zR`S0X5Z6k&JR^9%hd1rND%4#h#Ueo46=xq(HY@5BaFnQRu%|QUhslm4Gq46pQ>Suv zyw8k{j}>R)^MR@C+d^J@^9oh9k6cVh4)m5BtiSzYH%LI|N~c}4{k?nCMTcvms#6f= zl}9M+gAvZO0u(l@p9hPG)?B?)w@0ca%LO zrh3bjr=+Z9zOK42BY5Z-OK|;3SzPdzfvW9owU1JuF!eYg_Kkds3nmvccWy zR|Ve*`M%_d3aDKKo)5cilc6g6lt6_{(63iz+j?j4`vle8$q%|H2dawv{+GzrpQ|An zfq*vuiz`gwdT1ky+NOx@?HWHir>m(Meb2}{E(%5TbHaBUAyqW1#UEH^Y;!s)rjbB< z*{x_v8?-GpkHDZ)_TVIM_W8>v!H{vU?tXi>zc6F~HLl}m%!)@-lggZKM0VyV{wNer9wc0XWH;8_|eiHJ=v9aWxgZt?1O6EsR z{nu%q%l`6~4AkIRS$ofh&Tb4055&QUXa8L2)P>-l{}Z3g`{|JUQF5rZv3|noKauh$ zWK&fTUcGaFkg_>_Tm2 zQ|345=)o1KNol7Ak%7)N(+(B z)`{E(PNc^xo(5U{8UsVMeGm#DdX{n=&O|wX_}$6y4vIrnrW%il;|pRTqJZ;ZAvj1( z$v35*L}r05KU>a)-~6U?YM$fTZ(V?@T+0`$|E~Z#7k}s5#0!YIH zcW^***#JzT>zFlwJb3a-IpqSyJz`?>+Z=euWQ!%`i_IWz_00j-(8NSYX(`ns97ApQ z2tPjPPMG);24a-|zf)+wb5OKli00?E&<<_|2my0XG%DxI{VuBw{3cD^j^jT+Qq}6? 
zuOkEN+hCU%tZqV0RAv~JKZ~HX&-VJ?if(4d2^`d^N&(K6qy-D+pDNC~ZsscLZJdWu1f-bD7@n&jhf?-!`sp(mFPD#<#q~B{c}F@FK88;$EShumFN7VDqn-anJl00 zlAEab+s3b!I5qwOhebdyMTp4WfP2_Pml9uOSGi99WiKH)olk{r{y`&Yl;N`r=mYFL zE~aSb(@NYnM^UnG6{vTQ|K0xKL!-OL@&Y(f$LcvguAfSp_29LtOrR31R=yAtlLqAB z(3z)cQu{Z;>D5Oi)?!T^i?TLIlf<2IQM?zsec~!nv;9VjsI^A~unmx5o>)H=F|t@o z%X6SQU*qb1Q0})tz*sc%v%DT}x2`I1IQRJ5Aa6(#k4G;_SsnV!pwT~Wt^a9L{sZ_3 z+954_w-O}A6^Umh%SMuu_dRfwznmXMk(^Pv6x~kYnxou_y17MAb3|`d^D@UR7%wjm^Cf& z-B(%u5^jW!RLRn=7EmyNR-~-EjN2yX=CX-{WuU4_1C6UW7V70}WD~G?$}jEIDQFAL zjl}qaWute!PcL7Eb>BNJaY(iu_GLeep|;DAs-uN(^;=r3getFZ97)*A+}d+b=5ns=d%~&nYF})$F@Jmsq)Z5SNx&;KTZWFEnwrE`Ao27H2w9*P;63=^6#o>}hTdk-zCS@*&|1i^)RL zA_1}bOT=)1juBmbAY73RKF;4=LXAg~gy{QxoF`o1 zRh8tzlA*O%e(H})HsC4bZT9x!K)IHImWS!nNw zdmZ$I#xTG|?G4ccX|%iWn?t3X8h17_8Ti{8$chgSU{uTf@u^lTF(Y-d@KMx!SH=wL zA%BRbYLQH1#?+B>S6uc5(!*{{;p+s=NWif%oG<(vqvA-f34!f1?X!gWCC_pi64DML zDH#WryKxc4x5aTWb_Kjpi&U-U?Bg{i;VM-H^6uR1WiNSyPM!Eacw3t7yQ9{jwLB){F73=)1C@pQZzDhI6~J4ry7&U&F?Vf~7* zvue^2K2*;vGSWl>Z|tUux!@lE{p_-GDCqlUomLvr+Y|!$o^Lun!%CcKs}Ec)K|}&3 z`YR1s%uy3k)Wiu1ye;QMzr;T)nmo@V<)}WzxEjZDdbHmv-6s}Wwdi(04~jZb(!GC3 zED`#o(}cgpbS6U*r#-0^V2C&s~~Z!r1I{KkMk$Y`9_3wdIuf zrXFMLuT26hpzbzAdpXXenlnp;9gcA9%u_2mP#AoeFE(hQqd_D(f`oXYwabe}7{C*6 zL)AbLpy4J3U>*@-OHJlZU01v`E!tvRr<{UL!O_O!3V4#qfAdHeP_(U|*3}VI&S$GF zAvzwX4Z|8yj99sZJRI_D_12nYcN*fm3GwRkkGHFXZv@Pex|k$MOB%Px;-}FH6qNTI z9-s5ZmUrxi9y!Nso+VI$_FtIn0&4g10^sOOGZL-!#WJ;3(K*eG0rg3brDonAm&Ars zMjTN_`BuW0RBZm)UIMBJ9Xx=Bh9%ziBg=vdHa0P=Wl{Hv#S@@PYXRMptzd6!^znW0bie^^$PBq1ZQDEeKV8BDlYBbl)fwl$Z6pEW8On= zETAB&${eGITK82(@vR91JrWr2CCyOI_`JN zV!!gj&<)~z8$=AvA)>MyX@QqF0(CP~+GdFjLXy!6SL?b>35Ep3idu!vzRCQD|DmRAT!v>lzsfLQUDoQlCsXW^AUE3Wr%C z2fd@9164bGJ3g^J+IwBCi;y#?R=P?+Wf6grOu7TTk4CaXdux$R1o3Jmtz^gsDEB2{;m#7__9ITg<|4qG_sh5F z!D$Ctp@(JKi_?3`-`1*Ihvy(=qohJa8GtJ^fbB>~?u7EgE~?z{MP?D!dz{*@_jFE9 zbsgkTJAP!XCzWw{KToqXa1Rg?iU5AcL<}SnfOaQv^z>Pc(T#$xBFepkomV;z{<+_MEis7QQil)#+$z)9HP!lzHtX%k#ob47Ho*tQ7-FH) 
z0VHD3`5V4R^?%pp7lH^CtWmX}CjDbKV<8gsBJ#y7%+TV^f|5}z<(E{HYEccS43MtO z_@NVQOaez&; zElj0;suDDnj$bkOe>nx1wakVe;Qea{C0*0f;%5Bg$5tVa*OO~~uf*|_(+(mWcVCQX zGnpbTa|$5-*wQA7nDQH7f#1N8W6gFu`8+RoHJ`~^JkJK>^=0lB=*<`fSs33eY$G_a z6pouFIQp1nj3%3>MbLdXs^VMYiAj4GD${a@n%L6hm9=hPL9Oh&f!7{4s>&%2)$7KS z4waT`{1gtWyRzjoTE9AuKZ`#qUED8kmk4bL!{@>+gg^VM)uBCstc0S*NtlLSmANz1 z+-f2r2^slUD83Q6x&erKEWr&6Yi6csI+;GxXPTLjl)`i30lDqt$M#t2(q% zlXu@5&$~ZiyLGYfKtYMJErHPV;(@CI8re-x>OB@ulrM9P9u>_}56|b3I_Lz_-w=+5+ zCX9nm*-}PSYy@XnB~TfL#V1Tiknx{9p_wuML^suw?0hYsB?n8K$jn0eK+V8M_q$@m z+Lza_mUIizUb^d#lR#Py6Kr0r0!D$M;r~!{vNb`n{tkhHse~Kgx$VSk3Oi zmRGmgSJ>j%EW#_(7PKn!SyAtqr^Fl~*9@TB@-X5oUzpDk zI9A1u8VZ9v1ZZ`zBpEgPb7|MnY1?#4o;%+LP6)K%7303-ki;)lT9J*ZogY5p6)#FG zr(*(CZaZ!OWTbZxQ=NM{O-Vs0Q(7#jDOMM7`aU4FBbHf|Dh+3JYfP-cHGq9m&edAu zi_<|7ke-ofCEg(<`gq4Bw#)8}(2QZUZFOE1Le`!I6bQk>?vbk{J9tlV;8>-^=?rEi zYCX|Psy$UaIBwFuo9N?AcMimx&1co;rZ&L*3f(3IlpUvDnL-~O;ZCH zDksF$*0kOW2MPqme9LixW(Haq>5rvaqhRQ7?Q%|>rV%+fA6lA|B>P!l<&2)Q z19&LofviJ=X+$(U5H*hmSsQC;+(dXtnDQ04ob*7s`?t%zxUv0q6$QRqU=Jn{9^2#BMl52QFojOs+5HRe>W+iTgsZYcnCQH}o<-9}`ii=E^ zzaXXp;vFWB45-uz;~-25#2;DQICxk^7NccFKJVmoro7SM^q>P=9VH*=kNJ`Bh4bgp zeDP&%k_6r}@f@_{#00^o;Q8zwtXkJir7Dsza6(B&UHZ8DcLOD%E}J2(w$<3SvAA_B zx-X-87LOqo0#JvY)qRv@7P=hG^>MV=I^jJV-{_TatOHYX$%al2tCNp)Ejkkb4vkIj zMwvKak5y^*5e?c|p%)Wo@Q)|(4H;pfC;f_war4Zx-#HmiR6nE{;>`zPT(#FoIms5> zD&jxM`xurWJL7{?PFH7b=V+2TJf;az#%#D1KVr#^fhc_sv9u<(4YGmn%rpH<+cn}ZAz)VX z(I*qi;K`VCL%+Y}+8*F5Tf)rKLo>A5nxnhv}k%y$D?00;ds|?)ARd3#MCD9M-RS?=E(%6fb#xQ4pWAU-K z)4rA!!B_7f9_2!Lp;$d{L)tpxhg?<6ilJ&GB!qAa=ii^Q-~4e_K59GM@w44*VYi*3 z3_uwvgO(K1rzoW2H_utZ9-P;K?k-0 zfPC@&hC%H{x%)Du0V8EqAELmazi(02dMgG2b@kC-)(Zj-jL+`J@7k%?6TI)~5s7L+ zGk^+yG2N)Cw_79YS6(IJZYunp*u$F+wQz}>7)l0vigvnab??#59-Yu8eeH3`jBR>{ zPJcL{s81Xb=n64tej3h*;)QEfUc)4fz^?U6??#svn0V3_cJ6L9@hhC>l-p~ z7gJK&)+QBb0UIn-;>0vEBA%Vc&_3Ug+8>q0+MDc`wf9lJi?j9Oo)gVb)5tEn~T z6|1>K6=cua_bfg7@5A4O9b4rJyWNj$-_|!r$IenRKn#F=rd{ifvpQaGIt-RWE>4K= z(V@4>r6}mcK47S6@`^m9syAmKEo|iH6%_V`jH 
zG55n~L~ndC$Q85dRn9Ee;`gq0?m18An$T7H04vSP#0v2imk5?%N0vlGlI-C=vZ^Ac`(u*_a9sVYD$r2Fn+**VAvC~#r^A()uKxsmQ}(l+8q2{^_MlmQ>;(*r@2 z>8ASw3obYiODbtjr4hRM${odD!QWEG63ZTsR~%4iZZ~+mtLT8rsZ8v)Zk2-F5(a=z zK*s5v%82rOiFaOyT2*o3Ie#_d*m>O-NTyWOA$8$#WX4XY3``$H3Uz4c&*xix92%r* zxsKVFUsiSDxpKEM3tQgmCt;UQd5?}U%K$?USJhaI|4Q0VjVepoRjqJ_Aw+|Td-|641V9|gu_c4By2Sj`kwoHz&2pHs zMwKQ{7lxgrIj9C5_0|R9!|C-HnkiZ)sPu#!bSqx)25C5pO0+W6N)HPb!0r$;2U=`O zPcc3xbC>jUTfS|6zCCaeBZ=%t^W_=sYLK!e8mG!~ADBp*^p|K?a7FH_VS{K{z&cC? zl-D#%WK|-uMAguhg9=ml#ZujiAvEfW34e;zC+W^i}Ze;uY z@qheJ6S9$SRlyoO5CVq#F1t4@G&3sq4&k`EOCV;^$N$UoDGHJ=%?ckSY8h~RH~jLA zwP+&vmphIn{eY6nrGt-wrCsoDmNNLI}B-dG)Y4>a$oh((7=fBf5154hIC+Gg&7 zxO)-Mk%e8Q%r-7!L&3=hcR^A4OzA#tEJyjfOwGK&g1rQ)6z@4)W;Y6VhcpoTr@NTwnK*<3ks$T9pA)cAwOuwqNi}g=rYwRAr<&TiLTS64#EL zdcpoH%!$k7iosSx+}CiI-_seJ3P#SU9I@7;hyvQwao~#GHX7h;CxCOs)s#}P!7@x_ z2*7?>AkxFT4hDAAj-z8DVHb7?*wUiy7@|x1BYloqQvL`tw`^P8*U7FwqhwHyev6Xp zr3h_tt>ck@E;CJ9CTfRGy^EuIa7azz4pAa7Le+07C!GlQ-yj?4oX(kTBJ7qs`?&!5(}j8b9?Y` zX?6(bDo&dBr;D66R<^8vSN;Li=T}XB!8+bu>GyCcxiQFxa_}ZtpfW3YH@lq1VN~45 z(#Rc$Yz=jD9t(b-$*pzUnmRSZIe`HV3@)kA%BHZJ{i0hCb7MYFR>+A(F`gA*rK%o5 z)h=Kp3wymxtUa+Sh-;W}t><|f0?RvDC=fEU3KMW+p_DNLc(fH&adas3WkKAtKoiVv zy0>8em_wHCXIalWZDqIHfjEfX-*4=p&ZXGe_i@ws2LMT+8Zoo8Z!bmU(TY2tjz`2~ zt$#3~Kkal7$^Hv%PCMGms?^SXI81sml(b^lqJK37En3F&#Zh^*(HWgGi`={mDES$K z{x;I_PoaUfwSNqL46kphe=&Z%r^byj23qbvH3{>FGD-0b$7$@aCHeCsGDst+IH6<>?%LZ>i%483Qka=&+RyzYn49n42Z?Wr&Vp}gE#;^Z?hjE9$?YW8xyKOt&y|M$Fl zXUFhqP`ADnWoV~ah60Mhr#q;2|I?E`*!-t#J5;i4L_gVlXSqf4q&I8SCUD6VL_AG9 zh@DH^zmhw@-!i(_1cU<(AWhwY`6_PK55`Zl%8R?@6=X;@vil&sk}o+f%W}Z#4yZ3ogFTZc!}Pf` zQW!ICiP@$cEh}#H9fQKQ_7kI<_3Nt8bZ+Cv_vp>G-5Iur+0R>{gi+m2eS5uQ#P464 zBE6x`u%5xW=(oIF4X^g;U6utN?r%sh9^!2_bD;3QhN*ssdH=4(dcaTdP_ip#VNrG? 
z9PRmJ1!?n_%l*F#As5j9ZYG@u?&5%5O@X!+OM5;DBAO~H>yim`u1O*H7}N%2^z>Gs zmfh==Vnoi`%+~PefEyJ^KwF7Nltx37lKeNOr8$MNT;Cb-a~Vo{Ctn51_#3JZ`!QWw z`mqw$=G{z4uXsRP^Nu8@pl!NKCc6?x!yvCEC-?)M6|bypXvlZ#WMkg@L^dxW7uREv zg{w1~p-N7*vkLL9s&m>>r~)+wWWTzUl?S8{C4A zU2*AsNuPWDimAY=-Jzd5J!{w*w=wK7+TZoQHco&VCNJjF6MqkBg~T z4^<%;k`GUQxZN4q6DRx(j2_Dk2S27m;&GPNQUgTu3r?cHS*p zT-`6hJ2KJZLH@gq@$BgrNpGJQq`xtVW9iMl?}c&psjhG`JgKxRyTv7PLPgxL+j`Ig88-m>u-+fUKvI-bM@ z@m)QBe>!pg%yZyotnq$pT%t1Z%=7Hov95zxBAPR6dKt+BujSf|^;hVG0$EEKK>-6L z@Xw-Mt5mZIS3?=_xM%)dXP$5F!N--BE>_R6=VKXBOZCW<5~B3Jo<%(q%Ox)2$FGJh2{gGQU)<6;T%=^x+VjLRVQ1u8X0^ot+Z`ZnA;P&B?L6Mlcm{=*dQ7gp z7{4U)ZH3EoXL}geMt9zyY{><05W?k78=#G6p@{71aG~WY^USSrctA?wE@Hl2_tB{Z zsPG)8Nl#MJey-?s4!$0^15psVtxx1_m1S*ryd`-cr}9(DW3)LxF>Kxr|8X%$IbGI9 zPa^#$y8V7yQ@7>AdmgacCh1!qn@Xd?=A&iP?5eQg`}6=|&+cFd?`k%Tx8)(}rVX9{ zw*${lh3h@^J~2GZRZ%B_V?MV6TL$aX#9Onbxx&wn1w4C-I2&Oo=#RgHKD}1l_3mG& zDI+V5mgl#kSQ>zj?z4jYKKj=Qh1N%xC_HP__g|^!V!F~KO%#Yy<v=0IOil$|p z_$?q7*=L?RZ4G8)A!rP%E!U6E4us{-+v`EAkCPzL*Z!cu2wk2N)mbpw`)k7L;~zJp z1ajs(3%};;DGgujgh7Vq`z_h0GaW2uL$5WHCtLuYw(9>+u;78}71BPR=)G@&_Gx zXUym=Rq9ieS)k3YzXgtzU7c(AMOF~cVTb#q<#$sx=~vvosPk9|#sjDpXwU*Q89=q? 
z<`g>)VJ(OU#in%r{>!-4HI6lU1-CmoK7ooJ+F*0aa~Eqa8^c>2ruJPz4{PyUG=*U6el1pMZ#A7*t8{@5J9roGO>*$=mSIF~jsex42JYCQrHW~7QG z+!f3Ek2Gr;-3EZyvi;IQOs7lNG@Nze=b6W(6@2RjqSjsX3SLhEeE&fsg!a80Ps`b_ zK1ctZ0SN5}lHa-5_^XHMC~3i>RKR2cg)@%_mYtUuH;qm^o2trQS1mL7fBUR{T7ua8 zy}=t0q-SWkiE~;bU$yrxBQ0dt|1RAyo7O!^(OQPTsQj>swB*7jRF`+ZjHjMkFwNJB zGer$u;b81mF5$K6HE+8`5Twp=4oSE;QZ^2=Qrdj9ikHzO>^9fZ!M8$G*6e5tncm%9 zd04G4OPQ=uKB;6CxI<%wTBgne;cde;2P~raC9_glc2ll`ALQ3*>$bOM&S}*oB1C2{>B8QB3{XW7;?U^u|74OgB9c^KjS;n( z=%e2>hmu}_505``K2S{|#{V(a$;w8dF)Ewuh523!@E^3-A;%%NQn%*$J>fO5USZ;vfwg=newEx26i<{dff)3~oXP=`@ zyCToxMK4b4l(B(oY}6TSawb-5WlIs9fF8=YLlN^rr(o-O+f_%5V8OLmc!e=F7q17Y z+EnN!;NT%Ph!K)YyET({sk7a|&!?Nyy;=v`MZoYYj$XPkDNjGtZw@DE0RH z#0{c$nrn}yQ}b!mcfgo1`_QLpfU|>mFFNDoP9YtCp_U0aPXk>tulULXsm`KAymCY(*qgZ+2D)FevHU^x9Pyb(to@g?Jxlufb22Gdq^eBM$dn}h>({*iTjku2kdzoyFwSz3faSSGKD z4?Ei|BcNCzE~itlbu%VDH(cE7gKoN_j`dSCjr3`Jw%I*R z7E8aGP55pdZqOc}^Gf7@w2%ed(17n{MhDiwxfPM&(w8O5^G(g*bt?paXQT{P?cPiF z_7Us9@in6OCgk`LG*@N*Qc8H?dVd&_D)-H#X3=!Z)G~-u@Ymw+%7S0;qzm0mS+{?f zA;fFKCW?fob?ccSjbKeBP_!Vk06V+t>wO#M1jY`?Fw$_g#ihe%#+qFT{yCctWU;tm zsoL&RCtzNTMc(>j152a;0dUyZgHHG;i(%PUN7$~w0Jt+zIwbgK6}?*-<$Ght4p~3R zz|WtN9{fS52N*Oril42{JnqvqtSFGtVvfQSdWjSQN<`Fzh$OAIaHPf}`|}-_!sOW| zN{Ayf)`T5@w~aek+kn`oP$m^Q40P+UQ+q{SXQ3$HblDIg&(S+=iN*~6ZJL755ZM*Hh@7H& zn)&_hzU$t7(+`4XYHEt%a_U!f+^i%U2)~{gOOAfoXDKu;dREN{0n6I{zb*1ZMfJ9N zxC~Zdnv@%$i7IsnVP>r&Zu&F@CE@*{5WAz((@jV%(+MvIJ_HWi|7i4nkwOHtQ#o`< z;Vgipk9%nLw}lZ{!c6K1q)k}^?#4W<$kSrP(9Q3n#BY1vJ&JCfXlS60?_)ieVI(xJ zR|F}1zmXokn%ptsWtaw!qE+EMT0Oz<4i67&A86re^iZAnS`Xk(f-{}@7h<^Z&zzZX zn34n2N`<(yWQ1tw(NeC~zxzoBdl7fTphc%a;xN&D^1<-kvNVcdYjHhGYSD=+p11D) zF@nU1{OgI9xs9yE-vh5nUxoX)Y6J>ULq>&KiNW@r%Cw{xk5x9zCmUTp@fEt%12>U9 ze*dj0MT+Y|$ap~Enatcoh-_yUWxAWM8ANfDVa{Zq_zD2aWk~ub5xL+k{HIH^%c-bZ zBtk258OxH%)dRH^s&RNAruxk!KzyEH4hro_fneq!@k3d1(a8JrDMp1poE9z*@ToWC zj+_EQ=p-T}s3wnxe)Cyh-`!|2?+spN{2~(@q6}zmuUYjA+WyAc*Rr(ZjoD^LPGr-b zxBcx)Un`u9^Umipn+>=94EFKuP=8avae)jsIqy(EnP6bTMAfwH!XCz%GuBL;>dda# 
z3Fg92Z6MlNkFC*6w*pIh8a>YG3HiKCa5LDJlW|U;^-phn6*+oTs^u8>p5>*D$VPEJ z$*02hpZz3!gTw?$Dfa4E=1AYT-u9qQ6rcQGSPJ0--CPq5D{LzB zWTPfc$qyhOU#C0hqaaC!G=@*VI@!7k7Pz8;p_vAmtR?Jv0(bVG;8y&f;D+*BuuZ?; zBz)2C1nffF>z>zRrOC<3CvuTl&N73W-Mp#mdcFvW|}$ z^4E}}W7>{0U_J*p`kvUUb+l2{#!xc|@XE`NNOp;f*VGl>or|hjMRYM@=Pidk(7B(w z6hT+fa;ugiAc8{qTh24Mjg^X<`gdGdl}DH7^=2k%ss%3qwY-^HW0^1lP0fzH;b1<* zS!re3zd)s~+G9C%UTv7_xbsT-?vUiS`8%VbP5M8_=m+`n_>A-Jx|nvYMxI0I!@2%x z82Ro2Fhh|}lI46zFyMHDmh3wZx0SGWuWMS!U(a9vD^3MB8PboyTp^d8zqMZo=)T

%65G^%<0t-vDv(tp5C=*aHSp zK&)YvI$40K6x^L!#CWc_Z&J@1P5YxlQDqTa+lod}YVMzu1iYWeLn7$u{nZV>+GVa&7~TrGK1k$W|uipK3Q!Jn$5k5jXZ4o#f-IDdk3V1c^V;F zf~toxX&>0OM_)xQrRn7;(~gByfSSQ&(F1 zN6qi_S;r3o^BsLz#<2cpDLqW4XhRq+k+B~az4rTM`Cr>FI1de4twLn2OJc!7#9Z6e z%e240mTx^qUltQ5bvv-J@cShcKAhA|zzKrQYe*SSwp96PJu+)zW%sW_g11rQ+aCv5 zdzOz0&@^AvU|h5d+in;Lsq*AL<1wf-UlEcjYjqP9DD})mZH^Cu&Z~rILg@~Q?82$8 zNJSd7_H?)9y#5$80%I4@T9z(hW1xH9F zylfX|VE@2iw!b;FTqlM>DvFBLzH-DFY7R6t_Ev6^O7A@R1pF_?3UIb9jL64;xsZ=l zjr_kcoNTEhVTi&oqVuuDdyFPHXUi*(LoeB>Ks7gMAKIe4`f46R8 zxHoNlFu<^{WV`)gMK|Lbz~sp56y8sAxgIud6*pVsz@OHkl3*x5LbYM|)#tAA3s30LvCcH~XC(K7j(a#Fu?AldeF)*V z;>BWC%f-*op|H^2*}foO#pK-~4oPp&#C#DTNk9eMpU*PJR{o5STRMN|T(SSw^}~3* z-EH^J&iG0D_1TXNi^tS!`E!20Gp60nS#|DJ-sfeba!DcGwcwSN4CU_&eUS7Z;h%`R z|KAThzivazUE*f39HRX{DKoBDJHwa_=5t@4uK(=gN$Zf_NRyP#u$vB4+|ZDeqeQDh z5u{XbV!_L!Tk{=jAlA<2e)sTh$C4S?pyJ3&V2{+22v!%f%s)swGv7y6&^vsCt~RtO z7y6V4j}gYI$QwMd3O7o2gCyRbNO*x9tC*G6p_r)ODgJvNok4>hu@diCVSy_ES%E_T zBG}+k?5Pu+@<6bar5sd{W7b|^tg_1giTV$(P)IgrZE@vQt5C*zz469za@la_f-}yw ztwMJkuVoS$_|G~Qf|%PnpGYin?VpY{of#=#WZ&DrP$fe7duS-554SrLgYNJs!oQcF zm;(zoaCB{cChFRT2N+WDU!8b0sH!&|AtI}-9ogF&V0?!1GkfR~(ktj#(DA6iLsE;g zReVb2LzGh;d;{^QeA$js;F%@&QJ>{|{Da7$f;2W~j}+);x8wP??!uqy=7r1~Iwv@s zrFw1w7eQ?G6|4N%$V44#Wy^>ie}%ze9@9RH@}2&?_(?L>$GPF0Hgcojc&u|l-lgL? 
zu`ibA%)@F{qp-@I7B7_;qA>&E#NUh1|s z3vZes&}7xWmlQ^L>-TJ+TvTG@$ZFhYZ-2p)K7q0t;NSn9_IpZ=i<>#@gXc}Z9p^j` zm*|HqtBdS_Q&oWW5t7G~Wnw>+;Bojp^R!ib$DtwPzq|{>j!&f9akFCm0A*w!XvOZP z>^}8++rqi|%epd!l6twf76}AuWu9i=09$8#oc2D&zP2Q9Sw8lqF3+oq>4i?)CI!(rOKO3sx8#qk0*^gu9kab$nJ7!+(WdINvYfP3@CMgJ!WF z#=G;4je{mqCZ?Rvpza(*JYtbl=$J6#=+m1hv9yw{q=km&0+ZSH$zz`>YK^APQ^8j3 z2pG8*BkOQHzQ&-x_fa@O$Fz&(D|PAD8 z{Us~&#YY!E1X0^8TvA2{HNmIp&F~jNbtc`-0#uPX3m_Q3cFxw6) zh+snw@{Sso@XaW@-bcMTB^q3}+?x7)VDJie-5508?-R0Gn8=d1jjrQTdH~|2z~&dO zY5^P}$}Ab1u2<(jxlCE}caB$7x0Lktleep4V@PW>F&5lAB#>ezLyztBCg^{b?FUX; zcmGOW73(^8<3>>NS9~oPi#$&0u0`q>Tu|KcEKU;oOZ_XB2MDnHg8sIvAd}J2_yy(U zKF-FQi;nS&wm_a|o%3c0_31&*%Di^;e?E7`C}?W3d(^qvOIvss!*40p&aTSZ7MIM0GpQI|fJuNX-;WVX4hWRw zC23AjO6#~SH(ofm)Kb4rp7$e*+0Ay$#z*LxL_|}k!w|W({1gY?=qP08oDdW2^=Nzw z_3&ex@@I=RUc-2zfJFj)(4Et6FGXYeX@*;#ew^Ov)F`Wq^Y>4ubT))4nkpbe$Xem4w_t%;fskA zkFd^w#e1}f7;g5H4Abcfeqq-d8v~D0$47Q|Ze*?X8h`MiSBJDcTs_M)d{X;TT}q$6 z_;>4Cw#9e#La9>(Tl-eOn_OhM5+v1B>(tAd?|3lU{1y&j+ z+s3v#=-9TCj%_>X*tTukwrzCKLC3aj8+Z52oO|ZXyyYQJ`>(HR)mp2nM5ZW7o@BK< zNnox^IJwA($N(#l=Q)~{ZfaY9=}G4FesRB_fw z>Z0o#86EwS&1yb}1EKyS2{j+t`iRDfe@Z$*LQ|f=*KeADNUC>gMosy-GSjp4%_p^n z^-Wi7>!yY}megB2v|E?;>>_tz74IAqLW-XUDS8+i9rp`%>23bj0sG%e;J*?nIMjR4 z*^!;O$Xht{=s^sv?4-?$c+eg{ivqZ8Qy4bH%Ofs{QMQnvs=b7@tW}3lrR2m&2sjg^ zlkpBF>Rgh9$mQE}Qh=Jgj(*EpO=23iyg7|FCJGGAsRGq$o;T&j0kW`1w10$7@&zDwlUvN zuDjTpIZ>W;A7buJ<}DJm>uJP%>QcpRmdP}l(L+Ei^v`Z;sEX)75*|5HQlg8$7E@G4 zG?qOuq$E#+s>Icle|O-U*A)B1o@69}zr*aD4X*?8kEFcmllmF6{DcCtX-IRj$C1wN z+D>itFvVBwWmm!?`MkXw>%a$`5EZLEMh-^mEl5I{OOSL+$eAmjbW3@mhtU5{Oh7|` z>+;olwZC6>8PFwZV7KXj$MEWPo0*~>P@RkHr%CS>3l1;_^Wf<{SgE>)x9g_~S zaF6#58G~0;yrM)LgZ+6%$EP%OC#uhqZ~m+h;VBNc6&xo&6|v7_dLUd`tEm#KbeC$> zu~&Bh_uj~6_SaKb`(+-x?L)na85p5|oO3(qpuGPh7_j~s40P3uR9U_tPc0P)hSw`= zts+I$Lmk#Wyt&Yqv*zu-H9EBrMn}ftd(g#wH@R`c(F4boE5DB`dx;^|Hozfm#6|`E zpsPgmY+FqUF*m>`ck-d+&1{J39_h?{Kcbl@KQ|78rv<*R;wPG*zC=J(-3Q*WO+wM( z?cbnw$-J6g#1tNol<9T@=m=UKAajlTY^Ajf;gsO-G8JZ~b$d{MB$t6Rl1q;XkAzgt 
zNVnt_NxetZB4V99CPV0;|E2{s+g>SOxpPlw@GHh@L`ji{I8yHj!4cNsgU@#ByBLOo z6EQL-zE3{jZ8a*O_xP&QBdt0Yzow};LLmz2@aLZour|5&@F3dM|M9{5pxm%P*6O-_ zw{NkUx!K9-%|6JFltO*xl=(l*D#T4ImD$JD2PPYr^!3-Ue}5!2-TcA$!w*ky;PeMA zNg5Hh#ar4$`ityGB!di@8k9kV5*U68xM{({(z#9DZxdv>fUr?D&wP?o1cJHU_ z_mKWXGY#m`Kq8_Os^Jh=;R14e?q${Wemu&WN{xwbUlh>g6P2lf`Xo}NrkOD^3Sjy~ zikM5U#Pw1V`tW^Fotrk?Q)F13>h(6}do6|Rk^z5#*M`nO&k|Rb)kHDtCPSHyj5^OF zomQmd+q(xp>N+q`yYJ@N9b{LhWAi)>w&nHH&=0KNepmvE1aQc3fglEP4mjWg6sYpw z#~?)nhvG7}1Hb02h9c1GJeu>Zcyc2CSX#VrL?hclfJo~*LyT!d66^DWparWSsT)jO zeK}vg^*gH73~&xJfnM0=ZkUdl-YN`IWz=iUN+SZ^zPv4GE-Dp%Zqm}F4;~*vLsbxp z@$dz==47a5NvW5RMDjiI%m1>%rA zoOn<}l|}>SYH0(-4tAaI)|e}iS#!QvZJq>2FW!FT-Jm{z`QN(!uPWb{iIf(W-42=R zFkjvXxhEJ~T^6@wrBC`WV?DNis9LOm^$htY=;A?V#b-Y>p;ak&Cre;|-ODV3$|R>P z8xr$?OCKu>BX~t0t$49MFSaV^&K{vHxplYLvx)ir#Rc{q2c5h*{kdGYsXHaXd=`?3 ziOGEd3?-5qfZZy;lM|`P|1cQb1m~WrJmd~XR!=@WIs=WaNbwiKv`NFwu#;1Qx~8U? z+s}jN&KDp}WU7A|RDmY#1;P)r(p-cXwEA;;@tN*j`|tz`NbB_tq@{~)LMw4VS`jyC zUH|yBP_$oJ*{Q)Z>Wu{OFvMn<3%W#2Y~janyp{Yg=z*~0ZpJ1i{eY{O2c^DI+##t% z?o@zN#vlMc_9=7ddYLQfo%Q%9EJ1JEV`{%C6UZWQ9c*JwpRxWgN|heBpEvLEzD`~| z?DT7eS7Pn&5A{-`qzQc$saaqf)E;9mUMx-UJoghOkxatw?lHcf^;N?Dn0D8ah@AKq z1v$6yq$;z2vr#)Dx_OM|pFfYHUsb7B3v*Yq&Bj}2W!&q$V>21Z6M)vp-rLGLc%A=9 zRXXTv_bUYGg~lv*J3wQ)?Ge@=fIlg*E2|&E-owMgM-$0pD(D~WKfcqEIy$LBfY>tq zClvRSff_Wtgl>D!<*-~x6?3d`Jw}mlk*pKHEo(+hfU9h!fINC4>F>E)@+V^qO<_1- zf4wx0#*6JNhxK>vfA5aVNH<&zs)48K|MFmAkV++=P77$1C@6w!bNFc+e(m)l%5`s} zie6I71(EyVDZR;}tfYV+Q)qi8cAaS*9fo8kUksN-Rh%|jW@^oQ+X32S^V zl$2zpN)OAlvm0Xg+!t;K80wWd1pU2v(9$?h?d^I(Pi{^!XlEgO(38c)pMct4pEUQ* zzv*C25A{tKNbGIXOCCR!-cqc|d99}8qutTrF5;ka;k~Ps2Nm}scg|69`$s=lSVs#o z#Ju-+F12>O4m&-s8*pJUvB*eqoVg){+Hb&W1btwtYI&2+Odu_U*IXv0>C>}YmR{!% zp@c2(WM%fnsn$bnYL^u@S$mnCOVQIV@9>DG)qXk4^zezxjlAOCIHK(uYZWlaD_vYyM{@c-8F&G-g_V z8?%9pLR?x9YCvx`{7~PF?(mf*zXMdo;o2}lr!vICQa)BQV{CVR9pURpbrJI9+H)W?s`?{Ck_+}JZsC?!C$$a<7ZDI zibqd!&Wj|jx?toEB!S|OT-z6mGmK#F`^ex}}D9UqxW`^mT)S4r!|DhxTMlo>P^gSA{;7rZ? 
z{%e7TnCuSK=}p#l@*xa~_I)N*zSe zi+44^y-W+qOT|C17VU$EGVA@|&UZj8 z7aZ8|6hCWE-}LK)J|A>#i=sV_`XY~=ns{@2Dm-azK;d58Zfa@`k*L=^iMV>y?=R|76O#CidH~LOS)fiTK}O> zx+TGs!*!y%-N}}_O`kEtkWOiUE6qt^>Jz_fe+deli)sgC8?p-aGl)pe6-$$ z+m@XQ*V?!M3GXfWBKYB8ohLUzo-cax`*%o{dj0OUKPY6!H&h_q-`026Yz=T-C-G3% zu28XER_kW}-r4e?_?y-*h}p;YbLVV$4)I&Kzr3qHiD_a=&Q5e_H_8yc80_6VqloB7 zixNSJfU0Ehiz1Q=)~@5q=ueD|2B-S25!d#&eK_>~#*m=wwT^|*9AP7t2uZe3`@y(#|^E`4`*`U`LtPd0#-Mmv1=L`U?a~%Y9LnmWcIEt_3SP2g> z7QDuUO^I&QBE&NNGsuXnSu(dw?Eey6E7AwhMEdUO#MqR{xsjB<2>B_o<|%qZLUVks?myj0+Z z8f)cFHnMhTv*z)3#@GE<-^3PzQn94s83Sfw{aHBaZTb`oC$HV z6-p^l{Aq+i62WulGgBxnDmnKk64GPa1JQ~-vLEIXA#^oah|M}L2C=h2UIVa>E&|o| zzQ2|%Wr0S8>oi3DYTnvGPDppSX>%323JVY_<}%+KE4^*)0Z2~evAg7{1<=h|%ndww zR20QhzP4PrpD_#2e`SR>=F4p@L-ON(M_k+eOzN8)8ymyoa-tZ;3iCJOs+(z{{?-0p ztc@QsgMgo)Ne`i%2K$HdV_zKmPCLfj&dqPz0ni8Cke=q80PYH2s0->X_z7yK(2$*r zeAV<4dD12XVK9~8IzPLpLo_(PVr}kobpQke3;y==35jts#kS7@j>q0}vpL^C_H4Vb@cXvFpQ5ojaTaCl;t4h3!y?;P+AVE5k#n?t6)xLP z-y?$|f2lE|^UW!hZ|w4-`qerLyPlZ~@u5NpnT-7vDaZnr{Gy-AtEJ*($y%1l6i#U7 z%}Z!XfbK3-l8D`m z`>BTs_4veE^0ti`%YJ2RzFr!gbZ?c`^-O@y-$fNO)6Mc+ zDPxBMtxP&GB9;Ii@+x4Y^k7M5Y&Y$@d78WO#B~T^umwAdmDk;)b}&P#gGEcqA*E(L zb9pCkyZh74`j?N&ZN>Y#v)EoSH1aJ=^93hXz1K71n#@S$trttZejg^zb{e|RCh`=R z`^IHkb*nF5J3RyjElU)q{#W=r!z7;lHXA1xFk{MoT>m{?5OLjVNS&CD_FBj9_6=O% z47N8mI{pZknvdS@r~7ZLx5A2>Lu5n~bBip6SH~6uaJ@1db5|1RLHJDbg+kbiBv(gUu)m%n6FNy$Dh<2&X;$eIG9sta#-u5k=nbq z@SSc0^4wD(BW!zA(LxhHNqu&>VLbCX@UjU&PNT*@Y^CMqJN*EYcE1_E5dV3hH0_)5 z!8(&&x83rel1r4|Fwr${%R8aAhdd%+$qz^f;)mYpOre;NWvaQX1e=D94;!GT-btXW zb~MY*1m}E73bEWvpe_FPXwHozUQNV4RhLa%-3Vv?5`3cgeJs+PGpSd9EiMO?_H{m} zTWU&R{6RTN(;XhbCKxHb@v`12qJvlxlkm#6b(Vi?58C&ow9xM!V-ag`6a9tDCwg0y z;muhE8iY=$SM`uxY4Rd(7_X-A*uB-{`~z8V;rtDr+yF_G&pGG-1y;qtXRsnk!^jQl z-bNFg4UNKN6hY#4?AH6`aZyR_I)|0M*Gk-v2M11A!oghB9~3`oe63B*n4^hivhpv-@z{O2?b!@sQYO;UyuZh% z;e(Y+3Q23s$jxSbfb4d#X;L8&O~DKv>LG;Tye#=JhQ^77?q`QZ zHsa;m$FSOGcc+1a(Fp%vdkt2D0&Ylwol)&K{AzF2z^QMO(#|{XLj{ zbH($3f4MYT%YhDIUcd4+eK~U1DmDFhIqKaohUyAl!!&;D8vS1>@P7m)ziy~$j%5bz 
z0=w`$!O3iHOfHl0wDnxy&y~Y^$SuXs?XxpPfu3M6VX8TeMeBFAHpi1}MQ0KV_~vD$lVC9*V#^~6$Q1{g=~0sO4^CXEAVYB;wxp+_KCB7|b-Zp0q!#8$T6P*rv z3<|~bo3>1{4`!1%Yh@v(bFHUVzfaoAU%%2wmZJy9OFNbVVGM$QshLUdUW=R+wvI(S ztxmWz_1iRl1{>*hbFWwFsf71QKDY$}I;=lEsh7sC*WH@H%uJ3B<>3hjhX;iD{P-Zo z3bjLwnzbBu$eW7&Q-FT^7u zDRUNxBK3;Zm8p0rW1DMg`!abybw_^L6?6edV|Iq1?Q3Y;`NH$>ko8~OjWSZ+7H;AG zNu$Zc?Z=&5VOQCE9Nik40(zkH4@p%N5)ONF$HFXVd&Ku9F&Opd0fBV5KR0b<*`No-Be(b#B=)Kb5^+>%L5%|d zw;PKrnw-ACBgtp-R??|kX%6cj^I+7_Nn>H5e-z+GTubar6ywDb3y3&&hU{k>Q>iD@ z<3ZJKntB%*M!dgXC}r=wA!i%(^9C2mDeS!(221J^MK3DUOVmL^mpc@mIc*R4*SVeR zTu)JZkB@A=8KNAlRBw_w%-?n6lpm6}n@5yL4f3-V`jN(Zx1uBHv?-9dCr{=wRuu{_ zS4Wa8Lvk+$$l+TePE>?uqE%xk7ZQuQZ*Y|73SY8~YJ<2qINa$IzKS-~Mfl4%HdE@( z0%w0#ZZ_a1m0J;Ddj2!K)!xh7R-Y<(JIkzpc6}o8fm%ds!66rZ`H}Z*!7M3BlVizm z(yQUKd#2{yj|OT9jH@B`U(s(ui}ASwRqPI8731~96-hH)ezlYTZlh%@;ax?@0c>MB z$6i9C%p_`oncGVz7;_nJ-u&eJFadaHDK$4A*zJas;%WHD);9h8UTA38FOxlvdDv|j z*rSmtcA%^VK9xuoO=0Y=KVB2@#pK*AU&rHa3F$QhWxns{Ys*0x9T;O7dD*QItR;q< zrzdzJUK~lH@H;@`Cp;oE@wKsyJ?rbDzkGK*`OZZSY3iC#Bv;ck`DY3=!#dUO5w2+O zd=enZ7z}1VRG);19oZ*mf%&UybK;X;v)rYHDNCN9-w0NX*ADlzO1`>f05Z3h*eLrn zESJ>XysOqydnZ8_ z8((61j7vy{YPFM?ko|ptQFK%!E+r~Um7p|7fQm^}KFb9=OvdCmR}8o}!4)I1pN45+ zDmk!;T<3TxBQ7~1cxzat6940(YK&I45UNPhGtXLv@|CLCwMGvZgA$5lh_nu++$f63 zA;WCIQhdZpzBp>R+5O_o5X-XEtPIt6%XHzTG`?@O#KpbxR>upeZM=9!I}KelIz zceT9*O+z-P=jItb+zYiLjOOjErLiJtoE?HC6pEx?(~|y?HmZiJN=qlL$7fSFFRa?3aHPuuSZ3cnY0dPeXSp0D`#BI&Yd}Y)l8CXA zmFI(b0+o)lWp`TyX~sVDu^glr?I|Khe0mE$n)^J&1%!W!B0ReY z*douf{T`(ocy4ij>1WR!k5#|vLJZ&(bc7%6d5UCSJ>ra74{urW2*`fnnKBjpO@!Ir zQSCQk5A3^H2;SHvROyk_r6DGppZlt;g`=phY&Ar94uBM2&F3mPx5i)ldJkr_&a~Tu z_r|xB{W@iB0G8ILdC#jKcNSUwHm7G_6UGuj`!AZH7c0j9ep%^Tn5B0y^XDY(Angs zT-=fUM^OK@E5!)wgJ3M;rJ5*H#Be_&U@;K50#2Rw6ieaE&r@VaiI*uXu@9u2JdfkXQ*JEPZV4>XL|0aMOD)Wwh*;siCTyTN%$mJA#~*<-kW_<|*;sK~-TU z&Te}NMnJm{xdou(%&C6&H{;PWe~5#-JN%$c9(B2(Uv}0_AZMT1409CNPVmW>vw;`+vhDHQlD77;J{ z2jXo&=G#e__?iM1w4B5tDa`>!07+4*k&!xANz}9^E{ektlfd;-TF$iVswP|oF}?+R zQ$U!qZGrLwB97d^Fk-VmdB85vu4%Ol3uc+ZmEpSmm7 
z?QM&#$Sir4%*F?r_oh)E1Q29-$f;#8NPcXy`)=ZnB~bT$>x?2o@>)f3w1v-p=ewf| z@Mu%J@rO06q4g%8j4-6!EChjPIORs`Qj{n#Rj$hs(*RBH%cPQ0-xy6~$kJAluuH|| zS8{ts-ye~z^LX{tGy^+?R>1#CE9M&meTZkufDDC9-LZhQ9~v4TmR@%v)+2 z3{ITFbXfjMGFv3pZ>sMy- zTR7pKkP@GX@QnCkhc1V#zu|j$}(*wNnQyNCnD46Th6UVh#sD zig?xP`lQ-`Ja(`8!U#%6JE)(zQr0qi*RD23t`g+rlAJJaN5b0?v!X&YS3)y~xCX%( zl=yz7ka%x?n@92T_9DU8aqa)v_-%&dWUfOMOvT(pGl8#}DyaWY#0Zt(te7zR%wD?w z;tzHSyA)qtwv*%X_pbL6@QuyDvlAN-VD`ZO?1N4&voKPUJVc9LV#U(R{%hI=E0SuCLN)n{!SfVy47x zB84q;?rl3t*r-IIP6FY?%#5rtdF~etZ2`6@9i?At5@Z=TW?*+%K^gBl+M|vNa)vRO zUVuh*F+=BU714tYV6P>qi8>YURXP2WPYL?tvibmrQ=R2ShtHOXP!=9w%t1U0YJX{n z@MB{TN6)WNKMH|4uy-3 zNJC9aB2?;V@x}X%sHlRs2eGZ|(17hqo?)#=cDz`2lw=&ITBrj{PG6oyt-rNnKo9(F zQ}AkAa*KL%3P#Itc}#FfiZ-(UDbz4EA$o5TZ6}k=0O_Pzil~} zU2F8alkV;yww|;g*!M0^=d~Tfc2g}5l>i`}hvaYE)?Q|Z`Tjc^hnH2*dKGu^3in)5yD^TnW%x}60K4d)wa z)nKfG<%^!oW1y&?yYgAmRb>LE)bN}OgAtcW;j=>E+P_?RM}pe68)J9Ghmc?4Aj@obXo0;G5 zUVIyBbJp>xM=DuBO~L3UJx3kHw%cX?r``VlcHC_^FbbY6=G%zR&$lbZtKP4kq^6r$ zFLkF^rB;AB)5Z0hs+d&(PeC%{j1(Ii0ilffg$1#}LM=nXH*iLz!5{9M6XdmHuGO30 z@78Q2Qq!Zgpo)Al|QT4 zTj{f*xS+}^zxZuFg>%2pW))o#Q-_*R{7^+Rc79Sp8x`$|ONvB-CQ>Zq>LSZwz9oYy zqj|fg8Awamf@}&LCl`*Y7QEa#bcRxUj-B)rs>3LaHJ} zb?Y81aJ+t&+Ln@XjPWsGF$cs+JpnVA9@b$ ztX2us?;YfXkkDx^l;36AuI?V}@FCXaGou91smN!;MCGR&uH%5}Ewz20p9~0cKYgd1 zK>0J$74@@ERe&O*tfN=%r0Ytw)ehp{1lwb2Y&;!qyp3Kw#R$0vo^@{_Q#fne0Babc z{Jujm_&1v=FZgnzdw)OD2fG*DHm)L&P?df zb_(;Y^FSx#N{&NuHjg3WcS`UB_Khv9wTMqpku5$A#VUE^WJ^aP`3vMoi18(QW>*g3 z|F#usa=lVdz;C)2y%4OFtnM~|upZ7n@jSo}*|#oSnJ6^9>hC)&F;XBrA%5y5IU#26 zCiT8$fk+ac6|;%)A{+VRTxxCld^CU8lMY*IMMr%0WVZfznXcO~dHdf;tqs@zWf=q& z9Ezmw(3kznQQdckJ(Ybj147wc8^Xesp)5A8e`b=oq*t$+asc(#>PIkrv9q?nH)!}=v)WBtKb>NVfvINOpw>O?l zd~VkwH>>E9S+BiH%jaClzU}8Oeeb4W9i91fU!$L~J4j-L;pvX!xsvrh0`SoeVB=dB z2V(A8;eK3=V(F1j-++$@e&}gYbhXYnI68{8+HQTCN{h3Q&C32ZPP0Gev!wusx<6Z7c3ReVQPYI;0y~# zw4qg#MA(oFag88gQ#Bbd{{*%OpuB3D)RSy$Z+8XF?pL=M&O@yk92R}AmtD2|p^7X- z_ZRK&lnu-J_J399iMkJu#ODeNUevc4C6aYT9weT7vPK=2n1~?TE=(#&DdE@>u!Id( 
zm;lt{z(v*x=pRx~I&lqC8$PnEn$@Z#8qHk7ngbQL?lmFG&5vZ1VZs0swg76cL%Pmi z{B?NJkpa-;8gf}Ti8x#&x~9oyv5o2v%R-@K@q3ifYG>Ubc`>u@YC2({km# zwq_)j`oPbRM5|7_RAI>?XUB*J;?k|V*;WwD&dqG z#D_o4{;KF#0Zf>gzb@VDcR>_6UJTmSomxK~J)J~;OxmM81=OHpl-k|ZxerlBPZent zDE`kb0AqlI+myHth`I|hm0V=#?ddAF#cHJ?72jP5^w)7kZ?+qzV&le}#=MEV z^D^rbk5SQn-x5VS?@f*gLgof#o&Ccq24{G-X>B@7YLQpYr>}FEt;*J|Vo!hX+E=}x zQt|RpU&e|jVO|x!L{0J6xT@H7@mW(4gfWc_be*K4DflcMTK0td_D*9~iP_St`Q|vK z>xW(v$ybg6e)@Db8Zu^Dsi=nUeU^-(KD=&gmdJHWtZuXf>x}=GT}>ta0lI;>*MOM1 z+V(@$!=*fU4{@LW#`{h2z_J1rnliUjy}AWizEsJbqn8`m#V|-hk+1%VyR-9NVl&43 zs5Y~_ZTg0-nRr{?3I(^_4Kzl#!~e{PBWlgU;`FW1W`ERD|2m)K^x2Q^wUzPpN_*bmsJ90D~%pNHyMS zeT#U)A}0(=@~A*^HrP-%ZG$ouDhXS$vj(h!fee$!H3H??1&1M)Yt_d0kDtT0bXU>j zZ-4=j;8sKc!l%(b@m%;lCi3U|xjd=~zawN2y0kXZiG?Nh5+1iJqF3*)JOs)qrCtXt zQEO{RYDAqN&b5MrP=as6s1EiCz%ncf*fRHSsH5e7jfLO)%?{M^CbX#am8T%Cp+$qD z<=rRKvz6oY0Sb8~u3G7z563m`j9+(%y%0YCG`NQMf?MYPkiH8A+9J3fz5v44e#2f& zI@B8D>2P;&U}|z}2A|v*!`#X$)z@q7?C90zC)S4sc0~M1={m|o=4(rz4rN!nf#;uE z=YQK86ebV|5e?AYU;VIsu2&(%#hJ(NosMF_LY@WIAT7mZx?&oIN^7|eCnuUKttV3w z^F+A-a_R!p;wIV29<2kP(-)<0l|lxop+v5r(+Fe&JqhuaIN;{v;mPO9%0CSKm*9`x zr@qKJM8#y}1r<+&EXkza(l>SoIRO6mG`7g{2yr-4fd+>C4vDU0)wc%B@66=iW;Fic z<@8So0V)@Q`#m(GDJ?Zg)vSN+)8;;;ow0`Z3vdMo`T6Y}e6gqywm4YL#5gXMRh$@MB9DmR`hKE~1I8^}G_)lhTDyoV22yWMqQUja zfub8eVqU4EKv$80_L4A!Z{Dm^O$W z-rZr3+HQT=M-QJTu|>3oB$&iUu!fITDHxl*ap#_#85^{UZV~%XXxs*xv-A=OPO5gv zDJ>~3G99q=#e!Po>nYBgY7@1YLzjC1fDt@j*!nBloU399XbVx~X3VbT0jgW7>@Tz) z_Of`j+^Y?!7&$yJp*T3HMC~NesoOKk)!e!*oLOu#b78Lu5sdrTOj#k~sOOH4c@^WC zj5mO*iUrnT2e{q$@#!hlo&N)xw}KaBqXHpBR^>9V1^rF%4-9Xfgs`cZd=1__j3q>T z{zJZ?oihcFJaWKCzw&71eDP6^Cjy;L2Z>G8M%sN3Z0DE?Gl(i7z)V=FzbhT(r$rnW zQ0<9}893RLU%mkf3VO11b?U9AhNH-{{e^(I5Y#stf}fO2!w*u=q~VNc)-y!Lr6n7H zy+^}|-;5}lzS1Jmt`1fk7HH8pb}X`@MC#ooG8syjSjv_>>CFk&l?pZ9taO->#~SR{a246_a zPs0&_aVVsqV;=6h+Eu4jW<6WYD_CZ&A*Ur#KzH;+D2!b-JgV7XYgeWRVNSy_U zJfdQBdaEqE6bIx1)$7Mvd%d-f{x$H_qlwn5WD6N;K3lL0zu%#|6@DF+=eqoejRhRX zeVEj4H&(?EIW2q>o0ue>o-Qq4!M2v8PRPwA65KXVZb@G}U-7)6=QZluys)pz7rdCY 
znIrt)U~Arbnq82`p)Pg8rmuGi7;Uwb>$~Efnll%@8wkAyI3N?qD&ite1fni|4ob}T zUE(#Y6M{KJEhFk)kN)ufpv~aGE6wBk;MajnO&BLPnYJ%p`3K@fcysvTF&W1)n=J|# z%6K@d?d>lRi4@kjzr6;dJEVsrf~@WS1}yNigb(8iG5}TnCrG6=!5{h>Yf}ds(I0wX zkNP)PR26QGve{g)Hb%NPX`u>sD?n{`>930HvkH$!?_J#ybncfn3JxfatzrXbu#dmG zbE`v5zidk_#F~q|l$i|+8uz{C3xH8cKq$&-vOSB1Ou1*w6Yq)jBKYdWE2KdP)AG;@SJDvFJ z0G6^!`L|~UZAw;Nui8aO(o=MaSKNV>agx%-F44B36#%1mx0|KI00Ie?#Fs?k{prz8 z4rie@2S-DAV(0>d8dMa!%1C&^jJ>5YU*KAnO+0G_0Ks?b=Cn^!Pukx?s7KdmuYR5JEaD_#r+lc;P|MdVqv*q=SZWdg0~R->^hm-%)-yzd&yu zGvIGtHQ@4{-y%v|d_nRkzidP*iH%SE{)|AP$=!K$a6RR34gN3YP?812vi@nnjuEt<@RNADNySYk#R%!xe-LFV^)6LTiPqn!-;t$0migZwWF-q`@rL zB%W*-<0?G3S&TljXhzme9LVcY-Luh23q^i)@#mutEs;M|K|hVR60&SOTWtSki+24NbCWeRaA(#8wMZ%9oQ7Q} zuliGt^}(=Q2krm`Gf3}tNDYfQh@%^2J8R@iDV4G~@T~Vg-}vG{!Ds zo~_b)#*Zs+%&4_7co;?HB=`$;oETYmpi6TN8CFZ|7Q@xC!iwRfa6pPpI_mV$yUe9l zs{y-`_8BIWiHL~_!PIs8_1qH^+!H0$Bm{FLhy|RDEFYU)>sYMK1L$au1NvBKni1)` zVI=9%QBm(Ul6fzB;jQ8KoXQnK1=rvz5EQ8C1o~Ij>Hq?@dZ5Aan`PJFv3$3Uh{Z=5 zA!Y9FcnI0;c6W7UX0sMOCw^lD1EH``2q`H4aU5LKX30w-gr376Gp|%aiClJ!OZFG( z9l8WQiy(r8JB7(Gmm)ymV-$b0Zl+ENvEdlVFyZ?nGXhwph6zi)H(@FXFtmIe%V$(0 zt82_clH`i@#8xSO%>Lk;(HTemXaq3a%pY_csDD#1)4a(GhM1%b{gzfzQEYQ#LAZ8h z7P`zxT+vjN0iL~!Go{-}tB#5O%ruh3t1?~XXC8?JeuR-MD54~40$=3Q@BUeP*I#(5 zq8jxN>3LO43vc(bRPT;wi`51*!&<+VmKd)%-QC^OCo)(&qXJ#BVP4zOmux#arT|Fj z--hP@Mcu)GeGYmnMn6jw>2bT_V94+Q#ZH<*`V(Gq?_&W*8$ZZcLP%{zfqygj88EjB z%#%L}1vA`@Bo)|R_Vh#`vn=$ut6R8&p}bO19jsKA%+OMLOuf+Kvk*o^IK&K-4z#(W z2@S26@XTURrOaRs>}*IAJ6U)uG1_K(Qv!y>wtkCHTI@^NM=b&xU2p(VLFv$Sq`_TS ztB6<1plF8cGhB;dK;ZqmERN?ybFmytbS1h(M87(Ge+Hnu|5DmGOOJwnZ@Woyq7xB= zN2vR}>X0zfFw7o{-b=OZ&-MJNpSJ6v3d>Hq7I9}|$g*UK#i+5ahvqfNw4jlt(Nws@5p4vZ4 zYDwr9+kS)}Gw(0fx|4Jf5Z5N`?o>^f?rTW5EaXTKfxg(?yw?OIQc>(a0eIg&0MA|Y z#1tR9_y&}BfzpkqS9 zb|k_fi-Fh%GJUm4Vcz+hq4V>~SUhfaHkz6lSZ(S-oJVCxhsvUTA4u}Pu&%u>9StHa zJ&2tR)!MG3qJAcdJQ0!pFGhVt_nE1m9le6G1JOAk(TFItuQFCYtuA4)RL)gBQtY>s z*t00f1+X}kCdFiaXRQ8cY|b*+1jzM&A?}5Bbc@<7U_fChehO8zC(f&&GmZS!)bv%? 
z^i`*WeEF&3%ebvGDZ{LJG0H6E9vP~AN%JySYG=3)u(oK@U|;3*#19yBk(87)yw*XD zF?{nR`;JHiUf)p+?3p`=TNm91_5TJY;Waaz?MQwYzdbBQuMd<1)5e>r^Z+-tz!@Z5 z;t)M~>A+l>$?j+gt9>Tmfehkz80H0F+6A1A1Dqv)v_1%TI>iWXLWlL2_B{!g^{%nu zpr#SySVLuqt5Dhll=oJfT_Nej8rneEWg0`I=KZO0n` zBOrS%%f~Zq4e|GsvlSv~iNa!=8u+rlY*?a?X8MAQ90dGnjJk~^Z=fn)H8x< zVKLUV8dkX`1zAJF91Tij;<(aH{Kt-WhUrnTtp@XY)pL>Z9OLkw{pL4+=lQCy?Vkxd zwyg(#;%4iW4c{;>6SgJ(4jAyc!UOgX0Adum`Ub*M3@cWc|0uIU@V3HLWt zo&Oc8kfi@VRK=pOfOR@uoYg^1r*gV|7AjPpFwn#c*@Sf;5NM@B^9RBIYwdwwCLsg- z+Ff~<3?{;+U$HHI$U2d@&#j|&{f4KsP?`KNQn$lFz7&HYeDsB9Nx95=jUpQqofj*b z&HFQlmG{y2XN1wp!uUZC(>*FtMML)}t~WO|N0fu9OgX82NdMp0_$|Lmzi4_&omm14__0f4=>Q|o=ne353m&7POdK{pU?3fHS{A2N{j(3MwpZci2_1$ z0L>eplkU75X!A)u-C^6<+N)b?hrTrn&x`x*MK0U78dPv3LSvR<-i3{wqa=z2)m|$~@N3t}rw^Gjlri?_L z?>P_RS?rKhypH4bivt{S2mi}}OWe{zwE(F?u)Pe7-F;Mp(IX!!P4Y>V;p6L3RF?9x zy*s6s^RW!1s-j_3jg{NhcvSlW4sa3p0ykb>JhaRmUVf zBEFqj{r&<0VB+XQTciSt5MMZMosIV2Mj$&X0l%wD%9V34fL#mx%dUmRDWZy1-*hG| zY4oTyNu9kIUJ!&ZlXwej1^)wS2pFFWB-|srH!nW&pf-&;xOB|&i}APsZ*K8om0aLJ z=;33-^W%G?`|v*?)9^phT6Nq*3hQiv0c6f9i$j2?7BLo=vw_$Iqxa9&?~4i5?D6#f znnCqLwxb29HGF8?mkHZ+$5ZvPL%XF#LOFQRslUKKhDGe!LU{*);~yZK+=Bcfh+jap zc4Zl2W^5MYTJXny#%rg*@ojYs80){5*g9?xojNPW3hcd-uJc~t&?&bB4Zih#bdMi{RCIU4iN?T z#foaIJe_w4Fo(n1O}qSr9S{w0WLfZbMi2!=R7x05OJ=qfUT1F`W~~$W*7)*v6{f3g z`WdNU)1$J15IayOgN1X~)#=kD-DY?EU z-wMs@>UZ@EhQ;h3#`mT9o;0ZG1BH2~L05CivGfN@J>2h6t0@^g$S{ds$j9^x5XlJ5q;W-A7?cQe47LSE7pYWOGpC6i4gzs z$Z&l86!DfA-?Ywz(tG1(&r&i#*oOZrY!hD6{6DtdIx5Pwdmjfx0hJb%Ryw4lyQM>v z?vS3LJ4Ym>Lpmj;ks4qqrKGzVx`&V)n%~2D&w1bTU7z1#Ef({~a6iv|-@C58_qFrs zb<0jALdvIcf%;6{$^n-D7drmylQ1e|vohAd zhs?`qRYUt`8!KKe^JsOy(g5g0-{Kp=i;T(f5jol;p7t*1pw1Pl`2i4MiMbNhE$7sr zf{IzP+c;%0quC*f`s0esg(wVq>AunCEj8 z>Tq-%>d~+vcNm1ZPHX6S@kJ9)pf4ExW%lN=11wBL#{r;~1s`CX_=KDIR$_JP_R$3&&q#gY{&xmmknZAKqhN&EZ`Jpx3>+&q zSMHj>Ru@4*#PNwgQ%VZr#&Y78-N%X2MpHuvZ3s)ksde40)$q3rCrS2%$m|d{^WC2m(Uy{4?xj54~=i6W?ngFsOAj@fwY=a$Rq|ELzALG=Bt*_9fbSuKwcK zAoJ%yT?sGS6Okq0MszjRc{&92UYgtlEgEkuJ34UnaWku!nyyR~D5tIPTel4V z&VD2XxmBcORj 
z4<3XJsp0J-izfIby+wzZ6CT~Iry)?Gv6cbre(uAhrMSFI#x>ImtGCAP`Ctfj@+KjW z;v;X(QA#|Me>6k-oYEwL*&oCon8?iI z>WJirk$x?x^z0IEVjstzSVv?|wSXVSJKN2q+)86q3vT%A>Y68 z5t)CR^>)(J17zjfD)>!zM$3KH-4ZC;(7I7ESwwg(3Zc^Umdup7AXZX~lq+9K?u5MX zObEUG^z7h3@9{>|KAVQCY`cPm2HPx!SwxjGhEYR>fbeKhoqP)|ZF!xKvjDfJFB5r- zNBi1~hvyzG?lke-Wfb0-Sh^p#Gs;8!7qTjeRb;Y019N8cK3ZCaW2_;klc-Miagz?- zVGd^{bSKxxiWK~uR|AwzB`K;G4aw&|9R1YLYPSjz!4`|$Vj~`(et#dZVcnciDGu(` z=O-9(OS@5sQKkN?KiwwG_d1!} zXow$PkFwn5c)QNs0_}Eun7`QCSjX$*OE@#ig|I58J95bD%24aVpiyiZse8+UuWmDW zX}~;?(vq|@=H*7DH5y{>`Fot!*NQ_$w1$q>cS}e0?PlCvK19ewa@11Ezx}u=PwtX` zRP6oN$@@rTb?*8#_OOM8MZtKP9#UOXQxk@kj=SW=$QINV{-n+J|3|IQQ=yKsQ+(?4 z(iaTh0N=RdnlJtdTJKAe3VOVQUf*=B|66h*taSeAhl<}Fu8lRSb2AO~!=&r)ux;B# zd-6BdJ}Oofx%5{?BYa{ru^TR2$df+tLn)I!+ZS|mexWl?sijge8m(k64h zygm4${^wDpZaC?3@#%da|Qw0c~DW;F+SA>Nvj zqcG4#PrWqgcZL&dUg!5s)l9olRm6t%Bltu9Q{mzANB60xcxNs0CQPp5@RkjFh6XDJ zHYTD=RXq&pemdL9ipM7&bq-HQWCp5Wy6&e#kKLJ0qkY-041xZlb)wjUPAaO%1Z}fO zPKElTnPRsE%jnt=@WT#`$G=bDm+$Fkwn)CyCf+V`!SlA}oT_ZR+}QFKxzW#JpZ_sk z>>BRw#o`76g0)itGd=b@pVe9AdS{2|@wK(J;ZJ_ofruZ2TZB_UTU$`TCP&+eWcHTR zdh7h_J7%WF*Fd)$KN)Ijof9q_%%MnKtzj{1vDEu4u=pUt+hR9#=zSIVd~r}ExF%gM zCNl$#q@gJ8war^Fsrd@EDCvkS?I9-<(AK-YF2oSZ;`_AT-|0#6-Kc22m*B&*?5{&o zi*~^uSG^%w!8J+}VsO)SMu9K>xk~x=a=n&vSGsQnUc?>0GA+pkBKJpmC(skUB<9eIm9YnN*geykG!V!-B>}vNQpV|bp7cU zUl5yz+w)knO(G@;W~vl566Lhi^ZEh(^p%nti{b&d$hEWjTVcDF8kGIwGikTLN7y*< zM%O+R!8Xs`q2_w4kjEpic74(R7dZOsQ>Sb|vNbo(Av@dKt~nq~`KFk}R%tWA{M~3V zU2gq-@AFSSQu+Dk%9UlG=da0Wt1Q~{=twx$ZZS;S8A2`26uhLT&GpDtWfBHBx+16zrIBJ~M6E$`$D1YgZV%=u!)gAqf@T&Q0THmKzzNN}rckX2~AtiTgN_`oQ+{VzreP zeJyS(SWbS~>U_nvF(Q;N%~%c+fGO=$TzI-<$Rfd*2e{S`ZavtaO>JH7`WzT~08!4g z=j4ZPZ*|=E9)GJe-!))VEQ`{JxA_RkL1zKY6`_BO>k-9=X|;@!y}fOEKzzVM@nmN< zoBB1S^K&I#^GlJ4$JyfS+#71>mNR*5TD#Il%@2JG%7c;-YhkE1wc!EYi*}l{GqXgX z+WbLpL;J^2!s?~#OljyLkg1(PhHU~{Eob7eCbi1q=6Dy#!8y#1evxpKC1@iQ0}3-{)S$vu`~#T;fG zPI6%|Z0QYh9)wsniI6Tq(=-E=1DI^ih*c`tySb=!C3izBKqZ1%svtlm@cr4tlkUjE zb$N?908hj?5S#9L4S$CvBY!EJ7el93l7y*fBh4TycWJ)#^i3HbDo_VTyLmjBg`s4) 
zCO;TL%z9s_ZApS?c=3wTt znMz12h!f?OQUwFax7JMTHF6gj4L#KYunsFJE?8Xzd=|zDq0GbCx{_ zTI<^euV!T|dmAjfZ-1&jm}uE!k_>G2OHyzxIGA`pDjOA}J}RqXobLk^^AqNV3@_o5 z$X^a$bx0~br>FJp4L(m3p|NtyVvQ38@<7sL>9vz%V)o&B>35P@(QbxK9*2XUZik-+ z!yHHa>V-;H_5W`%8wXma>`RjHt?)hq{mU;agP9^MvD!(jdXE4QkEo|pcZ~njn?G+& zIyA#J6jpJ9x5|Q6iB0=IMUmvxj!r*_GGxmYq;_#|ShE zD;*9$_m)nW~`7PE$r~U_B*w99TwRXkh~2Y+_y)c@a;m!dZ?{Qv{x8+wY24 z&M^KZGGI&;6_0-b)v9I|UmP#19bp`pZ}5{zZd}2%1IT%po&*Vf<58nn!PpmrjR93wFF1<(`D-NQeF-&V_v&-vFjrvtja6+WpF}mhGHc~_lu4I;SG;M07jdQ?GQXum ze%f<0DoqrIQsUFncRUI7Im;FZqYMo#JoMkA+fn#7(t%a%ZjL+Gl#8_(3%G!MG ztYX1d>T#pwrGLW1nY)2`W{SeL!6kfxpiJ5oz#Qht>`va~q!WIm3udHQ(!?Zhn;b3; zC#nx^y?P7`)4opg^YgQhAmyg^=B)W4SZoKo~%RxeZW2UcTA`B1lh+Z8MFM)aA-4$+ao6qP&;+Sv@rUOtyyr z@uPjr@i&A?g-;-#;-&fe+OIz;b0#C1FSZl0**0;bmF%BF)d9c*x@0 z1IWpm9k7*yf11mIwlvQfq|M@5CT)IKoMc5A~uEU%q>^@`ftuTTT^XszT~uq1j}AzJbH#JEsnL;^d1> zyz+w1wuEtK2byE%Y$x*h!4?y>sJ;g5?Rv~k{>Pt|tziB(56-XPF8r|7#%CXo%T+bo zM;)61VuW0T(+5vvO)*Z*v+gaSlhSwaJnB%ZVq(X5OutWSmZ}CH=<(^T-*H_|)&g{6 z=UxDRtmha>eJHJI82#0^Cou;1`Y?fcC6@`AU z%aIK0Z*)`_lYh0v_;wLwR46N9Dv$sMCUzT#Jdt5mysqzQyl+AHE@ybBy9}Zh=#ZN4Vg)xue z4}H$rsYKQr-U>WloS1APizlWavP||hQS(4V@8x|lw*y!>XkLceQ7+MdynW>+)>r}Hk<4#u0B!Ie2bCJ8E9UkfcRc=}VXdZ{;sfbRIkvxzjyiO}iA2 z)qWl&hC;$5z=piPxr1@<-ou=Ll(-sao@dxsQlOpKusz3o;j!}$G{4Y)Q1DBXhUvf?m_(}c$ry?mLpbzgQ{RB<@b?B2wS-Hg6SX5lGYYv~Y8A}ew73tX5 zML$51SA=dxI#_&M^51{)Y=nHSctr*Mlt4ulxQZwjchaq+?Y!z~2Q6!SU}3_t!1e5S zSN~O#_(`mV)wjiwngfLrZTjER^fQTd-|@BQCZj6zLkVK}&~i^lOiS?J_3BoAE&J%L ze#_1ow`cq0%uL$)LF6~bie8RC@C`$JPYq9sCOClQvNo;kdO!K$uud$aUi(bBwjM;2 z!^#@P;qF7f1u%G3A3LPK;$8n8J!lQ-m@#2oHX^ij=a+ zG0`+B^defKO8;gnuiH_I$3R~i)@V+>emt0+n0mCWJJ6Vrna{i~rOxRTuBtMoF(_-8 zG(FfRSO3I4E5ryY(=Jb2WvJK_;Klfm188~n@K$4qRfe7xvP2gvTndN8iHb$+s+%{* z6Yqx)W)+*Q_Z_H8cM+ImmAsHyvgCXY(p$~$q~&S z71<2VjyL8%c`N>x5NY$q*A6xCuFv*{J2u0;VZ9Z6;Rp05YVsj>`OrAtpEcH?yc|Yb>yn|V9;}JTGXj#2LVm&*?7Nf&T#>69xyF!74LB=Edw zaG<|HNpE$1K`?6iN`MO$Y+(>mV@H}awHU`k$Q6? 
zn{IuziMZZKc?$e-nswV5S%@$mi0z}#xZG=lPWH%qfg z(+Fj_)cpbEwgfVp7F^Frg#j?WKHx~pe7*Rg;@Sk@%4W{94%B&<0I5e?zaF5QyBU6v znp>7Z;Cq@l5hpsEZ$;(!H4k@U2|R=@a>LbgQR>5bx$;GXljrfU@=sTP{Vf_)GMen* zHt%FPTFp1~m81?!cmL()vX=!2f*1UP8vH-Ejn%7(V-ucj`aakX+>d7#cWT*o*Ae_l zvAea?S9w`Y7!#kuz*FeA&XLU52+RAtfwQ;DThD+-xN#74&njW95BLZUTdP;fqhYgO z6cvJY%!_{9}irp;|oIcIHtVGVcz7TaLOK&uN5pw$E55$j4`F zo^ONzI-DOTDt%2VTK(X4x&sQq;6h@kiMwCTV^QB4kq8*9NZz{{)SUpZCxy}VDQtOv z+)X(CrrD6|6WC&vl6XtxSLp-#Df)Ymr%B~Qu@Na{$^x+@4Yxt5H7aHOX4!pV!^Obw zmML;Vt-Q*-Xy5OM?JUOtF8>k5ZZ-pcN0}Hcdoz}U&Y+%>GXpl!@c&byC;S{TGkxM* z|GF!`X{$?&aHS5rK3ANEd2Er>cDl* z?~=lkZTzHO%z9#oseBNuZd+dnz%iZ?Lbg3_X=O-0E_xp{LxZu&&aeu`#9?37r6^zX z-(Ue5*$l;=lAL$aC}g)%vQ~cX8cbg>O;;U*5df1uYR}IqPBfWU;#CA#FPb%g@NSI1 zs#LhAeD8eC<@W$r|1h)!MYJau|0R`lOQ=uW%L?As!s4~+rj#aTHqt9g2ztc?-f$nX zYEeTfZS3p)lXKZ`uHRj*-SU%o@?gykKYyMWsA2eY!SeqWtA9f2(RBN@p4Zg=EZOwz z&aM5>bt6yAQ&bD2Rs8%B&_nX6tC?9*`Q^&AJ3>_=e(E;3)7#Ww;&u5{B)-Sx;H5>G zt^UN>k6@9Jse#^a==pK!HuNed%;>-{64ca|TRxxWx|pWf@3M3+4*fF}XgxLb_0NXT zunY;&cD*k|9Uh{2om*z9x8M=u;Q)ybNpo;X`kl9(QF=s^7yi}EgXi=WjVqtL!5R7H z%IwLHXcQf*m805h<{rix>CX+W%|`@yQ*#id$?S?ySBG5*S>0Y=9GKEhTF*i4{c*-3 zt8@4aE+6ESgSu#uoOdscUi@)${yxG?KI!X8;8b;FjCGqm=XAvEbI(!=p?a#2Kw#jU z<>st7{{S`hM{!NCMqfZhH0W3;4s6LB4v|)xb7O{BN~gVAg=1Y@T-+CVc%0tN2;uTK z6j$A4?Sgrm3DvP6D5#R}#l2$L3q6Mev|=bbPF z*|giP6Kkq6TyMWY?HX5`d9f3&r5Z6`8ee-9t2@1 z8kH&S^pm3D1)!k6L_x(AM*;p6CxbwV=9DjGwCFXOVf+}4p(*pUCLr!7?b`_P+O$ugb_<+ z@EzC4{V2-?=ZXQN{kl1;ysN=NXpblF+1nbIqiu?7C#gpDquwuV?e?!-#tuA2L{1u; znHn`7*$)ppInV&5A#{2y`x1WPxwQq^Pd?aPb`x|^ zoPri>&Muyo?Qhx;O=>C510>`jx>s78JZT%x=(EBz zruPNcZfCnQvdsRu8mEeI)vT)=x34GuIL#t!Q8(2?UF;&$gkS-B$VeXgPSc z179>84-JIwcD`V+-`UVNJ4!tBvG>t_&vr=unrp)OoU=3hU8~Rcz?rENg`&lbD%%jB zH@)#~B?s)zmiov%@3pc{plAKZh&VEZ%5IZ)zVyg&ee970{@~Cn?3D|Cq|?H>^VcU3 z)AX#QCY?o}n<*;2k;?KBBbUL^7kdS}>H1w^AQ{5s@)@IKT8DVatH|KL4jedpwFUJ4 z*n|4DXGrzm$wqpMWdTAbqi@HeUBPpjpyeXry1xlyEr*aAL*72lVo^6MV3ph5Jpc5@$d z#U)T`)yux2>_fqrEXNJM2>>gkF1O9{u)kJFYFj3~%*wlIXL?F0d|rNWyczB7x85wY 
z$hWyu$(VT9b*jAogKWW775=81hp&#dYZ}@ZE#!JI0QU^BN6<%8lJ_}seP4L>r{v1W z*>dq(Nnw-WK%mp0A}Cvb&D-B=;bv<;N$5jL)iNI6RaLIgTKqdh^bMc-*&FHZyV(%S zhTww9V-*c$DrnzfJe-ojo&c6X;}tHtxw$}1)#`WbxWC>+*1AgCjZbuRt#6=_%aDgb zml@%A49PDkzuh;eo&7v#AA9u4gPa=VpYtjz;KloSOr@!SrU!_~uq<*Y>Kn@c$TAB0 zkqYqoO*HvG38)>iyDl-dw;cL;Zi?qyEbaE)ge`KXTXaCnWGwKAyR`KU;c9MjFH^J6 zV`nqM%U1j@;vfYQEKHY>wz+W4Mj4jLhG=EV?MLG)_0tr=`D&IEvyYI4g&Xy@w_JT9 zoLB8%U)eXU%0Umb_?%bkNE72!sYQT;+-`_TBfCGyLuO0F;NZ~JL2K*PVYL6Q+k)q2 z)2s7sRsYKqU@9gwko4qv94<3B>JiJ)Bo?*?Da4n1nWRTvtbl`$A=$js^dWzxn=&bI znj|LgMA6SCKkO%7G6)iIA;t=a!dtxo4n^lHv09BHCD$Hr*Zte9)=ZDn7^-TKe6O5z z!l5J@goWArf-PlCmY6m=e{ZJs1ktIxR*`v4!r)Eml`3yT*Ug1XyAVN3ikgJ0^)Fdl zF{*Dp@sgVzKK^pND3D!riySD6D&eh`ry^U%5p`$bvsv}nCmxwQ5fDS3;fKoJiHHCq z7}@O@fna=-kyYqt8Yi@V71AJ|7U|C8m7uDuVKfDe4m#7#@+&WBN zI-9z#X_>WD%ZbYT3=6wO(A z-{oX2kl8Yw^t$i(}vECih@jUptwU#D_n|?KrpX9Ilt2Qid$!h_u38=esp0w*QEn5jK1nuaWbCv z=5qg2=Vbi7PRtn{4bISJOV>D5K5oOy%LBVI%B%>=*V(Ta8gy26GCX?YbBzL>N-C_k z#yD)`7|N@TId-;mVMK(XY{JViH4Hs1H#e2};Nu29hn@SM2~~cRB~O%SL3e=>d=<@q zemz+}XkRQ2tj@p1ru0Na2QzAaaZBe! zqHJygE5Ag6s9>(GUJGYiRFa8#I725w4dzwMa2#)nG2BMYyhr96oS)mZ-kd2WvKvu? 
z;|`^C^oPMRPtoA?l@&HCZL=Vs!xAglI4?s;&;#4+C|nxmrw~q?Go0=H*bOoq5fE~- zPR#W;8!PJoU3H+dvstzGDI{qB3)!Np;%1qA%4qVp8#orsen(|=uyoE|_)UHUI=T}b za4b%Anu2HSEzoiJ_YBRuKj%W=kzQA(2UHCs;?OzyG~iI8Ts3%@5W7rnw1Nan*YmMh z0uA^}O=)BmPBr_x*TSwlAKY&*HesvLVha=FYX=(MPPzuLA_jqgi*js&W8#XhA z6v7p5qkg^NaVtIf3FcFf!K@)j2D%nCMX~=ru3qYZ2euz6>@DVaAV1$U=wBv$w{`_3 zxY4EWx`~)nm27KKutphp-P*Fur{S_X9f($Uux2(3mK@Hctp?+4A%f*FPD>n1c`bsl zX)sTiA;hPK(3X)h-MJ;rGFykpM%7&=R^9wErp0S1|CEI zgU1+?1VSF$!M3gVj>gKe!A%OU{(zKE7)HN>9+nhe^^2{BwcCW)HTwmsFVK1z{KEx+ z{^NX4<4fT;qh^0diJoqj+vBsf;~K1X__(7Kp{kUQOZzMEbb#(u>!gulUYP8y@|1i_jwKH(Yov+dUxAZwS=bT0 zXf3U0Wgd8Z6e^hwLoXMnv9lB`2fxAoU;@k!Xb{P)tgEf%ybAD@BA@kdO%VU0 z$7kC2)sC8M{ic?-Y+*BW)4Mk&(;d*)Py> z-ZAjW=MEMtUr|EacMV!N0oO|-|3cis`F*%Ia+_|yEaW*1nW?vvIAoMYG(@^r$Db#r zDrNcb2@Ph)XL0wey6T|A$lC8UoeN}*%A%aTJrFT^F6ILh2S2uh)xO=Acm!Jm$Hz;v z`1@N-maww7`@}d*bhqcL(G@}`!&0>cHKs~x#(eKvnFjV(IKRt1LnFad(z*@msUjoF zOphS^uUFoJ?qJNiXx7*7rDB|ydR}zGZ5`5CKY%3PI}@`lzBlqi-po!<&ggU2pG-E= zIq%HJA$R7(WDxqk2Dv3hX0vOPEDM$8J+;%(AJNh9LU~Xx=o?yYN(}beNT=-8Y_)~H zcVSbEEk>qt{|0e*#*h^pKSMq<^?fn-sQaP;`yoE&S7AtfdRF1n#h<>Ov-w@sI`(ef zOI3Yrb5f42QgH@*H)?_gJA4`+fT(E!^D|Cq^!JJ8<4Gq}(w#rK6)3)6rQ&ZbfjhJA zz?~xHjuoNRsh4*pP4=Q?IcA38#sV6Np<=a7#>!G#!%Ch)Q?mVuPm`WpS8r`C4A_GN ziR|d~_X8%b^NCV~DkU5Bx?xY2*F9?Z#gbOveN@5wohklmXy}ag#4*jkdoLTVEu{km zXIxd>T~^>$)USlS>qXkrRJx00vehC?(a|nwISe8VlI`#?CVk@aS^vJmK3Gd_q>EEYg3aco5DV4ynuf$#XOV!&T^>yfyJgJQ8zXI!*Pte>BmTRzDJ?)XIyQe=_ zI>YxGC;KF2T4QP6h_rds3oR@!t?+L}l5kcRD$TE49@%XG{Swj7H|x6$K^ z9mrKZ|35`S<p`u&)xz41X)^>3jRi z8}bR#kuDzD<(}x$IDVj=Z&mV?LU8;B>3;_m+DvzzZ-YGWbzAtgkyyZsF;zh9Y;!tT zR)$lqJW#g2_i=A~vN5t;z+_B!YBQy|(^to~Wuo@lTqoyX@@!{7f^RtwHkcv>mKwYK zoLepcZ~@~${s&ux;zDMNC0On6ik2hAu8*izxr>*7{=UsQ*UDD}sLJpd(NW2@pGedu z&2>O(lW3Qns9KL5L!*j(tYm=$Q18CqvutG8HB-x-YlKVSp} zedVojHzx*Q$$bXEK5Q3z_Wrg{6wdx6eraym)EYg`N919mKbA0VB<+OM;+b0${>LlC zGXnhdF61;vl?OrccM}785jOart|$rw8!-Odeb&0Lp~d&fT@}AGzrj?^Ta|p<`OAXZ z(aA~)SnH(srn*3C>X>z|K)MZb>hr3#>GSDHmwKAggt@4SFO{GW?LC~d-`x^Ca83r? 
z|11aKI31AzdyTqWmMgMJS$>GP#tGZPN2_|}=X`nonD?4I=3cMR!VPWageqIU-9mTr zIfq&A-!AWwIp0xhvIStiC|t+;$-wo?W?q*K#X87q!f-w(bC5xYwZZQnQ|C3JJWafZ zd6k`dS>$!Y#Lhle;R0o;9<{1DrKcL=*mW_vHLS$1{*5bbu|bfOMiWkVstdXeF59Cf z5@gjxP^}AL(lz`b@mGKX%mt#EPp?6e_aGFo*NT8IeNIXCme?K*X62EjfK=h@myf4! za2^tw^1bLYa(nRTshs8L$A#3c1n_?G6gLiUGqWqJ9^DmSWf9AZ2Z)424zw;VzAm92v3T2=8OMEmcz zaJ)zT&N0T~-FlnZ09EGqImJj7(?>VqPE}BJR!&)YU^MG`je+HKC$mRGLt|v6sbD~U zGEeFs#jGb^oq z6QXNVJUUf#nt^z5y2P8H;`&`HarX#sMMy^1|KPc3tk)TOo0QiRa|6{`y3Y(NXUj`k zV}V~yD%ss%tpQE*yl3M|9(Hd_WGIyW`643-xP^&lhh`_us!I-g4zAXUihYUO!Ed}y ztlWV#E3Cr*1_7R~{3FomGFj$n&+6YL!he8^wW(2?HWNee51w%U?`W}}T2aovIW8co z-fy}zB5NeJw0A$QQj2xY-)-Faf=3*Wf>YX=XOP#jyOA$&RrSC&HegJ-2fvbixi8;H zfzOHcS+jp1EA*+*Qow`YJeFuZ!|-&I8-$OY`_!Z6{cg7a9)<~NUC*+LvmZ4>e4E<& zguG=Y)EN#|AlJ_z*Y6-XfB`HUZgGQCGg;SJMn$f_dS-1b{LcY@Wr5rSRA0z8YlK}9 z-mC-41K*B!UJqPwZ_}&`x^ECE-pk`rB?sf*Hb4Tf0lI2U+=j2vi+P6L*Fw620C$gY zbU?@ci?h+y>=Z#%DItT3l4YEBQY#{3v7wOoRs1t}D0eZWV zi3(f`vPbJ@mrIKZp!QD*5~A-gpZ#}R0O=#JYGm?6!?1bOnpohw;yGpD?t74GTb@#L zI;oc?3kP)&Xz$_a>1q7vKtW^UEQMnSK9=R=4RUeO8ac@eK+*c!low0vAZzM> zmb_p6w{-UGL}MrJnVv&Coxgp#kr9<|=#JyWRq9En(rHhKcq}G)wjZV3*(p`f75(au zr#7Yn#pgK1H&b>#-M@do98N>uU4v$vkc~dcXF6E-JnCWK%LH0ojwLWCYR-@P*r{^s zmAvRH*nLN5Y<)M%z2K|{(3i`lv(>~!*ni{J_b_G@dMa5OToF0w9&N-|=()a9TeoVn z)nqgH;EXv7+2^?5kM-ZJ`fJz3QwS?V@G26IcU&Kfi5a&Agin?m2+^fGzkeSO1EMg( z#!h0rdagnK_6(<9Ca}hUBy#I|PU`*2zu$gKF1y9PZ{Ay1v~kt_&PEsNc2ghvQ%}L@ ziTMW= zQMZr$WTXeb4t^JY)6Z{#&Nk|?o1jk5r=Y6RasH2WfH={&0b0z*B}LDkuyWB)7MqlN z>$a!HB z`V){Bj#Y}8wxG%)`o{ccSBR(1oOWE(5Vs}BLOy#!p)Kj*+@wJSl8QKqTd{GW_i_ z+~;DltFTIMt(v{h*q!11t$xE*U{|RVz5}$825GN^aYN28;xF0IccBD0rH%rm5fA=z zGt|iMF~Jb7X7M({7oz6BW5%vdww(ui6Ii;>`(2j*96p#I_vLd*+v5~m?$_0zxUo(t zC-(f;Wp0)*ZimFr2S4C*^DVJ4P|lj2R6A_ZxY1Xp{?M{gJ8?tcnV6-PZ(DK+^V_J@ zEvvK%NIM?XMM|qw6V3u_U!l<6lji1uF>6bp01?n`{&jf%l?9`FAOH;Z(r=x`$kEA0ne%FM z9x*R<3|8);LH(Ds&!Fq->dt!)u{*O{cTez9>5h+%PZQjfUBzM=FyEAYmC5hDt?zI> zdv0wEDE!kc4z|B_o(Z7ybYGO$Jcg8GP9JoNFeoMe8OT>mqHJ;7oAt}k*b=L$NUK78 
z|ICG;hebHuV94?2v-B!hZUmQXh;tk^1T&?Vkw!*66?NX znr~lKXWjIt?AMH`3@~K*I8AJdA_e#W3ys`~>z>^W{g-QS>$!n|JBYB-+b`PZ&rb}uik!tzd>TpauD|HV1m$BUXB`Vc zL05>@1 zd~}Tt$Ib+)@&_DUPs3{W_hAQa&;_p&7*J_ddtqY=o!C-->%FtRISR`gp!*bT^0)K( zkJA(&?ub=t1W+@A3U=J`)}~|4LNWx98Axck(bVwH2vI~~Ls92ITEpva(FU9PKthKP zT=ES(ibF$)Q?^=1zGK&|dB6MO^H}}XAws{^Cfb<(d38%vJh zRSH=_=ULhNWxt|9>;^TR)Lo;6k3!Qp&EAdy__N;KatKvwb+JH(wC{)hkpr<&M81FQ zq&yIyQ$y@-xlOs)h1*P>8DC(cGbgrrr(Qtw$MP8zTdCm^snp8K%D(#--6DfGQsI)9 zKWj#CXy(7d`5e;r>L6qS)@os_SpSiW`)CSsPQY)zI!2&U(h%XW{3~d`u61E!zkU@5 zxfF<7H(hH|)q=~~@Yd#uLTi#Joy>nX>i)ZbiRKJ}Hgm)12ck>mbpFHP*wx_7;{_pS zu^*;AJAT`w)?hnFs-`CDZ>sYr;9us`)3@M$VyXG$fsnf3FkBM2n1Soj{Xcq^(6$pj z3Dep)S~6_0Y|%S1%s|la-Hd^!%RoN_p4Y4U8Z%Exbr5xe#-(X)!`gh{J`)hH z^xvCE#G+ZRW$4){a(8{aKO#$2>-{*A%J(D|8C|`1oGVl|TVoi1tK$_|bNWjKA2=jN z9LOIFBS){-1BRsBc0wsL2nHzQ027A|=GSKs5YKFUUbkYmJI@h5AqIWA4nt4TgyABW zl{838gAovgmugObxgJ;S>{DRi_|3A8cM13$Vfdg5BvS(xr!k<R(Y>}?c_}T-0DIlAw?p44a*MJnV3eQH&L95dDQ{FwWY7-C3mj8xWQFLGyjUg1nOb+AIZiR;AP zdhH~+Igpk#@IBSsy!GcUE4ZK<5OpB2FOk}R(}N}Q-QkX3m2C{WjX%19uPgY0t-V^I zDyCJnh6KO3SI)`U=+@a@?+#-f3b*5&3HX;=I}wh1ZjF>}7PE>wg?#LTzvKgVQ76NL zdNwoScKRJ5+^|qt{^?au+M=uKyl^+D;ot=;Wu)@161)(1vr(PDAOasPL1k$Lz7H5Q zb-oInR=?&n0cVR}CK{?zux?84{xTgivSN#8dXUTb6T$5g5`g1Nd>gM`P6~+PU80`T z1pVi3uLS~8%|*5x86Kl3s$RgH*U?&!?D$iMZnhh%nelw8@)zHmz^wffCnXBIF8pd_ z&QD24*ndcLi|_eZ!Klvmv@%9*dPj|7O1IX7ICAhYLg7t^^Y!0+-z?uE1Sn6_6nZW%t7mj@&YY{DE!sv$n^>YC92DK z+7Ho#OxzTnk9<-BMn->y1eM7Ey$CkZJ)&~bFkD3 z2R&XtKYy~a$@uyy>gKP*V}Q7pko{Fnl1RU&_w6`f4->R`HaXl7d^#XV#X~nJe<~QF zIQxjGNCa#p*UH{ziuhNLop*leFHtA+$I2o!->wJlHSe8$q5K)d){?HYL#$k z#~7{A&ZLhiN89r^`UN#o6Fwkhryr5wI+59#3i5x;C882^3n(ex|KrSD!pr&TUS{Rc zY_-N=Q(*yIaSzNf0*jk6FpL6`C$Ei(oeLEZDcG6;h0S7c{ngu`Bs z0iZ2<0jMB}zPnd79kFfx_TDAld+U4z6$hTnLC*=6Z7Z5LNJ&T8BQTsj-X99(yUS%^ z9et6^CdbszlLuT69N9R5zfE}+0{~K=l~#TRY{c|ctr#y*oTsWe(desHuK(k7w6jl^ zmk&BkPl5TDem(gr?jXe1xyd7kPG8pikOZRhOJ@+5fkWQ0wDw3Jk;kveK>DXyde&x0 zg-LFkT~{nkkSYGhHadw3cW7a<%L{SuLM9oB8yshFh3NyU_-8PV)&a@bmy1K1s(WQMiNnyOpatghhazEi_TRFuJb 
zclJmyBEOl@^;%x-iJ{T*PU58d7T*v_UHgu$y0iA_r9|dcwg6^FinzQ$Wu2FJ2Z^O{ zmqkShpr#=UbjHybw+uy_%=7wQ-(BptGe@!Wt@Gto48twT6s)X_RsTFCof*O&84D_# z6~o8*V7M2s^Z87}%EtOh#^9p?FM{8hkE+z3sC^k`V=imnNnP{$aFp}9o>#HC@npTH zvf)Rk#zc)C)F7|UGT(6i;&OSS^2kRB^!y75-!<#a#iX=a`kNNsxw_EZc*itv4fD)N zU8^08v{^sfET%e$1RlutwL#jK%a*o~Nq>b0d%pOA;zf9sS_VxwYjK~)c6Pf(m99lX z%obM_-_NFNu`XuDEFdEW;09wxD#>NKO$#CyAp%$wA|D-o#kwLMbTxh>JHt(Tg>&!R zXW#p-1gcUM1|v(F{xu7%S*=~HMwOFg$*r1)nm0WmWadJT^nQ9N*i`Uyc5CR_N&anL z|K-a$(o{-5NaY0DLP&nL)$n|q({4<0>0SC62rVxvnkoMF!z64$vdY9viYD9*^>xnu zHwEeNJZ!BlIAhMFyN#M6ij6y#)N0CIA^p zojb(R&vz;bCPtoi2*bBS|uf1AZ!As z*zMHy49^7SbAD#Sw~DMLnDw`cBx{oJ=hLpB;M#OvteDt)KOv}@H8m@l`v1}O)=^Qe zVZZQ}5Cj1MrD2e6lu{a`JCqQlyFp5F94Q6qk_Ks!hM^e*5hQhJhLCQM9BSZusJ-9! zob#>4A1s)e=f1D_U42cXv7!UiAv*41H6zb9GW+hAB2`e+Vz#>o>yg4-T%$Q1e}B#R zu-|8ujFdiTcYb7mAXYJSTj@MWGt%F;*%dbcI$j&-Wn+(6|GeMipevr5*^e~&?6W-t z`<@zmWD*~@eI%_b{=SA3D~Y6*q2H}JHtN}&r9)NOl0t}* zqv{bwY9-S*8@uuh1EYXYM{xIIXd;}7$+VGLdP^hY;HPinj;X`+>vFrDx^caCH9%cV z@1fQ4@cg?yo&OD-bX>!q0D+{*42Px`q9M?-uNyn_tX3&NJnBRa`f_c4mbE)Zga* zt_r%zZ+cF79H7khx3Vtov3FMynd_SSAASn9ZOFDNH@*EU4hyGx2z z{SHQJdye8KZr8wny<^}6xx|l{`CVHcUHZ=EQpTS@s>Y;qY(%&g?wj-#X^-?t8vI1( zd78cx6c?)Iv)kE{A&$2|lY}GoGnP!Lh{cgV)(QddEa6EKC zsjgqF26BOe^fun84fOxxdu91!<%zdtkd1>JSs*rfQ@WG)pCH9FHNF<0lV4w+5^@Vng}U zJrKs6+Q%-O*irKsDzlS+#!XrGV^oeKvUd-TiY-3~AS%c8s=T1#ye7#UI9|)V1#RP> z&-Ew=J|WiLoIpPh^EiDm!0{XO7^(zeU)fIy+6)Z)Cn27>TNpD%p$I$QRs*uQ%ZTBB zMArZQ3VGY|PrEq1s7yVqDj(^>rA|5aw6v?oPsx@b-Lpmwp}Q;xSIUW0>`mJ5hi`5S zXN6ucLmOkArrNv~UKCC{{>mB4QvS1ExI*VxTLg0%%r zFuRtBo_F9N)v{~+bFG;qbH=qNsUa5n>7rhK`GXNsB^446He;u_)98CG9mhrs3Zc+O zKfXPI1IfKGBGp4Koc^gS!i(DSfB*}SIuf60?j=kY4H|jQmA!t@QQA3 z_wIz>Xp)MRMXupANvwS|qE9AoG0--w@9eninG%>GjopC${}BXU87av6ozdj;AoyI4 zRAuRKVeTX;KQr9ET>z(E)msnxfM;~fd)ZI{72Yb9d!7I&=arV+kC1pH4&PLvw~%OY z{5Ae9)!??P3C_R&O>ePwcY(U)4VFBtFIv=1E`G(-ws|nOCR!?RrX<9LP%Q_@d}%9b zxiPr3+0RZ6j~lDx>2E&Zds+*KU*a4Evzox_KD;riENr(BUuSO>!n8Z(tRu4PLMPdE zIeX>Khkp#!;s4`vgM;-lf1ZC?fWbDB(OA(h>M7qO&99FJ0X7q1G6Id*(>ZvWV9C+R 
zUmgMzhRMYrqh^R~JWp`f#_+>D9{N0Jbnm#!obVYrB}eY{Rm z-vgnw@!d!8^S^4?ay-qVQC>UoqG$`S%@=lxUiBrw$&v6PZ1k((FZeAs{4 zor@#kC>p2hi7VIFV>O-1xMyriqWHg@G|>zieW@1+u@JQm_;PdbdD%=g$HgYQaCAGz zQ>E2Eivs*#Z_cs&5CSSX(-O6B(|IB{C@2;!HA;)vnm3+KS~+5CY~n@l`hP$D=C3R) zkR)5wQya4<-sM8j;pyjf8hE2mUq3*6owH_js_%`bI=DH81no29VmK5xX`|%d9_xP~ z${4mUoJ;uXpy2Fwl29kl`3z0OOg+cRx9g_Pe;}v#YIS=L?a-P-cH82#uOIHh z{8tX|7x;XO9Qln$?1oz2X)Wgi*r(M}&@6|9XznpJJPSl=&535SP(Jm|YhKei7kot_ zw^4#a>zH$243}fMZV2*LqK>F6KS`W@qc65I{ioW4qfX{}^0UO%F-#lJ@2N&=TdFdv z|Dxf)%)Ft;1&@FpRFCN_u8@S@VVY09i6RBTXX$2UazFjmBitWdi?u}N5{BX`=kC+8 zgVVPIXg5-K5Bttn?b$~wjgNS{57EX@JW7bZo^-7l&NOm!mdcUC(KX-oYO!nFzDa-l zpN9Zyneuou4{f;mG84P~P0~Zo9ICA-z1$e_6awh)i?OBs(!%Vzxo^QMKv66`*HQSE zj0#9KX4=%;2|o`9&t31HK0&W-bJU#ShJoKZr8t>|Gh_`;$gDlZPaZ!3A3~FZZ1K zD*cr`CS6niL%%l{=tpDcea(dLK^N#JNOV)mxF?Q=;?EIRLe={<&F0HXjY)rSTRV4R zhY9d;NSq4W4n+2b6vYGWRl#tzJBY`_2tIayM{5bHYc4_F0UasTw7Nbu8a@=ATwg{Z z)t-^)P1gmUSykgtw-22PjXsuRrnKb_1c(@s1gjM^s_$s=Qyn}AN7x1StQ^$0^pQk2 zv;S-0SnvT@3~Ng=ywS>wF2&S{+h*5@{ypUF#;fB`gQW`y$9ybvvZ=lmub z(})WH)akCzQa-zg@yx!v?Q;Lc1v_dj0%JSV1IR^~m)<%apk4>J-H)%`-8g(mq+P*o z?it-*{&r?1sq*{}=ZyOTVvWLlZ0e`K_)ub{jgmIU!g^k3;~hT^-w&oA%HD`B3l#)Z zU;}n=Hkhf;vbe!A(~H_Q8TH-ZYq@`a3o^+UOpxpE_Xo4R!%DD(H+G+AV=&o*S1>6l zd;K|_-L;@P%-&&_K)j>o{xn_4@uwRth@u8)T{-tcQ@e;?ui?FSO^$4@@eNS0+`i?C zZ@{>Z%SFH{CL-0*76;md+p5W^bsMK_tz!*lg66P7)s}MtsI^Fh=9`Q6w9`hPW8COs zKcExiy#9m1S6~rGcVSDn3m#iuT|>eIo2tmNZBoG@zv`Qy$Nmx3#;%EDUp~K}fVoqp zkHYGf4u{rhtRNI3cmtJb#DFNfiZQ+nMYmI+5x9L_@@9Do85okv9^|f?`1;0yv}t_y zqbGh76c^!3;#B&h$7I+;haYRG#QgK!^*c`DH4CAnyDJ+dKjf}IH#7A@c~#|N>#i}r zf*ufOkJKAfj1z`b@_(}|GzvQxZ+CUa;pCT<;`;-}a4pd}t+6C5)|=BWD76@d@6s2-mMee3$GSSqtxM5(n17pgX|1j=QwdQq zAYck85z6~d@(pfERghDlvy>kuVuN?3&FVN8SP_){o$Q)LLG4QKy`z<%6|A?&6C0dmjN%5uGFL9{=AhAEoz}^9`M1k7XXXuCDjRelzni z*_-sTMUaH~6ZVWOqFhFr2{QUk)+&?J5KluD;PM#b`r(52E}S*akUNjDZr)nY0PrpG{iEwU z$1y@4ClB#zf-_7CXMGYElq)j^C3y_E_LbA0`#w7uD!&OwX`j)$8KdNKKAL~HXylb9 zplZ98+4<;}Jli$ALL&(;qwS5|QGZsx>WtfU%B$p!1&6Y^kDPyCUrGCWK|r|8{NIw% 
z{A)-7M*Z_5`T7~(Q(F=!rW-V2Kh7d%6GEQWRMV0X^e>{fi{q31dwiKc@dRW8u3G4Z_@8lLDvD|Wu+~ku;I|L5J@81zfczxYa$v>2Vp-KDnorl*K8Y3)ochwh1+O5E} z8`&DZJaFJQs3fQxLnSpoPzn5bZ7(aBjZ@r?><9qNTq3bk0(kEChK%Z(&RZDv+rS!D zu!hx4kk?kO2*oK#h5nUvy`+t+*5@V^gl(eDYSBD)RIg` z0p)ia!4Z%qJy6*dcOv-UYrJsdQDenJ3ZvWGf(fQ*$e$D=MlXkgkl5O!DOnQ0Iu8J%F>00@^E?~mKLm~

^zw>a5Sq0$}T zWvhDy7@N(gi-O~YR!H<=K-I%*a?hSQM$GCfMX%pcj$2VKw6ex;Ik7ULR=R#w`-T3S z5{MFCiX8-p2>=vKkZ;~as`?)^3ngRyUiGxxH+BjMNK9N*wKiXH>a z{tC(MbJF}F$la zDC{s&6n%~hW~%o6*9t`d{MhIWq`E#=-Gv#RhxMN;<$7!d=VE_?<($=mRR#8+8Ewd_?P< zKp`lj$8Nxl3fg0`vD(pkkbJPw!@}b8YI`6TN)w|XR`RYphtiVg{d~X9aSD8d{>Y>v zgGfiIc*8#M$o2H-bQjrwRJvPf;n{yqS}?XEIgrTixHhxM*LnndG!1SD-8g}NfZG;s z@F1hz(g&&ZrDjedx5S&z56|XKxJB4KK6)ZgN@M)bN2S=nQD=?xVy|h{MbNqj;|+|* zF$w;SQ&xZ^37)I=z8duy=7FaoSg$=2b7 zuJMjm{Ral%#Q()YcC5A8TQK*!Hg8RxhY;L39NZy0`V_oIY^)-pOnSYqanxsm<% zM_|ze8p#YJurY9n{al{r*vDDx@FAnpsrbd(4z_NOw*Lw-5{!goIX}l$KDWihLi3%} zV#eA1qE3kqYmz5ajy+fIuN=VhbLHeS{!`VP>s`d0w9^>=nYa6u1Z?fZJ?6%@S`vEt z$W9{_?MXks@-X&$Xl7T8slz&|ncR*4^@-c>MKd>gHpS$ zvo`e2`><7kxDT@P*iFu;z+&g8DA%*KQ{}O=-E-v7{E#NB3?{uz57Zk}o}QC#o@`-k zr!nwjnZ{Zc8t5O%Fm&_&4@oQC_x-eTzUh0eHNFM%NsFB9tkJ1T2p!#0IBBweowhe9 z{qeFWdqHOwE3^aW4VRyHHgo^}ew}U~z2uQ~oRYpikgC1@a2(7ua2?V@(uvC4H+^yj zV(>U$SVw&AXB@z2Xa1#TV8|{iHdZ|eK7{V=m?vNP7_`5L(R>Oj{E&z?c2aoSVJjjd-8ezxr9lwc^9@ z3ftrmTDIu^&}g_v5_kzKY%b%(D!w1Ql)-v-=6bFi4?l9Uai&QxJ|u~bMDKt#Mv5=N zI=SL@zr{Cma7LJilr8eyGSf1d5@?+TJhM9LaVy1qE(E}L* zvPLh@1~|v>!bqJB@(=Svhk2A?_7e`L%QZ`;9YOq$^exJm7g$x?5#DfFN*)$rjDUevf<>H0~#AxK}bl|q`8n)AnX2>eze6?+sxwf>Ek7!iN2yV8_! z6Lx#iIv0$WVX5C{MkHtb<<-=mmYtPs*=&bBtPTk{QBsR_WaDQ}x6r({3cn1{4lFeW zH1BT;#?5Ld@mj+45hpfXovVTn=cOSb{u-nHlpM@!i$Mt?eo&c0;$dQGOThB!IWD{b zW1T5RbYdp58+;!$)FAcQ^YWD}#Apq6G#*S%O=Qr? 
z4MYaJ-Mc3IR%^I;@a$a#U-6aHVG@_syhrJ-j+AjW=DhQYgAX#>m6gh)%Jn}VirtVI zD4qfSVOzF6DX)h~0J@z&0astq(Sm~ArQQ3bnklQ{BlFSdleOk)%0~NlQ)y%RaErV_ z5W7z}K|^`R@;{I7Ur`nZI7ZmJB-LwQqWmId<4@2bX}jqJm)b#a{ClY7{JLqTeY=r^ zsKAZGa3Fb+MX}nX3BK!)c<>yU>$!Y?=ENOf$jvk1#GKv2|7qFcxqLO&ksD9@h}L*SSx2cf3cp(}z{`5B(zXqZoM}nM%5LQDTE#{=@ed?OaB zTL_iG;Vu|MH6BPH-ZJJf+Z%aXO1wr^RIXdaVE64%Dtb}L`jo2th!U7KGa*6 zyp7s|u&r^zVCj=m?FoVAQQh%)GRl;i(v#*{gs0Yrol)Y$VA)ZY{f>Q)cB~)%gLkiT zqCgjUGeyq1zn8X)&%L}FOswH4F*MBObNXr<@F@fNcjms`B$XvVWYklp)BIs>FK{un>HN(gkyXXdL3TG>BU`DVP(zTz&1WhZig z0|ILtmd(1nsZeN>sF(cn&(KSz{f=GC_LmInx8=WFngK-7eh?s$PfFIXUyd`_?_ir_^Fiuj^5R`1n84w z{KQ4JhN5Tn2WM$^QVJ(d=GNTnZK;az4;#u2va1eTI*s>`lJI>M!)rcewn<(Ga&J71?j$=UKf2R|U!+d?BtUKW8MA=sn%%^*0)Mlqecb3?iz3%=1Rv7>; zx70Oc-A@AUR)=1rqld67x?*`}TFWD?$8tC@V5jH_d2$`Msa0Q*k^H!7L|o8@ABRn> zlCw?l0e(O59je7m_*2s(1Re2Q%|k5RxXwYxNycai2m~E<709F%N9D1ERI+`)X8s-tHRlX?rNhsfWmG5$dO9?tt z^wP5VK`A8iM*pOb4LNeO_nQh#yYCA9o3Gg|*A?(~GR|9iHPR&wnD6Tt{DgTam3ut5 zd}c?%Fb?$zkLhKVJEcn{E4`3kPp;tQA2gYZt2_R7o`d|_Y<+K_^HY*Xmkke5R-3lfKyRdzYpvfM@0sXk zk+c3fS7klDn>+WncS7^C&^Yc}4I!44pAXHP@Ozfxw%n4)FP{ypB^2e`s~rTx8nX}0 zZOQ?4H6`YL2A+ZpIoe*22trKn)k>xvh9y5rjmg_p^i7KtJ@R#JDbUy+7$|#Kw0CE; zyjf%Q96$NN`4&8bg!ht@5JM*8C#+{&fSy35l$5rY5Q4es>}CI;K?l|Ma$$o z@*@!oc$;5dC+{TEZzK(@GAT54(^(){99#)N&edC=oJDZsWxFz3+LKM+Oq9N;cREr_ zA-_y8OWe9M>fQs+IXkf^nQcF#wj+QXpE>DJT^5Z!wrFW`Z96KU0L;ItSaZ9}0!SuxmbPGyhDs2lsQFW|9l*vtoX%OZMm`=zDl%{KEycKg7wO4PuXA;V(m-G|mPrgD+{NE`jC| zyNJ|0pEMPA9Cg=ye4&W&JnlPw9jHQC*&MFyUM~JIYwlSeI1!XC{PYh`NLnC6Of0@mAyH9ySm4>V7dF% z?du@JYKyIKMQ}t3GxOK+b6#dC1o=d%4xqMqdzv;(%>6&vgjK!k-HZxJT@CT5U)kIL zU}0Ni@kt*1aqL+rNH)zXDmfni>3P{?2kfP-Yxx|iaqu{qeNr}A)!rT|RajH))yyU; zKW?z{Q-rB-KtG2)2c!t>W{mS-%N7#x%?>R>s0sqFoa5m#>R2{F%B{KkTs^Qx_On-gx z-K08O!vCtUz9#_uSzb_>)Qc+B)zV(BPJ6|xLAiN(3$nTAI_ieDD6HqxXjrXR+jf~M&=zYlBQ&TIb=PfttA9}7>uSrq83+zKrP*Bx@ z)a=fP$B{xkwSv1AY1h^Qv#8r9c?W;SO%8b3e+gbvXmio^%^OUW}UwiCN>lE{Z0*0bEA079<6{PSJv~Fy0?H@Gy6 z6^7g8G89^G=ogad_7$F70o5(+JuZm&7a#Csxz!dvqjnRh;CEo3D0K5GSeH1%rYgxh 
zMMegB^i@|o#^UChjM?T66#Gm+D92sXoe-c}wl_FN8VuG5^vv*(miyd>%2GwNr`~Jw z)hBcn+FPh$HRRmju0G+~Qu2G5_t&#x+Z0L9Gj?=p*W|*(*U`x@DeAK}l>lcC5Gk^Y zC_*X{#3z$=pX4@ADrZfqDKaD2cuyUBU!4Eoi|^MoWb*0Gh|W{Z`A$E4-__>+gQ@1K3k}{jFCsc-4K0qQ$HZ53m9NS{^B>+8n;h>J z2Qe!;^iERIxB2R$t1#EF@>cP7d}F8g>dz@EXmq9b*jGfLg33h0&Xtpk*1zKX_Gqmt zMuHOU-H3xUmim7U&-ySsWPg*k2!wiozipWcD{ea%k8dgk?&J`eOg?fv%_gY+7%^I^ zb4**92z=Q9;!#J3x-1)EgjF!S8s4we=1o%5`|#OO7ExCC(Bl>M>bb@4o}V@BD=Q*j zQ7}cn3=$eHlqADem|{pp0Fg=BQXJIm!D25yJHy=XI)v54$ek@*=_P+lW2=J@1%ilq zR4H8J<>$N8&F2U&y3Wdq^52R%*9oVzdx!73PCdIbgv+xF0(IUBceK(WVS_L7V2KP%2~U~GOytKGaoFs0%07Lw#k;6nEa z!;cRRBMMh^Vh!i|J~kUUX)O4{)|6u_@z{em2rBSr=|+V&CN35rYXo7(Bwc5mQwhiaz#(eMm4h^kOmT*vomgtgK^ zXFdz7eRo-Jx<@nZu}?L4x2Mm25_-RS3lRFeIYM8-6txQ;zXbGOn2-joO;`X&<11U_ zn>LX!g!dbv^6C{p=`}VQIC?4%`{3$d;y=;jZ8&OsD>C$Vl}3BWwN8(ih%dTXM+87O zH!SF7)(w#yD;GQPO{g16qg0ze_x#%Y{Qh^WT#y4$I#F(3liKknI{dmH$-JJtB0l z5|95Gd2ygqe=BKB5HJ0a9P{p-Yg&z6lq@L~87(W-=c>bkWEcfDpAjHa8A#2ZGq-zH z|N3qN0&-x|Xmk84e?yAeH$p0Y-nG#^Am~=6UsNU?Ps&~km7glKS$&zni6foy%wNmP z#E0a(giohV>%r%ZQ(do_)|OYf4$TMXQ;Vcr>e6fDu~G+BYs@*!3MUFpfw;2TTT5`2 zTu((mVd$UoYk*vQVf%jrq(c@%7yQS)crYvd*K-qwjqbDig|$KOhZ*sTh}OAkO{0#k z0YO3bhJZacnvMdQ>CzwqB@oU|kKicDr_VbTJibkq?cDhAgmZeE7oS~``)lUQb1RFE z9f;$<0te*4rWokyZmz~-VWSQ)(v0Xnyq(nta@RzvyNB&mW{{>NV}FyuWqEm#KU>c- z`*R^aia^la+fwSJcNn!765WW5>th%xp#AE!4T0Ct9gVP2E4uZLu5PeUbA&^YBW0%e zE(qP3DZY)PgK3?yhvefYN;y+g9FFo6K@j#5mJ__@kd}JifHRFh)zOhyWeMHh~NU9SyG~ z+4a?UDCp3{;D5mE9xfXw<3E^9B8g@C!cpb-+~%BbgQcDF-D~|fIE(|PG1XS5$h+YW zK%96d6*55qXq5UQce;DGAIC*22F@jAxfSp2!pqXzw_<{Kq!^!37Ma0yY&&`r9UPJj ziwST4h0Brb&H4dAI1RQ7ge*aF8#bP-t`3ZSO@h$YkG z!^5@duKTx^11}t;mMrW`D5X`;yGVHtpm$eYNKxEBxAcql*HTEhV#R2FR#vYoo7&}> z_=O?#PLOm*9^9=0KmVHI$CxyoAEE_WQYIuhcz)k}$(lte=mMBJz%o*r*_X6QV#A8UvA{^1N7(b*{>oK$n5VEasOgOPee@chLclL z%m=uZrjZB4!b9qHT1We88`5S5^-~N+@$=kJ?;^t ziSIyMiIXFcs#F%`2`m*0qMg8XIL0NT(tSSTR5Z_>dtgp=<+ugWJJ%#1kdl&Uw*tF@ zz8TjKQn@$T=X6@YU&2#z2o$8a0M0iPl{d(*sHeYZt=YBL&3vP~=)hs0;Ll~c7R!5o 
zTOj|i$NY_cMNiFsxct$F=+{8H1*h|kk0rF$8doiA)T%X6y!efHp`Wa8I)MWrf7~?d zOgD#!CWS$&H|y8TH-mM^AoC;TAQOE#xz*_F-UhdewXc24>Q_PhJr=bKRbO(k(7sf4 z$Xf?0!V(KTkyioLd*x}$^CLE^h~p-k=Q-@Hg()oMi$KJI` z0fIT>6sK$OGKa7f;>BP%pxmZBldfguM+aYF%_j7ra$D&358~vu1Sj$ePJndwp`9!Cf+S-jsu*Ca$49@UpohD>bQV9lQllN3CPtF-4<&3$;Avj3a#ej{ zB!(&LRj}6B9Hjg?EnegbMsAxBNd^P|n5-=_<%~02D8!N%`QQsF6a`j9xKMGVQL^gi z>{$WvlY=@j)M{HlXHKX{`YO{G#|M$)_ZFVk*Tn|%xA$K7)b(PnPK)t{`FK&e5nKiZ z@e0QEdDLgle)20I*`4)j#-w?Q2Z8&Xj$t@XGCL>&e+%Y$G8Cw|CVOWc>kZ#|=?99s z^AS0%o?DN`)74<56;eEWCyDY+O}wL%ROMk@_XEQeKr-X+xtNYz(n!m3W6k298hO8o zswMpLG{7Th&c8Bz=M`M7*s8KqN6Lf%yvynwKofkBX*JaPqyxv9&E3#HwG{Kc^ zR>L(?@$hhAm?lYk<|X%`ES&UK%c@GdfL2wGt-=1JZfK z<2x3$L?$U~@ihM@=G|&w`a8|7W2Jvb_$t8yfPOMCdE$LBd7{uC`0cW>N`m!b?DAwQ z%i7v*V5YC|_mM$D-sWJAhMUTP$^ydgEhU}1CA)(i%Z2O86=c|Q0L8yHQ{f7Wi8uOiBv2U?g?ttxjM90Vx^^<_;`y5r9h_Bx_{ z?Zo4j;{Wb{RA4buZ%ur8=%IcXAU{T^7!uI{4fZ!S&Tyz|*gXsp!tSs|cGkzFS=<|b zJXLL=rRfEA6_@RPNY&vw^l-)eyWZ4QpP*6Ujk zp!ZaG3lIfC!|>y+2l`-%uD1Z(SvcQIx)=d~5HUH|{@CQ^;&{d6Clccs4^Ns0Yuy{J zIb>M@@a-VSv6f}uIpqB$MGNptvyY}|AV$m&Q_KjYg@%NAk6pw{ z^80d%Dyq@PzPM{S&|k-1D_EMYG3m77T~t6MR-!RZ25k10ysYKKvEFL%FWV|!c9(9> z3q!YSmWN+Am}R3A_LZdH2CKfp~Pdm*S3M(@3=de3+X-vhN9Wm~z@&lV6y7WOm z#d&SE0$?&-4l1IQk2}Qlt&nq0+7rWLA&OGvDxwLBO=&^g!M8)j@3Bq`YXW0mGvCJ_ zVYL-VpR9n$L<(y`0mUI!^hSx%rDiiJaL$+q84+>4s*5LorX}kf4hXwc6azQLlWP+* zScnpWT!a%syxXW+(|g)d#oHeHa)$X6ydQiCY?9;sX|dVRJvF{>RypaPFHi7s{^a$^ zD-eeYGkTm?(I6r?%THi~2ImT@7o=K@)TI5;cKF-=Cie`3qIhV_BwWjI1MRtOyNtV( zZ(W)^plaLrgXm1Xx|Xvw(5u6PXUbVe&B%&=EM*++Lx1I8s=}WgxhBo+y&Au|8$tsY zN)O+0v_UFxNpsfaP88q+xs84K@x`1k0!=%f5?^54L2@b~Jn`MBS12X&)dKDpV_Y4R zX$>q({q?&GY>dxN>|%qU>Q)^`G%KJS0ab3|`*+rKeKjkbDT5Lf9LhOYds2HRNDHT) z6*`@BwmU%jQ&pF4A89*6GvYm%re97-*N;-?FzTtiX-1Um^12>xAcq*+YS|l|-WZd#F=IXj6d+z-?sB@kL zQ+q@bwOoUfv4a!){_6xf6H!nu;O7VG(=@E+i=`@kXQAn| z)fUU6MGJ%i7v65n+w)DJb&i3+XzQ+bbA2@~I+d;^W<4<-0aj5uEt`I^#N5W!(7BWoPhs zH2i0mw~p<@YsZGsxi&en#OZi%)4G(X2rsoWZXV z?cDT4Zv_6^=k|(68ucdSi$jpjsA^U7lcvX}q>h9443_+dbrgeVL__lAU)ImFcJ=V( 
zZOL^tPL5_~8DR==-#+reJZhc_%{;A@ubZ3$-o5*I;0p@sfE8vrOiS{9%>*?n zaaCtl$h39W-N%Kn8siL9V(!#bD*{;ma_PSkF>Mz5^zjRd(2e`Ym*2L-{-rWJW23tjE?n^Tv*ru+0L=UA24)-|K4515n942> z`*F-@OVscD`R6q&ll9K`AUL{fHSX$6Mb6`f4@M|>-348+q%8a_ea_DK~AwDOO>51IK{!vaDjG?)wXY$uf~0`o3Slzj13QnM&^{b4*W`#fHo#4iT9 zAKqfWe~T?0`?e{=m@N2JgzQH3{J|q3^%Oc z5R*C8Th?O^_e!1iw4e~Mrw&%i+H9pR#oD3tS-e-}PB2(W)Fa$sd1#W?Pqir}YR9`& zw9#BBF>(9|gVdUA4%yz~{(~W7KZ~l;E(9S~Au@LV9N`w+!4MOBK5%Pr$Dbi-{7p{v z+*4AOhJ*DD&mqDYQ)VZz+0=%7=hW^@X!@G6Z((eoFU^3qp3p(q@3$M>!7v#L(r_>4 zD67aMM|sZ-mA_+@-Ompo;~r$q?b07ENb_D|>K^t#=zY^sEV>;E z9rDCaUxHlmf*=wYt&ERu^&%)g8oyECHNXPDuM}gBVmW;XySECh6DdQXLnmu&%WWKS z<4jNoJ=V{D&~5n?AQHp#lmB3=W-hym?T^(V8S@2N-SOebcb06aja9K~6LJ}n8!12m z(yNC+=(cz$UxO3*8JTU6iz-($mMF+}i(Wu#qIg#fTI}4qD6?$Of|$avU3eiq;Vm#V zIx6_h(=W46YjhKHXvy~iFORcjz+6lqS4)>*A+V7>&SG`rvp= zD0zNTf|pEv6#0t$)923@LJ)<-csgxYD&_dewnPkj?(2Pbf;;zTQ!5Z9;jS8QU+ge5 zjPnRgJd}~-dfH-z3GK0a@44TN8dRt`g6-@=6{44f_$tT{75ikH-@o67Q`;0}Lf5BL zc`}}^q?#Fx{50b`oCt8>XDpdxKB-hbBXB{S52>cwj7Su_J^Nn0FKRE2TkWnRw9Ws% zfd`WkI>GziH9Z}Uc?>G(J%N1amAC77e18LRMyA*CCk%as8EgGb=g%dV=lBcdYAFsr z1BChXU)edyh0kx>r#K!yf-__7tn=cFfm}{ZnGLL+Dz%Ro z0o1NWkdP>@JIZ+1;nQldjsDis`�M&`!SV(xt1XiEEWwj6iVqA=a3P_qv}?Siqiy-*wG zFWk(Mx$VMH8UkRnUOmAT(d|a{1amdA4H~cTzShBYHmVghn+NNXHj;hlwwqo_j!l3+ zpDTPotR6}H8agRQSo$t~GiOXhI4O|hIo^T`FOlBu+`pse*tXrggXToE@|T((i5)1K z!~l#g3F-3Pzm^sG*tb-=l<#m((u+(Y6$t_PfwVG~Jr3UNvl>5i@1@Nmo@-!TeO->G z!_^+_Fn}l55On%<=_O1_<1ATXEW5mEoK?~v>zgNu*n zpJ9jJS&E&kSVta6P%O0u?9dQRc>8+5pT1 zW9l4!m-E+V=uRpAP5RN@c}N^5M(ikDGm4`UNQ~yL$Ah0p?T|^osh4jc_8h0nU*6fl zj|MRruI(+>+!-!Cw4Pj|{C}S?;_nUJd)j16(oJ{PvQdU7v=9 zyo+v%=!EbS;FIA?;(smlG#>>k7y4*@xe%ZGOps)qsIDsej?+x)EK`;7RAWPx&iUQktONxjv$BAFpv(ke*{P6dn%}Yp zIo(JTc|rjoBQx(O($s%C2O;~wfY5{Y2){Oks+ayW%+eDs;oqPSM#y7(hC^QWlu$#t z5H%I7bOLEJj|JKY`pⅇre{9Tw_{BhYOeM&MT|g zKMWL+Htd|#CqdRN)zRCYnW)oWy_FrxPv+~03nR@^Vm0IZG9A-u8egE8uRw$wqxB5M z4(z}e)a1jz5sHkK02W)YT)iv$N=+1)v8{QjOWc$q`hqG|5iQIGj-@gJr*^z5J6@19 zJE%JuJZUgKtZZfx@kXsC3Iv*ve<^nU=k)QCaX^^utA%{}0_N1V>Jegu)t4yM;u7MA 
zlwN472@7;Y@jdbn*B&}B4RUV%$fxHX(glVMc0KK%Ft$b8q-jkakT1PDQHSk>U{9Ww z`wO9qS!E97r1T#$s`vRdtavn2{>&rhs}(BHPjGo{Ubo0VIrqo4_=j#|NaN~#*|>+C zL%R=zBib|vO*EA{`sSP{2B8^jBH`2+^LOp@+3W`1O#D3xy|#j2?9r14!6w+HE(bzH zjcT~r;?+s$Xa|d~hWN*!&q+lvgax_2Spjww<_;j zjH;PE;dOiFzG3W(RluHlT8C%b?P5BkoZq6BCZfNn<9{Z=y7MYTmc)ls-IAFpFKScX zHBI(>G0uf~`RNL@B(}ra+4KTCkq-wV+AVg#s>c9yT9iQaW^&%!u`?T_2aOBp2)Q&z zuZ0Rn@7?8DmwrzA{T#j8;q;HTEyL8fi!;3EpyQX&Z6hjqalv2bQmSWk-@CI!4s0qN zoP=h-eMnCDm??BkTIt%sAu60zGb2Zf8~Q4o|6V{E{Di#@c!3diE7D zQ9cDPcM66c`L>NO1^b>2j6QbPp?B{D3m06A0_?(QB#!v|;Mw*p(Zk-8v{+jn3%eWM z`p6&qFc{Sl_koj*{X%(;RE^E<{+cqbopJBIo2vS8E3d%EKhw}TND0xgEFtKaAQ=#= z#{tIU^(&N>XI4b?z5c%P|1tI+&~Uxc+bBcyE_&}ov6KuDh=1*Jl3P+MzQi9ciVN}-+4H#}IHx#IT z$yC~>`phtqq$vF}yHi>_LTZcs4vOdjp@0Q?g}hF{vSViCqz^);%s{+1kHI8`#E|!X z@IR$0IVjl_h?3Kln+bPyqcNVxD!Y^6KrY@b>QUbJuhk z3bhV0sXbrQ8d9m(wtH1nkl}6U~3)QL8J5PC2tm$F)7n6brnD; zVp{uB#KYZml>!0{`o+St2#5)?^wTji@GbPWNtyN5F4eyz{|f?36fOGDdj7Q|E#Zlk zVQopMsd>KCVh~^jM1W4Sgu{*6_61=dbGL>DIN%WSh>Yas(KM*eeT@Y2w?#GH z%+Jv!)}A+hpKVw`bmWQpWi?As`}pjh0?!IGo^4J&Zz7jrCDQ%QTyhY9cHI%JrkFc( zz!3E3kL_cdPSS3=$Id<23rHpZ4l^!?kLW{eOATx^eFano5aq3-5YimZ8|8xvWX)Lx zq5%_R$0E{si2fBI7j5WZil`rJL9ss=K5&$}HwY6;DD5kPttgQOTHdpg!dRNJVt2jE zgima^x>$?lN*hn|UYkNLz-G;Vp7A^rVX8zLrVHl7Roc4hOfpeBMOIoe&KV|6F+b2{ zelC+J7;G>?_}KxKbRd9sazpdG zz78A3-$5bo6*+W)cc!L7f}RMjo?RXRkt#d^Lz#K*PP#j!M}7LfCI%-Jv_y`PcyAL|VOT-r!NxVvn3;aKj4!6_#bjp{EryU9_ns&j&-#8+j zawNEAKkrEnCsx05>F|Ei(L{G{5dTGfVrQ!}+uT8GTQNvrA#}0XxU{tD5xVSnt%;H( z@5~~v*uG%i;4U28j=k72PNokW39*mP2Q);%xAE=qvn`dj8a7nIODG>HnZKc2sTI%+ z-2sPz&o+PN%7@i(8iB&ToKb1@T;6J{?Jl?pYUnOr;YYTE-xQ&*I_Ip1O`H~6=yB%6 zt%sya2W^y|set2D-R|C|MCCXm@>w2T=1{@+O?c`ZY%x*MG>Xk3``k-@ygb}N0OU8- ziCS$CI#E7V(;DG2#2L6269FNEbi-A9LT$CbEb4y95)7ss2&|53T{}198QfCmVw9e# zs{-!G-1`so-Bi|VZ9QE>4%%1YrW}e560dS#dmc5K`247U@2-z_wH^%;yjD~cjHOgMwL_) zhGG(<{4nUVu#ImS6K7x=<5w1%DyK({6z@i^Q9?ywcRQ|rF5JmzR=pi}v)yf`vxklHrbx3``&S-O{Fg|%A*}<+jU#~cOnWm? 
zDj+rB{H+^Od98tX^URNVoC19W6ugeFtX$vRwfQPIF-h+&cZgxlopk_v&YOYUR7WU< z4`GVhzhaRFhbgRS(Q?_~nj6$*Jz%Tm(aAdK+G1XkEAJ3MQZIwIL}aPM&|Ndri&;HOzRh3DG*b6;=I1fls=CIx zOZXF@3}H?MDwva7JbUd5|781n6*JDz-VNba;fExJue4tL=WUQVfZmxo)C3&%!q_O$ zNej$IHhT~tJGSmj`+@OaVxOnniah{lAa>` zb@dxVBpRO)QZ{Dy3AkP9jSA|zqlW*t>t<__$l3`SXJGMn49(I>s>lqKTN&~W9kYee z6RQV}FQoGmv!oQY|B3pSPEP><=PN0o|1=nzAR1L}Z6DOfw9U-?S9`u!2sCZ;rd{uE zX4a-v8s&Y|5+C%lPs`_0wBfeeH^&?A0#M$e7(3rKj~P*k1k1p`WZr^$i z!1#S5D+Btp9PoDg5J}METr2!}pXx|HudxET70RNi?yI+0!^}HGB_XyH_+^k#sCosS^pf-CiKZ%WcXUW;l1aM3|(3%8!G16Kl^|d267s6 z0o0vbDy?Y)1z#C+*rJzK=nXC*zgSBpLnR4qo!sRVd>en!)+zIWm?2ZUNX5J>hEt(= zzedJ?WfLn%fCspa#Pa9=gB*Tfx$SQ7E=M$!AR`oisfl~vk4ui6kc~3h*?rIVmm~Xo zJNT4k_}W3Z|Neic`bN!6=FZ=Z2w;jghh|{^n0xLbe4MBJza6E)K7PovJ&{#nR_`Jx zrOU!jCIV_TgS+N6t|4*uV#v$2m;Z6sMkeo~n@HekoqZRx}< z1MbpXY&E-&^N%R3F)@p)w)~|sBqWyvtW{n-WHrS*Yy(dRjx!*+!grE{f`=a5tl^yo zpeH3&ib-y=+(T#|ekY`|&jUjAuc#1#h$3bT@-m(`Qtb^gPC&OGvXWqr|LhEd9y{gW z>^+}1tFga1{l{r7t-M_aGOfakkMG--aGZn;K*oDzfsJ3zto6S6op2lod-7YZ;ps`? 
z1{wO?E&_6E7Z6qHY>C#z$xqd7abp{5D@|XnB}LZ zR5#cDS`CWrK7&ztkmveZRwCCtW{mwaKgd_EW99AfPUu{pmZTq65IVW;2ex$3V!^kF zTFE3!Llr6`C(n1nw_=dfjeu6Q4{lFyhR9mtVoDl(x**^_TIo=6j zV~XyE{&xUuEnyCZigclD@63pU^T+=SeQV0kSauaB(S@iK4XAe#$esHK*u(u<#o)`& zoA--!A3|C27(=y-L&lq7E95&mWE`Wcs9k4m2DV(R`NWAgI}M`ir36H-)^5J`O`zgY ztP-S|gpISbeI%O^3v%z~Bo8OR)Tk2-8*ffYBHN*$T6pshYX$hbOW2tUtp-dl01}#h zWP1a9F!pN|;Ec;AEbhI$S*Q}IQR`Y>OWNvOc7DR@Z|Da=fzpcNkS!~2%?$E{lTy&$ zeD+I9ya~ur64bne!9yh@K9L4)Px>~+{Xc9hN@HHH`r*{Ukqu~yxj?O>UCBf6?l=S#w z{#yY|$i1_Rov`Kwe8edInUC%d0H2y>IEvm}Lc7&e|Iu6%#a{xf3liIzcG!?D(wC{Q zoeRW9MjI&3qqH@H@NMf*{GTO~9Zw+iK+%ovA(h>B4JUzxH2e&#XYw<%R&SXoT#mue66;2|(Nf-QN%Po5Lt!-$R@{XFyEp zcSIi<40r6LUP_Al-^wxfHUF|_huk=g9-WcrIS~pHuc}ir57P(Y`!Bfx zWzHjLv^*oj^62V)dQ|-J5`pA9gm*#`FIxQ=*@hC)*H(n@9EB9I&T;Cb!P8Ty1+Dq` z8<=C7N=?(I7Aqa!jc+U{Yrk~uFPkEqYDjq}p2q+Ao!AHyj7qEcVYd|_C3HJ|@Ydgc z*IV5+pR)K%#d+wPkDH^Ay}D)&1J;CVLMPT3w0*=@FHlgKPLUin zT_aivvh&*vGmo3T3j3oul6aHhi12qQxy7_hcJcxs>yK!_pC1ObynIRS%Wz(6c{h6@ zPd0Oic&%-`HpSi4TA{uQ$*)z-`{<(CRw3;zJ#v7reUsbfGKHmJ9Jx(CEX@$Rt4qkjgnh{Qu<)1C+xJaT%apdAYSS zAFB9a+DhG_}%!RLHU|vkFcsEpk#k#4c=amEDnp<98T=N%g z4j2Lz5_d%AR@x^Hr5GFL^Gkw-%N}5su>7RBl3Ho@}aq+s@)MUid)7f37<5p z&=oh04-*u2YIM`Lg(^IOrngMtbteT(QsHK{9A_e)>Y7FNoYsQ)J4WNJFUOHm*9!+H zGsFSC955N4gGu1M7g<{WQPDq~{;LV*D4=jm`6Azn=s=euD}}R7pe3WtR!T}Vrxvi( zq@c&PnC@rvKEO0fVJFNzkeSGsw2sA@oyYO8U^Vby&P7x0R~^*LrPvI(SKrXj~zj>JdHawBaxQ%crNY1$vFy2Mfmnu zSed~FRs-^E+fs(rm40i-nz%wuSCa!JyQU!*ip@tiBm+F};F7#ObDp7rcFqwIa4~Vl zi0X}~wMcj1cEShVy4n8EQo;U|vpWyv;|9n)4__KY~`%{|p-gODT?XJbBww6?s zEI}jQc>J!(HAP;AhfV(7gVWgcGDL3rYi~BBt~IyNn#reqxKdi|lb9~aURTp+Bd3)} zw@Zm5nuJq&Sv1ri#|U<(|m z&$A2mW$;{ni6Ipaey^!Gx+ic&FgJ~ezI3>SLNyr;NJi-3I^+kvH{Kz+$`Q?=Y~|Nzse>+cm}%<{ffK9@#7w z&9u*$`C(`t?TDguX!bNw9us^tJUS5F6=aRV_SC3a5zQ>{>hJf!zc|@Rz_3o4PTUa| zlSAh+6bDCR0I#q*3iy=liTw+(%=LPwL`qL!$DXerSD4r7$km^Ce)E-mK!_GEOq#q0h7%;%kyWKCjlIPZ;LfD#HVq9Ccb37(DIQP zRJb$qRRZTwd^jqGWZq|af>n$esI`Jvotc$w;1;g0D~0@)7iL?puX+;p1bQO0abMEA 
zVOiWX#DMSxkKo&fQ)fX)_6@ID6t6#Dwf)O|PiR}Zp47kQ)7ySfTNG()fsE76A~A_p zst!F4d*_ysrIht=Cp3VaSoQ_!nawA zb?4D*xc#lyOvs~XzDy5p1xdK-T#%Nj;@WU()Z)ervY6IadKDi*z}sA^_|02&f_(Ns zsD6L_EZlU!JvU_Jz`jC;+FRlfAln@os|=DxW^_h^)R4_e*5PNiL z#zkbqjoSewx1b9X()t?6h_K^GET8cD#9$ONCoG?S2D3;jR?soU^LC9CPEUamo z^-MfA*O~mE6=In%?1bzasLm~Nm83KXeMV6dfa=}qwk+5s5%GJ{1dpJmj!2pKXpvmu z0OY${BQCyQHkxfb(Yo*rJ(-tQ zY7V;z`KO?I=w7qJmdil^TN8_`Tm{17KE~4^199JLVdn8kQQF%vArGM}<4Yg>OBB42 z3GiQMbv>{Zso)0uz2{IV946imfQj-jvn8bz(X@IjHou`fJ>N@wDU!eI+h>IpZDG;(w^g$wLrr2lImnTMN zZ8fAgqPJ|LVMta-Q=I;9Z*A`ad^Nz~%=y&dR^>`o7@4MrLfUF1H{(ZTn10jh#=XHw zrw4_9Y;8ed99~?*%)xNBX+AgY9Fz$KeuJNm(R2E4Ix?nGcs!H|>tNGuw%!OlLG5GvW1z#;`$^`fVkX2m^1ckoY`Fv;CDX!!66FO`> z@Zy%vKl7=vGwcUT@eTU@)m|Ml!)=?O^Fzv7C7J9|QY?-YEIe8sH>U2te|y zw&cDw9?)Kr*hc@C$b3tUm}b2 z7rW0D3#H`7csr}Ci*=@t)9o`x+h``K?7wiGtU5{C|DFXPVQR>Qs!E|2*bl5U!XEf+ z>jyueVflPt-R!{+DEEiTHDH{!~6585A)%V4d*Cm1DApNM5J@!t~Fn7YNFC007Zkc@N3SX{nHY{4UGA!od5lKl!=hJ zV@9lr-bXO@Gcw>C1iWe}(;1UIoCwpu)}DH}|HXdou6InCpnvFLSXXn{Hk14ihtu9# zDT4W1y~^}>mDui6SJQ>vI+nfeDlyeYREr5*fY*u4f9nf4vFYmeE`V?6JvtTqW}4_; z{8w&g#@H?51Ilo{uY$Qcs88yohcFEcf`CK0WY}ZYx06LO%rh(<*J*|#A?psSmoq!s z40C+K*2KftOfD~|t}s51o5aeZWZ!3Fz^3dI)U4$GSFF5c1sshBn_OeOsBgSiH4`H8 z!5{9|YznHO_TlX{XIsyf>ygxu{MLWVgj1dJFYcI8P2aWOf#mCw+yY)b)JCwY&c1zF zNu;*otEvR*VDHM@Kn@BU^1bCWW77A`7W~cuwt6b1CIT!3!O-O@RSPmmtP6jJHdpMh zNtB)xK#!Fd$=j9=6D6nR^5*@iZ9JFN^OPyEN0g=s5vquN?rtjY5oU~1JX>Vu+S5Ms z=QqUwS4)C&&0qxp3m|}(zo2&pwK-O(C9VL)CaTW}Jb=jgE%V8ex8R$vUX!QWaNnv+ zBclHPtb^n$eK}I`9&-lBaZ{*)zbM&gxiI>m+d?s#O(Yp?;pQv9+xO2yL)V;ma7WGy z4_-eZDfB915W_>CfgOnEwd*lXcrpo4Sj%iB;=7g!Aty1))BESVqb|4!6=h93uOiV=HvcFv zignxRdB3U&H2|)05c{kgrD5b-79FrTd_=3(42Pz_6wXou#i1Fw0f7g9x|N}kURP;szp>JFH+e9U!&AIQrK0(zc&w%f-P zyj9ly;-w_t9h+1P`Kbf$;dcw){7xfS(4|gWL`%u5LfPm&gBH7>uX30tY;#3NzzdrH ztAIa$Ls`$?q{Rta=aUHYe;}e#fs+BoQt_RSN^qga)zTRXQEJscx+B0*_(nn3n`PZ4 z6x&$~*9YIFr=51~ek88Yw`q2N!$c>4FU{EwTG+*V0~3yUc9vgSnE`;mSB$l>^~x10 z?xG3!XSG(v%r_osDi4DEQ6^x?B|agaDLJ#f!v>P%LD0yKU+^)Plz@*|xv!?QBED>6 
z-(&itso7kohL#Cwl-9U}p4-(YcN@5O$63FlhCQXIKio8`!@)2V&S9Htz3p8>C6}ai z7~)9W5q>@l%kZf69HGdm30!w z!61xK3P<(EQ+>!vfL-%0#g9Ksg4qR{{lzTVo(uxK{O#}cZCj_$NQmbJn9f05GcfLP zlA?f0s1dB>_KkLbKsH;OHHzWGyif@l4X^2(6LzJK|35aclzosD@7*{C)@1S;t)|Nb zK51nEym0H08JV&)l`UL)hPBB?fjqq5On&#IRS!=hJW4<2$D(Z(dGsh1qLJOg_%f0% znyrS!4E{QeaP4Mzjs*L3B4QNBN3+#Vc^K%yXwN8-SHP!hfCf>JG!KY-Phiy;>U#HP zLj>`hxo|W%#~4aA$yt&=M*Rr`#wwsZ-2tG=4q(7$=kqIuJ0@8zLZxfh7(SR285=YM zXtU>GB1Q>j1NGkxPzia1ZkxjN`U;90dVEbOaeCFcJMC_)3$z3nI3z61aI|cauRjQKX`j zds)b~LrJ)G5SlH#%yL=ga(MzK9EhN4O$1_bF}&q^)rxNTT0DT{ZQs-~o5u;&^SR>b<5ixt zQ<9H%7X-2bY8o%H<-7HmU(*?leNyO18hw8VSdu`VpmG$uI7ISZ9L|2t-3%`$h+|BL z`KLidT}v|1Dtx(EUaIuv4b&c`#v$BzZA%^&f87Du5fRK;tba^Q*)+iMokJK~Tkehd zOY3)caT=&?oyNa39lFA{SR(bX_anjNIIfbznJ%>Zmp!hk6J!r=PmN!d5$pv|w14V6 zmLg9C#sME0Wz~>c=?!3RN`@V&wULs3Gx`eUNLO zEn4Oiy^B?)^4s<9*!>H}`|VaDL|nCOTA_|Lud{smlRu{w|M_^M@>|OD`dnyQd7l8S z|LBg{Ey>F&-r&y;Ym}$MGvD8t6~Q-eWf8V2#y%H*d7+}gqkBE@1a)DocR{`j$8veH zJB14A8i%n}JkBCdolkmBt0egDQBSUwd(p7X3@3!B1X89-C~TE=?%vB|oWw-UZj*U| z8kBn>`;))BQh|?+Qw3%|_NS8^P*DJUk&$4IO1_W!6k8`0`S5C5nBn4ssjDGTW%G)Y zf>HBQxq>dLrS}!E10%M(GkwXg5jwXLsPREurSpz?jmoVf>r(h7;up%sAaLwXZ#raL64u-g1H(fB>C5` zL$i+NUvxML(OU9|dC3TdZZo|ww*X=z5U(u+{CBjf;OEb2CF^^9+_>+1|Le8tqN*^Z zz*jRaVw>80XxwCB`+s<~my&yxamG2nb>NbBOC;Yxq`O&>jyivpnN7;4i6B=7RJlSU zxc%;#Yi}I7VMoRQ8f|%&`Q%?S7f`P@@^gh$FE@6=6>GJ9sauy-&FNgL&+%?_r#@Fp z8lDFgJmRllk1eU~U3-wv7c6O!QdNPq%paz3E)xDg^nt@6=PzfrScoFStC)mrGXAfVG6}Z1uxWKhwA%x z*){l~NL*g|H34+$5XRdLo8EzKlQ!bNa{Vd}_$&^Gzx=vNwQ_(E{?ZiS<#;AAEL%Mp zv^AK=Ab5dh(V&~C0yLB{_kT*Y6UFyr&ghvp`?PqU0fB%5(Rg&8t-?dR-L%ZhApA&GR)MZ5!46of=S<1Wg=k3+F#+9K zhL_$)84P%2t0WA}VU8OUKgi}V5lKKdWRJ7OPYtlZbnu)XC9NSkpfWp~pq#j)X@t$c z78dvbJ4zu&8s>E0R0r)N{nxuUcx+Z_3J(>qSN_BPG|=?p80|Q&no81Z>s#?~ei|^} z({ImGKYlYTE~^SUvdOwS^fq^!bRB_LXp}b9XV}PC2XG(h>&CKRpPn4vj)#l#xpFSd z6Cme90r!tp2yfS0a*zeTB*kbcezaf*)ROmRc&2Rn9y26dm0P3!enzu?VckSZ z`<7Ey`}k|0ib#sP6E8EK4hB1n`wyyXRvVdbS!VlF(XW8$i1?mBU1_DUyrn9>%cXWJ z{jofG`($InPi@7CUo5;gP~*Kn!4-jR4;x_C1hFINvZsEH3Xt!QkFj(MNwq)nZb7Ju 
z2q&(jh#_Xr3TbU*ghXVM!#H;qo%d5vn&mkty}}PPK2?qM3-M8~LiNf~ zDu@U)gy4nClPc8`@WT~^6l?QmWr2u$7!l|f{uA+HeAQ7xx47(3cq0k=w)l1>Mcpq_ zy3uv*KtzcBhlXqz;O&=2yz9R4o=`ajus#An)vv>KR2%RQL(hE8E(v*=oHz$~z#Vj| z{$#6xuQn5LmGq-ToL_V#!CZqDY@^&0EFW^+dfXy4{Z}}P%$Ygvqb4Kvl3H#sEOwG? z_NYWg!uUH4d8ZVnl(y#kefqN~7XQG{aBc$Q(_YBy);8U_E2pfu->(+|X>Tv_+jhlL zyn;F**8{1=tE75O0bSU>Mqpl(6_8bhiC4~~UF<=9Su8mqr}shano-hOeZo<#FpFzX zIpxP4Wm%^a+6*i+PZtKyhq-f);M2WpcnHEk*p+#EQ`P8Uq6u303mA>q<-C{9jxHmJE>W z;xPh7neb!@>L@F)Z%XP!3hS^Bu2Qf}oTW(uCjFiFa<`TUTVsE*@7@iSCh8V84LLZ# zJ*(4bB2rsG9St4k`NdwtVMp*=FITgoxdNf|&C1#w)lV+lcFdlzpvcRzJEm9%nc%=(H$rPh{SFD^lpg!M7O zhZeF=iT!#Dz?~gy74#CYD;*#KfsgTb1^lI~o&~JmmnVHI=j%m$?y*XWP`vHaU-63V z9wPb1H$g=tDk%H0YcSe#w62$JShwmE)210b?)yQB>JLlq#`Uqy(>#gS98t|3*_atAXA|9+?-G>7TK_f?O6ksZ zfbh8&#qPFu?k@J%v`SL9bgyns)Y5iz#Dde%{4F<6dGah3{I0Bi$M+pK+X-8#sgiBF z=Mt7P;wAXs5muO;g&cRxF>h1ob%fF+y@K{1kh_5INcz(qn70RsnaY*roRSx{l?zo8sTvULE%~ddW+l@&J|azt+MT zex6?bm?!B@{9OB#w{28l}2|iv8~Ya+0?pi(4Q6zQG@35@P34 zt^^SIj?i1bo8Ql!8DzhIx57U2iiQAaI=CGlRv%d)ET!6TiB#v5skwa=$#n=x$ zIU?}f8h)a=$Ti_Hu}|4J@1N1WrFr{t)xy}E+Zr)vO8@cI8);r4xaQ$;k$~AygK(L4 z$=z5_(hj>E(_y=YpQn^chu%l~m%kh{lpXdqMyi|rv5xHbF}QI4S~D3=QMb4cQtP;t zT=}1Lfxd5fM3Gh{*!M=)*nFCpwij1wj!jz3^}%_tW$ttK<&S08WP;`1B$egw*XIQz zl;;@lL=;EyuJMpTtyZ6kuEam#x_h8I!CgoOn;zVJ8I3pGlX(N>y?s+Z3 zms-j(6Ldv+IPA-8Eo%O;{NT>jI}%7H785CzJgg_H+{M1w3?{XImJO=7-6pji-2&syzycq(Y)S9=w~@&9l6$KEN{yD{VUIt zvm|~nDZeh+DG3wKe|gmsi)DwW5}0}$l1w&hXzXj&pJpK^=&I1yR5cPE@aJI(Nx{l;3t8auX#dlS$#YB*TTve{zuVF2uo<%5L}=>wa0C7G`e zVo`z8+?Fn1?niQ_4W;{489mv+9VZ3Zm^ndxv3wZ*Qa;UV&Nw<(xLEg~VPuXfS-vyu z1g+iwxx7Bw&aZAalKP^@wt))gRN~L&o?i3&MFxFwNo#*I=~LX>y^c*t9I*y&J)x5C z)rx^)9i%?&m$fC;tiCwj^6~u-y6CvuxX#pP?-A`tAPNrblha=UcU%ei?Y?w+);*pu z%ch>QHvBE4IzRjZ;@Xt3j?doZY5Z)y@`b&Rm%qN@`-m>7$8Cu-i7d|L#mh6qyh$?o3b~5-bt(TU%%P`>RC?>PY@CC?+C|(5k91K7ja#F{?>(G%#%Yg2 z`Rhw1o4V|_4s^ZdY$Rfq#yZs8jwn=rb9o+2nsRXs=>4!}55k$0s`8T;jR}*Jc{@?^ zB#1ZyRK--SDYR|08@qMk;O%BV$@EFRJn~Dkw?*(;1+E)?#m+Y$%#FY9DPRS99^cy7 
z$e}ll?y+koN9V*SY7y$NykKLf)E0{Et&aJsTAKa0P+!~JRxc@e{ts1de$|t$4`W;( zn@@KNZ?QCAE#=c?1x&g1rHUC-hNcmO$nPEDhIlBJ1T9}+(GqF@KZaSzL_t?(JEqY3 zT6d4N=`o_|+PIaPahG6Qi6?K-dLzd9MRRjmLIe7nxM-TLjSNbA&oR0-p55s(%(7b_J3c+Njub)C!b;LU4@ib!0`64s- zM`~CF8mogmEkbmzYkEs-vmy~4iFUe0Vxl}k!dX`f(mS_4|9)BYhlUzTc}0itfi^g5 zK-9*gS`t>h?^lq}L&Pq9yxN7WuI6dP2v-@SCNJrN(hgzNtCKl*uMN8NAwT9ekb zy)5fUKjc9G)RgdwIduml1TJTv!5wQm%>*-xv4ori;3}L2I~KI#3_E~*%BPo&pCZ-k zXTx4g!)+=FvW`$D`E7Abe`-;-f<1@Koe$mT$x%wm)NYLLdU0}VtT~=vsIMgK$nG5# zj%IpRdydU7ZJfQYXI+$Bq2=X?$$Ro|Qu@Y*K>urP2lD-XqN!CJxT?C+pV6mpm0{6- z<@@|#@u?-c@xXe4?M+-PUZ37nHnzMAdE_*Mls}j@pBXMT(Qd7suwBhTn1TZct}s$d z*iGMiZjDU~Cl#zPZ>COtf6n7FsLGS_TvPT>L)9+x!HgulZVWZjjOuvqrnGsdc^tW} z42jybpGGPa9rW&JrYeFT>ICMhSX0*BkuUrUR(?%h=40d0rl;<|3+Y@6F6)@cGf69ZW=+FRedUE_KuwM9 z2V0M{iZgj4gfN>9-_p50bjC~YEHlq8lYP7CA+`~<`Fh+Vr}-v34Dys#RkbJkAMh(P znu<9u;!mXfkZeqB^z7*s&>4;C@|}d??2Sg1~cK)qPdN zzA+L;vdgGH<3Afvc}hXbBrOrtbn8m&5C>>%jDNt0R%uG^7aHa=Jzbq{te3nx9O{eU z$+TW=Tkrby5Q;>%V~IeQznHE9@}#qVz)~(ZtC#zl`-KTD1+6?}^!6lqk4#p_5Kgez zOSTq2Mg{;P_U;KpR1j)PXzMx)bfQKeKJk~0+k{*1dA&{=vGU2ToXz$!Ke|I9z||rF zgZ(yS94J^1qvP+z4Y+!>W(fB+(l#{g0iJ!Z@KR-jpB%Uu+GmRJRZ}NigsfPxM~kFz zZ?eFAFMC^!vHYkw8Bwj`Yr%(PHC`#-AD!CsQ7#EV8 z($}S(&Wg6_k_~PCRI#?a|E|0_I%8kwA%D>IvAmDLLtS(xpt3jh9WVQZf)Hdsj=0;R?xY6S!_N3*{CjiGvphp>r zG!AawEB?*?met+H{#8eJ<6kZ%jZ@6|Xcc$n^R&vvGvfGNHQ6V(7XL~3Uon202(#Ez zo~c_nYsr9Un~=9z>$c5LnJ#i?6TdF05?wvLvDeUmpC#Wl1I!tO55I z-14)w-eo6a0{!NCWO?f7*DG@3Xm{M`S@V6{#O7-5bo2TIK?;?A*zsEb8851TJ(j64 zePpStp7V%x1-sqswaq>6E3si0=sbh5Rfb#Qim%IK^N>tM@@hsGAOdM&#HG z<)+|Mawofu8jt=5Yy5cEJihVyuj@@b@}+kA&5*{;4{TclB6<{F#CKG>5hp=XH&y?? 
z+Fu0nd|(EHx79b%N2KPbR;YHu7iL-SOY#Fw%DDyuHIC{DM`<|zN?vx{Fw;8HB=SmCH*1a@B;RXi3BV zhcV|?;uKDF+&(7fy(9SR&az+cz=^~2`|ps~M)z8!egLog5>gNfwk^wjyjA%r@~t1^ z)^%S^jM0l-f7j)F(W5vI>{Sd|itZPIBg<2(>o{#MqC*wwl#Nfj)@Kwnf40SX+?!vC zik83jbeVfklWNlF{inQH+>1CrsgM|@8t@{)HZNkv7CYUIadw(6#ybQrt(QM(S{*A{ zHDsn^EUkEw2Kr+Ij$SdIYf)ZajoVUp0&cNM#wok9mshRicFjTqRnO5cm16hcbCkpI z34C!~`KUR$Ms<`gfT48#JHxbKJHfyulRQp04fyzd6JbJIR-kmsZRCJ8_CKNk%tpjL z(B+}%@6ZT=t1LUv*7@<)#c-j1xqAf@@;rU0w`+yjE^)M1Y^VR~*Im=bzjqDG@#_BG zH7#!|4D@fFH#9VYp^#944>1WY=$i{*wqTY;DjhU?GrF< z99D)CFI1PVfb52H(?~^|(>!C=5RH=!r$0)(v-o>Z?MHcCjOSlsBZT)~LNgAK{Q~NdKV^o*e zyPju1vvZA@Vi|ieHkvh8-<(Tv&KmF1Qmw$X$LV~lcNuGE8Zkgl|I4j(>AoF& zcj_Y97`|>{qK|zN@ksmy`tTlRft%+t`m{Zqo;U2yVG=quU5{uq{r2^m_2bP_Xqf%> zn{D<>0F5G@+H1wYOrV=*&4;-`Wz(8iMS0}?+-z17g;Mq1lm*SggFAZ0vR9vm`s9%Z zAL#CXsZOcd#X8tKLEb}V929!_jm-1{&s4py`LuqZ*OWZ$kwx|gBX_{{e-jMOVzSGJ z`@Vy(yN&FKwmLcp-ObrKX$J>)D~&BfxA}R z^>S$Hq$L{Z?*Mm#QMSA{N5&4$<0jqJf7V_sb>iqmQe#i}yln4{%a+Q5(zbCQT90nv z-}W%lKl~^iO6s|i4`XTE?R}DVD!}~FnbVbY^~66l=#=aMX|4&V5zpJAtyge;AiINQ zxj!S#Nc>G3by{+dlg!K^{omi08xtzM&vqz1Pl=f2!m1hfmh5&_vr@V6l9Cfn3rXc> zn9A6?7=eW;vTKbsP0C#KnlnX7?XSK-sSL^X{+aK33b+cM`f4jOVSNfWVC*jPoH^Ey z9f1w;-#CMfZ0nN)9Lh?R#xho9@Q`x^RFV6+kSDLooSq4j?A1z={ z#w&P7#E0m?xcz{OL`g01XgHUvqfv$jGs!+XjY&2#a9N_IUvwpk3+!S|F!4_xr_OAx zG7fCL$|QG@%Bh%!u6?6rwnivifT31@>Xs3aLgFuS{&WqHFiBNdw|}Aox_s|?qMJ_V z3F|SaR`zNs#Iqi)rpABGc`S~Hcw#UpgXtA50e{T(?&++`L}X1+f5GaAkD09RiQM$p z*mOOY29DtjfI4;YKCM^sXjuMU{_;_f{(ShkW}l#-pvj^{FT;EQ_`)KGLoAjbbj&QO zHq&i0<75WcEIP1#s~R(QwaI4r?Ya29PdBp~g4k7C|R0kZR!ewOo)^%-{6^sBZ%PEGsQ`oi4@ zHFO@AX5~jhI1js(={Qgg#WjjmgRzcJhGB$m+;2)6kJpjFYjOJ~{4chW3XSU%XZR-Y z&ZS&cfcegc*!Q@!D*JI7{$=N@IX6jwX$BHIJ6^ zr@wFF`j@&9-QL${`5mL74f2z^30r+5sMLRMALCo1f2Iq5^2B}5swI<2Tg>za9jY)6MnUI8h6t!Y9rOv>bT{#>3eg!sFPtNWw-O+ zq{S8<@I@+^asJh+ z|84xh5Sn;M2Xe(G+wSYJ^o6bx8)%YXPYDsA+BQXh!Gb=~Q83P&-f(fmK4qW0ZF6i|w461vMfHFNb(!`!>g7>DH^%k`YAb$yn zS8zTRY5iA^7+mi?59f?a(`}mue#0tS0T^9Tw-ov9bLhUOaaax01IBbH3|0L%A1|K6 
z3GW*WTjhzi*yHcgc|_)O)BoHG8Nm1w?G%#MCKjL0rS%uI<-=C1WKL*>psJ2l7NQxx z?sDXr`!KOBZepNt(5bBwgWI`_PDbGB^SlNZHX=HqF{$S+MfIdSKY{mSq}(xV zf|Kvh;A1b+1sy$;=MPsKo7bq;Pw`Kt)joHe>gs!c{4$2i{|lIY`@MPp**r2% zGnU&Dz6YEi9YZ#2jp`l_(W~P)lHud9x{2b4&cV(Z%YO8Q8U2Q!EU&Z5?kh##xEusR ziM2ELhm6r#C62PM*7Rf?HP2HP+1?w&AJneFU(ffG9D$5EuSCRf{durpIq-_QnC}Z! z>Z7aqAgB)U2_+? z{+ScSW`xy zqO8!>G1+E*Nu5>h)L8k=yOUv5*07GU2%l;Dgzhs!O^fp~^aW;})LP5F+*m1pRrvgE zLS%-u#NP^P3w~y1CZzy`@0lhIaQ5B`e-9}E|8lqJ@zTm?%C81Z%k-X}ee-enfaQ8Z zY<3IIvMB(#`xh)<>`nSj<%)$SYk)NkHI^D)T<-M@PHX0U_{Gxer9&knxiyDpXV^o0 zJ--fxiFUp?muQN>+xBT<2A{1R8``i!1EUw+E>!${pwU)VmaW*ocTWR z_=CbvsAJ=@sXBBFz1bOVrQx2d;jbbqqHvexiq33*ZumrkB`#c-U1rN*Rn?;#O}4kA zLc6~p77$giRswyy@DEhzZX5@OL=a!xC0rMJ;16Le)BhfY^_LpU)qyrRDQqzJ(w6J+ z?V@4TGSt$V0Yzrnl9 z188w14H;#m&wWU6{9@AFv+s({lxhI<+@m_5(XksJ{L+H+AOhoDOurqxUVU?|^Etf@ z&DBm~k^icML-^^au&^OPNa=4mY0eMl>|M(oS2KeG6a&eC9<5AV9Je%t>VGmFTb2wp zrOM+@WkJ|6blLAWWbLb)-HwG`U%wE0c(N1ix7rOat)-E@+!O0K=gzch+)em^@P|u+ z$XChk^$zm$w&oVA62dop!GbXdrNocz!=j7MkA3Im?M>F>GRHSWEqa*j+doBl_w|Aexj z-PZX0bPs!N;smMW+MK0wlHiY1vp6;za<^XJ_2rqHXf?leI;njxm;9tQ&ZRvAt0H>> zUTDZ{1xELT6QxX@k|^Dh2Z8+Na3u!6H#8w>pi2jZHR$Q}=qvTy*8snmU&fjSR z%Ym)G1+SNkT<1;j?|B8XFmKoUq_Kj3s}L(T zE&X5GUmZ(Rr+J1!r?r3bw$!rN&;&?xv~zk$bGnrL@y7Af!Hi^i^g zSll-s5xTc_npV@8kCGSP)E~RI`54qC+@173$4#_P?B5+Je^M804ovpz&~>YJLpP?| z=Ck)^HfGyx2mY_a*T>r)p#Iu}1RW`2Z<7(QZc{HUX=|@r=2m@{*g6HoafsNTmI?<=dN7+F@Ev`a476|&*tagp?rpm<9ykbot_*AHYRmM^G!|yin8k?4z6`Xm zW{B0AHP$0w9v=NPb$x%uWu0C;NqCjL3mB(n{PGxLAu!{fvFHd)6*nvwvpcl7up7FS zXhn#(nW4I~@$9Wg{wg;B$#fJVE3cUaUuQ?t#SR*>Qk;$|oF902_?ldW*pw5CDJ<0( zf6*gQwZE5{!Yr)BesBUgbr(`is$3_U$6v<3j(08lbqZ8cv!v(yP!nf(uWW}E@JBWy z$o-S6U<{TuuI`)Qa7Y_+tErguYw*%Qp`LO82Nd5O%!az0bzk^i` z6Fa?Dmkvq3(PVgn_XNK%37xmn;`?wf^zGwv{*^=w2rv+NpFd5U z=$|Q+&2!*gHCHLkSa2~a%>j0jYyEa?wSCg9McZY$I=i@d3Ai%1GyUE^Qyn@V7}DB4 zU)spvT%5&kr9lBobLLZ<$)wYd4`%g`6dvd^3g1of|v6Mu?H8zv>#nWq*3?8gEX$9v6s z_4cP-R4vHc$fu;cpLW@P=64`FAI>=hj?yT!<6}=j&~N{wYRL^xTpnC>eOS%G1n-`i 
z@A&p7XP27z&D&4qulUkL6|Olag=E^kew0!wbL1=KJPK93mhw&KLd!+v{Ldztf1FXy z;O=5iYF(n<;Y#!t*5%{y39&f z!K8QCw7r94f(R}w&ZbA)E%5{`Btkk%vEj`o8X?D!S?{|+;`3ka4PS}dBsJ7r0qVw? zLFVT}V;-Cr`WVQ>(QOz3LK%5Ng_2U`@x2U>3GJLy{?{xI+Iqsyh|dCUGEozt1g;x%VF-St(rw>QFmo)wk_&90!$_70+ zIn&jPvS3_ud&}KE$gLjz$Zepo(5aoixmfX!KHp$PbzP?!a-jPJ+Jpo#Je}(+%d`xr z+AT64BQnJNj5jZQa{Zymwi140Csp}sra+FNx2323ZV~tr)?dl|xiM=(n&8kg*jpg|9NOT`W>8f3m#&bGh|O?qhprjvsAY1U<=t>+d*=>*Rn-C(WSnGTQI7O#~N%+d5( z1M8j&5+tvYP$0&mPLK6aBCmd4o|V(oJkNy_+kF>3*mS)otnkN=t_#pRYk3lfa1W@T zTgt%sv&9T}7$Rk9H3R0HTuHNQb2lnWMeb8;JBghv0uY1$sGQZ|EgecXAM3nEm)$gJ z7!D^Vty{#HCyU*h$NE&NU0ud+i0b(-xP{YB3HcTw$0u)tMnzX(Yk4^2bJ(kHTDCoQ z1A4=8C8J;~02fCqwSnB#xy8;2NK4L>a(ZRW(5CcL(`2{b6j2Er|Fa9g=Dr$zxI<}q z;dhS-s(Pwj5d(*8yS@pYck>cMX8oYoL1hh`&_{c@vzGYXhu&}XM>K2(gm2?LmQ3fx z*?ur=KFS99+mwSfLg#(w(dt_*{aw?c+y-{>k4;9WeUkTYl^717cJ9dU#02>XueuL| zTEkFW7VDJiV$GLOv6YxBZHZprjH(;<*xrW$R|}|6IBSnIP8UDbY-#nOm808M3Y#s4 zH!Y&`Qtem#P?=Tuyb%8-o!baDBc(Xq_}eQyoV%UGOCxQ>XTc-;cAv9>FxwR6+Hu}3 ze*ed~@S^)sJL%0-08GoJ@G-j*8D%l!#fmZ+f=LQ?r_Pw0Tq;`uKmS3KeB%85kF^^| z`G&qPCH{>1H4C4A>D*@+7l1oyPLfuK&4HLr>(!(M8~!!S(wAn{sB=tAoXu^qT0XUEsXIJ#C70;IxfTyZ@a_T&l}f%BDsMDE;#p<>llf zm$l;ThqO*fmSx$hutU_r4hs$flljdM#=b7D5jbA~Ece9$K9_lGTCFk3)YT+8YxY=H z1Q+xV2juH!URz5S;w0sid63!+fV}%Uf-e=b{iIgR(IUp1g`aX@4VXC7%H5GT407R@ z^=3B!f7MmU<~{q~pu*udSJ6!tS`C)uzc~88+RiWhVZ6`IDgSXxb$}Qn0Kk5|?9@(B zx2O0=o{~~;Dr{rQoC%>vS@CO~`t0Elm3kM=t7(CY!9D-xUMD&Cd9fCHZSy0r=Xg=v za!>$IEH}i%7X_|;Pdw1qgXY9nb#_{zC(cE4r4EYkc>eb2gVpS1B7;tHvUM+)!H|38 ziSWWc^-O)Mi3e=tYcJGgk*jl$Mr&qZLx6?m+-DISh8+I5X#mu_r0z|i-Bi0Anyp)>${2$PT7rhX57`otCNqa?O7{;Sp$U7jLCUr)fVIFKN=Fi+cU|T1+NiFTXKJ>sd)a)sX>n@XiP-sYT=91E zmVWxNhPM^o7mB0;&_Q`j3OI$btG%{dSkpkrV@>CLZ^-HQ_g|%&dee_Ym(TB7KX7Mq zk&h~zJuwCP)n6VM05n(Z-V@AuQ!CE3WzXz-bj?WPU6kBj64awt2nx>^8uF(OHN=m^ z4GbIi%qRqfvDJ+?odH2y^RBCP#~RmZa!J*%GhG!sTr+ovN_Ue%AobCw9?cd|q(-r? 
zzAyjtD8%K3q?+{+b1O~$gcxmZ2&4dj2S)m?ur}{CTO?$yS&_GXUT-9k{e7#LHh?nV z+XXNm{u2`X+X*ksXI`h{rDeJIHez4d`S{t*Mlw=wxek{zd4pZ;1~d-XcON`TN&A+J zo1Px((q&y77I6#)Dq~g2CV3atzZfYReNNqHmYyA+aCeFbc~!nWd7vD==HZNp{nMs3 z#V2a{d#5kjtkx^h(qF_N33_6*utI~RVj9D@oHgFn9G}w6&j5|C9JWrBagW-c<1#;+ zDrPjTzU(CgpGvlyP8~QEgjR*7(=6!;u65224>7c^NxNvclhZ}vK1k&I z`i-2D24z$dtoS z>scxs*EkHt-&<1hYPiU zHXk$Aw(nk~QfCol3X$9@Ceq-sxEcV_Dba+zfe&BYZl8hL8dBXuf~H4>BK%9y)f!Q9 z;cEDdqtYh#bKn!ye1-d6?5hAQP8M-33I10JhTL@yi;WNV4tHzTms-+J-Xe|MTO4M; zY1j@TI>b2Eg%i>@4Y6=>U6|;~OPH^$u^v~gxG9I+%rf!yR$e|F8jD3Y)LP6{@*oo+ zo*w{YdXeFVQD3?s88Ju`(Yy<#?X?45T0T9fIhz)ImsZ|kO2lvg%g1Zf*AlCQsA)Qb zl-FXUP*<@mJ$)cz-mX5LJd@6P=!)IeElW06L}!Popxw!3Pc-!HZt{_c)Ux|fPs)go zD-zBaiWIQzB~#0ltc~&d_qtI|@gGZo-vwHuoW&wkD8_vVT`F;A2LJ#4YGtOhKR9{jN!+!8XtNoZO5c z?(YXyh~;DmC~x;RudQyQS!)u@qVV=MN@@+TD&NQ|^R`qAkF?~i$$t$2;szguM}2Mm z( z0E|9^AR%4DWz!)9d4b~YsgU6UI`#ry0E!oI)cB)b@crN%VX!F9ioaJc`m$1@nolza zrO`^g45g{dl}K-0-O;r>Gk}6eBfFA9M+jNU`mBhOp)I4{QcWtn@n^40b-tHi?OMdS zXmQS3d`v`*TWB3W2(!rq#B#ptLAvMha z7^h({S7At7^gKhu+|{(1?Lcd$kXPnkO5paC6!eIb_SFvwZ;P{|3#6n$-39O8D=I=) zF;>K#EJGJ8`&JKkd3bpGBts`w-{+2K8fu6A8b%yxG_Toob<@ zk~Efb{A5g0O5g+9V)9lAgi%Oj-^)A?3~QBlz|H8mvyd3YZ!5MY*-8McDy8&!b#jEYfk6p{n{L1*YJ|7)6tv;&%m~yNk{cbfQ%#kbi}XYD=P@N&dY= z2AXn@v<3<u`5wBqt(iwp*6Z+YYW9d)V@MWn#YFWlrsAmn=|cu^qhrG2;}g{XJis zoE!yQ`(h8r)Zr>L1F;V z8GPfGq^Z((zR&cG}zmD$~JD=jQLu@RfaiMJ~4RW$6z*%Eh*DC zQe6Kh?8V&fuQ8H3%T==*LmGa#pyv{;R2L)@U)${d3w$|WgssGc0R;Itb}U%ivk&g9 z%~NP}yxGkTtlv(Wl@($HY%|u9T03zy1OHlEnnM6opH;`V#@l6$j*jFRjDk3+@4)MC zR4HQH21{av?P{gosuhJO5jx8mrKTnC)Vx(IrB(ws{nC%^eHAgDHXB!DlKS}jr#pjh z-jBXYVkj$c_+RzRX57kW(&=u(#!VFu+uAJfhCC0DsZ*bCXg^ zRca@pt-AHSvR>f}wYj-O137ktK@j;^4U#=iHUwKc{-c%b&6%4GP5ugSCGt|AYEG6^ zA(HAU2wT;zYDEWqX@!{3@BP*_-O7}RmKr+#>zNWotTb|RBqTVP-`iWhhnZQ?)xFLn zG;NdrWTDndn3&UY$|e6>-pt3?75nW}Gd?gV<|Fx{SMY|bk_9Qs&>5XdEBM7P)g3mb zD{>*vdmjuW&Xr3xF?yPt@2Y~rvyO;6F$B;1@tZM5Nr7~8-&lq;M;`785Y1heK~ zvdbUx4t*tGzI>$NaCuQWsh;kbpPq&@8X^SKe#?B 
zl}@IOM}{lHgd0j`*3U`QXm&XWDa!IJ863WEM9lR%VMzqAofu+=QmyTev{WKUv8|Dyk5oIP%8&Jl~z3i6IS(s{WG% zFRx4oC=_^TwtkJ5TXckbCt$vtgWG6j{`gZ$$6C7)b;wUQe0eBdDZ$F!dAmZW*;hfG zx5|&DcWL6ajS=C~ubYVUg=VN`hGtrP?C6Sxrdm|HcjasxcIrj1vj^p@qs%A?42gxo zgdis;9ZQW)j9Y3D7!uXap7u=}H0hIxRkl~$is<@aoEk#LOBI3`tBbPNoy@96b;a=; zflS9YpG7&$L#+~a= zyH(okswv#$&99|~GJ45jy93jgA!vcw+9i|5+y~kzHZ83E3Eg5BFRX-ANGH|W8%gO8 zXU)*7>3?@TPomNYs&mT;4clI%XZ{{h^Q^o(GFPn~P(e4{LY{gf$7=FUPAn=@VAx8& zW3srQnfCl^Ae;O1oETD5aorF>>2k0|-Tz`#UX zFqEkK;b_qHn`g(GH_!7`_VOT1fRVF!jlQkMp-t!_W`^rR!Mx1oZWJu-`JHIdrY1~Y zPt-c2OFCIn76{7;|Ahw$vHi0?9BL1esXV2h6{o3>^}3hq@==W+m+Z+5*jx>e$Qa$9@Mcyhg{>peG+U>gJTbzK9i64_tvk%Fg?ij*`ZM5waQi*MBs#{aj zhVWK_`O3zOqcv zQ7I~lCX!rww^e7J6xt^_2K^F%(xatdAi)xtgaPHkd_{%fV;=Z^C~N-_D+(IxsQE*^-bbHSE+^#&I(L?Cu0B0K=TD%>BK;j}SJwXIX%ElK#x!HxJW&I! z&ikbHr|9vQl!wV-nt?=>u2)>IF+V^(`j5N_X zF-Oaz*2TmQCov62Vff19QD+pkvV1OBC+Fv0tjx@#fo^kt&X|W*x9l@z>3PJGbVm1l zwu13p?#yYm*rzz-!$RnU97yb@zdI!G^^6s$4;(#Q`Cd@|$ui~~@Y*iuJ-Fp)Iiqvz?Qc|eL8=&q3}nvK z))H6Ap|rz4?^2vq6c*A?-hrk83&Iqe2+ z@^FZ{`r-+B+A%?VF5l-DjsI%UJG;K_OQ2Ix0q38JeuhR z=Ftt@SU}0F|IHEU&EpZXA-nF*TDrxs5U-Yuj1h>8^d~OqQZ-C@r_0DcSt= zVQA;Dd;ht2?OplI=UH7R^eitZZ;i#IWPCo(pK@X~h*A-%{2jy0V?9fBXT5oc%V1Pr zN>P5?KB=`DztS1eVUOKsjZE7sRP->|<+{rFa$ZAQd3yn-`{IQuC(`Xda?{1L!*wQ% z7CfMu^r6~OalXv)*;0AD~uJdpcHWnwuC5`I}(m8qX6~*s&zDEJOJgaPRHxxf9P5ZdW@dqRZ z-fV6N4Hi|w0gi_)?(<$zjdpe;3h4lCH4QUaEAjF0+Jig*lZw9Jh{7^2`rnM5-|OF2 z_&)iDb_#l3^-L-VM@q+2^Lk6q$nazQp7_g$8x(AhgcooA@<5G2Nygq zN8pkjNm(fIR>h&5{#qttIjOpO=OoJ^Ffe4Ir#w53_3e;EJ*$FIEj>g~WJAA5R}r7L z%gd2`tW>AI-t`!A^b2$}(~R8`O1-vhbN#+`7s*lDp>_$v`H8VU!KdbpUyJt}e)V4d zh<7XoFM#fAuQPnE=H0yx1Yzvk-Wc=0MG5vxuEEt;AFi4PNwqY*8}vPlO`_4m^%o=O zaOrrCbF<9HdleVTN0*j+#mQlG+C?zefO`PjGlW-Z^mCQDl611E3d;YLWno+d0Pfti z{i1&zle>f+n{;Tcy7j@*54WEpx;?dRUdZwlwB=l0;Q8Tvkf8T;MnxGI6vuXJ=_inp zuMe}*j8KsxC!dqu7z_7nec{#qtk1?Y`0JC4GlmmRK2}E}8@+yd!H9Uzh_I2999X|3 zB_(BkvED9wvqdd@Q=7cw`=<`%HGjT@K-9)7(?IJc9+-mc2XJ^B<>9a}( zdeT9>&*f@teZ2e5`otge<=s)MVS$YU?d6k?{aPL?EKm9eGe4CIR60)8DcEUA$$$}B 
z%h)fF2%=E=ASEHO*sc81{w|*K@*|;z8BkUEoQLXpS62rbxf5!W#wi!BiAY-5=`z|x z4ioW;3AAAK$HG0aH+t&BR8xdlH?xOUc%}NSO0ZQ26+MsT=#r-6HVYhEYqf=^6EuIzRdy`e_rD(W2&`6*=iyY28*r9Kb%XVR^A=k9 zwl~}49&VphLZ#O-9}a2~D&3qNGQB`s1e6g7`ck*0e8^bay&nx2kefo!{Aii;mQsq( zAwFK^Q*elow*irK;V#v{tul$t$_L@*m}H}%BU1|409Ggas6Zf0Iix8`_Ju-^+1(`O z51@VZQr-9n253Fy>w5MTHU{1w_~Je>`TFodo_Ay-ni)Om)ZjTEr&@T}Njkb{Sm@(wjqtxWDg8 zL+Brs$!{>UK*DoBF+Nx2O!nPTmi724lIM91_>JPYWV*oMSp6N@tpXa7Fp*UeaVLyl zD$~4%E_-3A1<;wPa;xvs(7+uv*L!ZSmVMZINA)9Im=6QF^jy-FS+-YU6sidH^RO2e zhllvy+iWS8TxcR!(S6Ys5){5owGa>8h|A9Op#MmmOdPBB&wnHr`GUXMe46paif|jQ zl&fi8eg)dImpf|j#N#WXu4(F9Pxvpo%>dS_VPmf=q^qv|ot@-QJ$0st{GY7pU#riY z-Y;fopuF-oEx)Ek0iPAoBV~xdtCK#^8!zPZ_w7?%G&e9leB3X>)CFos;o4Du*_v&x z;c@r$`~sW$aEFaHt*)&ZjvoQ~Xs4?F38iU-{<1OMcn*r<*(Bd%o-me zzHC|8LK&?bUXoB(uFKoRfx-c{&6|dYw>P(O(;$rl%$9qLL;i%Vz%nj!E~e4y4^Xs0 ztOO~J!rd~Tytd_PDqyexjEIWr_w}~bY)?p64{A51F`BGePWl=X^QNG|b5f4GbGdb0 z!-TW+7XbHf2+4^2n%)eM)SX4v+atH=tX_<*;g|AYGP47R?ul4*i3zSk)ws*ITDHsb zy0ND7IM>}s$8`+yUa6A{V$~0mjkjd{e{JfjF`IY}Lt2)TePq+6{Jn(km@phK=eZ!C4s^=6b<2j9HLM zqdq;nKvntI@GTS3#t<+!8w?{k*UoH}G&`f!jk@1l!ls&;qm~cgd z@#0ngrfPFZB>391&YF6Jjr!-q-oj%{|A3M&dpeb$Wc#}tF)v=MD9zDeax({c)QjDJ zwcRD2A`|==($%eYdr)l_SYFNy0G|myey-lIrZ=Ls<6czeA@h&l|I+N*uA7OO_M03j zBMIr@=Ag^tewIXyjIa#K^m7#72%(G|^sZef@oT2l1_SXu=5FXLBr_OeS=-2N#Ix#f zu&(;>OE^<^RXAhIhrf@@{>PhdqYwn)XuQE8lc`mzo7+YbdI3jiQG&MYN?b8F*NzmO zyw0rM5?N88VanXCturQR2qG!%X~SXF5QmjniLT78j$BODz#DVJk^G_Fw1#fT5#pV> z;roF=10R*MGJ%JiU8a2MMTT_dhE=%Vsq96t?K8bp@;*BW4*%5I_%E+yo<`k5R$(>v zm!o9FgcOo?;+J0PX%F7|@Ie|vD=lA63z^v@n{@sI$k;+0WDWdy)12UwFg~ zH{=Vv{OD*MrJYYB;YFl6^b;M%C-puQ)vcviz>Oeu;}7qmCs9cMxz#@&{pHn<1sZFx zc3n{@e$ta2V_Ewiq4(LE+4s7OVNKonYtf2VfoT)4wwMEKHs1g?GyI*AmGu;%L_nnR zXQ6}qzL80GRzhRK)F-4MsSrq9TudSH5BMOj+`b!im$;VoJRi-|PG76f%x6&kwOnZb zl}yf%S2)8zu9GGRXG>a^a&f})nQ#d|TKKL(`*!AtqvAnICG5-s%O;=&gwm?6`aU_iyBc{I5`m=W-vp3u{Z@_~1 zSF?($Z%_hH1kcc@kY-TP4}}aKEStqT#@y}?!w=*x-{7pyy$=prN^D>K5L#*-KWQ7x 
z_12E#j=9TkFyXzi-Im0`#|5pmJqa~DhFQGxnim20Kccfp7zQS4+%V$46{QgsvLi6PJO8*eU$B8m)Z|`ox&z@z>3%#l~>>Z>MA6pl5 zBK`24Q4RykIs+3VoacMkYV1}>5iv=p0L{_#KRQarWB$|<|Dx-^7oM;;H$u?bTsPgD zAeZ!|=Ly%7I(}J@Ex_wp#z7HC!o`SUWV;84pS&MA0E(w_{u>76IpA!|peb)3-v}HD zbT&3Mt#4ooNqkb|F&)h+O>#ddt*7MSnS)f+4;HMz1`%O##?-V>!P)sjud=d|S!3W( zmlAOAEq%+qBk$xXeSPN0T~lMe_{e0dF)(BxXZ~>eB=m4uncE`s5{?ymRd(sBsx~R` z54;djp#oYp%(XJFC?J1CL3!pH%IjB_Z~`R8?)iM#8fN zMwZN$^I);*y)$zb)eF+X2XE{bN6EnE2&(~+--78#~uhBCJ z)o^tpYZ}_CP9Eda6;%~~+`v(`u-pdwnoo09^g2w?{ry#(KIRnIC7{f*H!b7fmeB0B z&3`*w!dnq)PwTo%*y!j*d$=fnbU*g19&fba4!W%g%=S`D)UkJRdKG+i>V6_RqoZa) z+ZvVM(q%q)ARQsH`$U4+w|_ai)94Yg4z#}efZpELE*&l!`m&SfITmPdhZ~I>c8aG* z|2L3uXu$KKqjn$Kk^OSw_WpKry__kMV*Bu-`8Z7L=Wp=#npOs5*^*;Lh z{hHafP5p9+6sd=}Tc&7YXTq#SK*Ik8Z@!YR;=m)sP3h3K%X)|z{jNB>zI1x)W=vPA z!mTi?O*h_CuF&lkZ6oTD>X6L4z0LH_*QKvlGphfeDq;Y3j8+V&NjU0fM+*Ip@8z^c zY-W5gCgfR>ZtHUB6n?AY;EaH5KW7wT^K&*wE#jM4qX>{XiLlDFQ( z!odjwmA4VXAh%|lH*ZMBJmRRgH<>nkP}i)W$nxYlNl>;_OjF>ABS^i`WpmkWjF z-d={Q$(yjWm9Vni_+ibf>uYs&+?~%Z!&)Tcb-uQ>kzE_c+02{cH}Tm=9M`_i{|gRSzzjC^()flS|+fbHb!9!Q6Ld>Jj&m!L^w#3bC+Ap$=oO|lL9Vw=w~ zXDyrfKU(c(r88Ji|8{h`-oVleX@#1`uNvn)h$JOr@g}V2JOod%ZLCp+Q)e&oi6qzQxIg;ZMz$fuZK*_uyzKF- zOT1OKO!!dc<~+Q$_FF4-LU%2%3>OXVhNc0HFQ_=hf6C)(sgA&ws1@#2ADlw6?jU6B z3X>tcT2u8;kDKud9r!G@a&&Zb`;>)0e}2Ne4WQT93rWC0vMyBDA4GFs_HE()R$Rv# z?n%nxb^GG@%96K0?0=(*JTCn9xMX2UI)OyNyA`-dU)V{B&zec&O}&awfoGb7=rYC? 
z2NTa-KPQG!F=#^xT1PB`!%Z*Bl(utAQ&ZfY5fX3+XZ5!47O4rT%8@HK_uOb2wm@9Ip4(~$Xn`Hl53E(%!sISV%Dh`ld6BxOBZU|{)2 zi#E2w-`qfh^HKw!`|PB5uz(_;82dZ|PWZow)%Nn-{r^(udNLGZZUMDMwn{#6x6ou{sm;mB3Pojx#X5i}0*X|((_Cv_C zljqkS%0bg8V54vT!U8G`k}#(Yf;CuX9!EuVou^5v<9&KcYhJef^o}^=kE~49_q}_J zWm7I(!SPtg6|ormtsmW+1y ztn6xTEK8O=b~3L}zz!YphOWqMUe2hQ)dUZ%qjrJSP^dOZ^Vr`Opg9X1w2X8RM3lW;9jeNF zcCvj0Ei#AL`j(6oL^rpW+&8?A;Z_}l`VRu=lbJ5`ji@lfTinNw-|;jEPpGiRW@`nI znOb+NJ#+sY{GXys(|eK)((cJ6{ODru6b^JsH;k647oCUb?X+@KfNfaH;v1?xcpN^nTA;GHmj0 z?u?kv)v;!*YDJ1Xc(0Mp6mL6*`+mqQaUJAGkVO%y{s5V^QfNUQc|R2wr>ZhY@il*m$8D!RCZ@ms^zXF&5Jio zJ_-V?;~I$s;g>y(iUieMgFJFB{z(-PZ&m~INh!KJvNXh67ge=Wlauk=?)Cv0FvauW zdw`cg>)KuV$z$6r>%}^$^BT5DmQ29v4k^6fr~yi#6sc@Kf0%>8G+rPgrsVU}W2MC; zHRFA@D%NZDNXf~>TCWDqWlv-3zefBoj+TdqyB^QyR?2dsq!uRF$v=Juva+&zi-IO@ z03myp1#y0WhnwH7#-vP&KBZm=%EtAiIWbv@?!o_1N_uZrE4sa(R%Nr4)hy_<&$QqH zptJdqP!PUJ^@jpN+iGRd4-o8FkXp~~<;={f_eZd#ht(*gYSy|HiaREQ43E)3HM&0-&v68M!!<+LWGzkbb|>65Nl#jfcwEf4b!u;6b>g&(3q#!PSCJ=o%ts-cOZ*=ZAT+g*E^)( zB&NXGPr=n#M7;Q^d|TK=RrU5@n3Z`$P~dA7{^04QSb8TZSmkPKy|1X=XyV+51Y>?a zF8n+z;?kUZVRCECENRo}C-oW<1CM-(jPu0WCX?qc)>^ckyabKxZ{1h84wgUtQj)8F zVA>vlB&7c-1ggcpw#J>jRB2G~stSUB8Xbo2;!J6H&=y-%LLiV7VLu_*5P0}-I`^v! 
z)T^SM`fldmuapAk@XGHtx1ix8W(aX`pkI^GmaVkM6|^IzOP|)wlzlVK?ng3Z^8cd6 z|1gjoPVl=!3{dlJD{J*3uf2j~Xp!Q~;V-1DtStA9T;|wgADna*-@Yd><0nR*pfe*$ zjs`WSjS9Ph+?);UO@CRlt5Te&YAqXiyg!8{4LaN}lh>;mT2uCj3uR1YA&wyBCJGZG z<%a)5)?0?fu{PbpK@v1b(BP7w!QBb&8rzD`JX_csWiq}K7t`E(*6qP@mTp!%HzsE=7HRtUbLSITfw3MAW zf)-AWJVMkneon)TBfH}6HnOiVe!RWg(Lq~eY(xv8FDEJ1Xjj`MHA8t|# z6r}Jq9&bu!mc7<|?||hqw^GK`>NAZ2Iv=CNg@X+Q-y~*}+raK(Y87ti9-VTI&ub?S zZ0IEyc-3EfaPzsn+`kv9b6jkA9ynEt>B8)9=AqneUnIOJb4DM!>Y*>k%oYi-d#k3&)q`?rZ= zhKH=6wKfdySDv(du~apze$PyZSbY;;Gob(@>`0L(@Un;mx~CrkA%!#|n4&ap&0YnP^MTPq5HL3T^=X87!$tSXYPFlWiC; zLZ^o%b%mP^tN=;r9k%RB)L|pmZ@bKjn4wUd^GJdMR{>i@o z4b`*~*fQ1XE#vmaU^H?P?lg?bo@wH&M3TffEaLWaSj=ZeLb?v0%5DiT3$1q!8>EK& zakj$03EUDzH{V0KXA{wo*;LE3uv=U20dZiG8>CnG+4BS13BF${_=VhCyXhVEt9t(4 zgWVpI=IeBoL}nxy-MHf^nWlPoP^cfP^9r^#eH-}?x^Prt0sn?tiXrDN77n8t5PlQ9 zwi~`JX?Hg_{Y`e;8>gG$;^}9-5AtlJtPW5}dnW?+-rNe8NSjxpSnRH}nddg;9&=Gj zL#qE)F36C0`Po&>vPMMB%K+AnvUahQpeGf_T{T^rOwZc5vvZzQAN z1Q54Xdj{$Wvv*59ed{0!MtiT4NXzQ2@S99{JAkZU|(l$kq;}7SKIa z%#F5RntLPM9U9l~j<|fPK1CLEo1^Pfq^t}C&fx?d&VSBDTQW#bJw*E{d3EiupEH^} zP5IQ@C`RA(Z76b%M6d~aP5aKNdNPFKL{Zs#b7A>>BNCBuIVi|53s%&`gQMQYV~AdxP^Y{osF5xuE@UP+S4@ z!OM6e%}fhZDt$y}8y^j{M>~4mcde0;JvBJ6@Fc(BA~Hz46*h!Q`G%!;6$kk${~D(# z(!%o_?FPV9y`xlrJd#cZR)+oh26WZPctQuWX~y%Bv7M8JRM^G{ z=1D$^+MGzc^Hm`uSC|`d-`y?xmd9DU9Ie_dtR{ z$y3k=Bw~V&4I7)wh%5zkRS3EsKicRo!H}iTADN*c{)%3E2tub>8(`6D-v3ZmPEh!{ zH@aI&zWUQi)JFhL%UC5|0)F)TILv=YoGGpnfROr|R&q=AG5#f+4hsA7l-XH_(enlt zD)%x`dzj4;ds4ZZ8*#oKu%X+Cp@QN}!)M5An$lhnd3IuC zc-XAF^ctYzY_qk8K?!S|Z~tv9dEr`XrpKV}SR5AI5CMtD>Kv%#?tky6p!=uQWk-eR z_(Drd{iC_Mz|Q4HE|7erl=^e_ea!ceP4QoCE$%Se4|fFp(y?Hm3H{87w7cCZ+PD$2 z1huiJ{X*<9S*y`4;}?${Js_t&uWEtG@&7}?rkY?+R$K4p!@>}q);IR|K@Jlfg(Gm* zoiEOLdDt?Lt;jZ!Och2vMQEw_GhF2PC`Lc_OyaZhve2{fpEr+W7${vu%~dp2G5jY+ z$v(uIZz~!gmblL$8V_g7-w@@lAILdSCM!1*uj8qlV%KKk3HQuNGu0BkHS)GK#2f1~ z_}gb^A(_V3Dw6viFt>HfXIsul8fFWi2~DwJ2DZ5=1~}Nu_FOm&)%BeIG*LI^T{t9! 
zSdnxTF)68?E82KkVECX_{=<5y=DjTML!NNw)-dYzUqYSlqc~GT7|Q4dT|UpVRqa% z{v@H{MV4F->E{sQ(&kKStPC$rJyzK@+C3=`LZhzA03WVHcg@C`$a!Bl!U&?PTHImr z>%agLn2(~mD-Blr7L;$pZQ3JG>H$&d-iqrvU?vkm>h=_+9#zE6bRm28Jh9i$ zwjPGE8)31LNWpHVulEO!EJsyA$Sppe?yc~rz1f~8iiy#iZ|!8e$+z8yTW$pxc*w@+ z-@|d8W3-mD!D=o8nq4RMq*esV2e=XJFLoA~-tKmx zEMGxaX0x?5`nCN;|4un%6y9h$Q5tF4_VF4-CK9>Uhrf&*UHd5Y6hB~2u7zI_oXCIW zLTin*2-XSS#}0U9nEgVP2*7L7A-*?c%!g%+$^X~Dxl26Y9c7g|t;!_V88-}_y*uga z)A}pGUJUHz>p0VHS%bP8sXn2om=VwOO(W$FZ zg*}ix8f0tvcQa~>121tVfC}%HS{60!>62N=LcW6ums7rrxP%1!{7Pw2SBgqZa9WIG z-Y!A^u8P+C5Trs?o7PiSZo5@MSCbVl;~*=V?q(xzV+J?hasHD`$~VGO)pXx*8h6{_ zdc#Av?kmUk`GbK|64zJn!O2g!=@uojK6}`NtRdZcVRKalF3+A!!X=x4@BrAchKQ6I z;Cx(W_7Xp6s;6%J{^7%t-K4wpSjj#MwX(*YQl5qPob1QP^@~=a_ny+;e>Ipg9=M|z zu{6yeLVB#oJ|lix=x07+5D&>b#I0z!(>3}bL{HaNT`SUz%JfyoqyNnXFV zGUg{^e%r)XlZdqwY1D;h!FJ2!uy4Rf1m*Y0AofAtLI+m8Ql>P(vdb}Eyg&W!nE!NN z)XgI5N z-IvFk@=4C)Z^x1sUlTQRFnc!Jo&`0iKU19lh(@JKwt9v`;%Lzqd^gO0p5hkB@i$}J0i+8a(veY z^(PWzFD^yr&W@FVUBfS)@A!>%xQjSaXt0HXQpsYwUGebnuy6Vqja=rpQd3J)w&;*; zJd2&iiAY$g9qWihxMEtiiqktnZi#=&C!62`)8xRtNiBa)Hby@Tb- zdJbcA?D^QvO6Z>&jYrcII$0dTeeN+X%{UG2r{Y76mOE%!f! zlQ(>enHpM?)n@IFMr9Db@xhND36Yu#JsdI4D2;1&p=1VoW643mU{Hf_D)q6eB4X(V)GZcr^Ckn>2{7IHU)zbJ~N|fWVyfXrfva(HUGy8;EPs9U2Ajz0;J4$ zUkpvP3$i9asX+QN#Y!ed*>@im6Hz;>=Hk-ZF;`4MvsE>euX_1tt@-@xhM}||lI%S` zuw!0XVIZZ!6ZxFaq*8i^4GD63dU~O6V1P<*)gY(3))LNpy*?7p5Kl;CT0SlXRWY$0 zEp)MS8$+j1CY>Iz$+~}d+&m(XHCW5ow{S{S%EZ7jOx);tbW}jhl^QrGwv*1r)yj;Y zw~-Rx6tD2p=(HN8_?|V4s%x|P*8D8dVV@T7?3)=BIuIUYg9#XsaUUfSGS>ZY_mDu1 zKI4Mp9XC2URLl~~jX z0o}jP4=zSVM#Y_(zP|6maM>!Bi}hdEBKI>hjBb4XK42ZNwUpMJY@O#$NIZP*-vCL8 zkOW}M=3Ua`J0@q-52ZKOgxTds$~1Ue&e6yP81L1qzEVh)qFCYSA6I=2GqnyK)b^zu zT8j}mo7-A&I6!K>h<7auSW2S4EZW4lKcTTIi=JPW3i&!@ZIdz>W1{rU|8scos_iG! 
zb2Vnk2&*=2QDFjMwqRwQ-LLAQWkbDX7b1nt@u4(<@fW_%!U&3X!fEE@Ikx7pFE zUd|KOIM~(;<$7iAlwohH2F?YlExjoz0*fY=QtzjeHG|yAh+b?dXbV~8{-c^53hcw@ zneLSu%CQ(TwzZOZ(4e(L{EjNL!`XPAuF*o@A5{QT7a8oJVK}%HD|@B-UT%Hb26ezm z26^gU8gvyhaApF6Af{cmPYEjPIy$)~khcA7bj$1BTuaUsu!~Ij?VI5!*BjyI`TkIC-j0yxZEZBVmrsI1DE zcdbU}_jy}U)?-4E&pO~_FZY9Cg&L*i#7?fZ=RZqHkYz+TYr}+_=`Y>c&OJHLX<((D zpQ0X{+qm;ZXSZQ1J&DdUyOF4u6;ybwX{gqfUCYU?i;RY{N^OP2r{>F7vBnu5IAEQI zZ~s?QfWq#ApT#yWk)#KE`b9B3(Cq!EB`f1;5bavF+cKo`#{ z2x=Kdwf?6yXND|W@13NdNb?*{bxmaI>B>-iu+C!mwC-YG*GPA^Qq;GLN27qcLF>+4 zAaY?6$-}k7;W-2^yGFJROC@Xaejew7bCnYy1~{MHi@O`Cw~oW9%o4qgMn|DjdLLZf z+>|)%SfQv6fdACY!RuP)PtZW!S&E~{e(7zW%Bgn{MEwz94-AqVVNU|4 z*>+=1nsencb*}5Q&rbk=W0)SM(#5mLu&`@6e5bz9vx=%YT zcQ;TNoWx+TaaQT5RtoLM6J)tq_5DUzSa@^2QYZLD__%b(iXu85Ir2F@oQFDs#M=r- zVf!jh$y;!D#Px0BaD%cjCH7wJE7kF8|MY&K44wid^9(4G&f*Q z_+PPARyFDdUEi}&yp6ypj)p5!j7!pd&%Q|TjaIBA|8%Uo<~b}qDW=gkP=pN@ zuV09T0oFus8izwyzSj}T6hazDJb)tQZxG#&(Zse*8DSKX6taige_+qE*A~aTlpIby zVpv(}G5LZw?>c8jneK-)}b1Z81o!C?uvSvjw`fp|z2cka~8{0w8Saa!uzZ&#j)=z{sYYSxFa3-=eP zJfYYlHz%{{pgHxQdy34hNEK9MsdyIY+yhC%a3#O$@vRGB{ad#*&C~a|xieF`^jQoE zK?N;t*#oa7KlCFd6{(qShYXYy5g1lV)S`{SS{D|)xFUgg>JUnnPVMA^2{`7?B|nEh zOzTO_G53o>)kzIDOI0IXY4}vmbN?xSpX5+lu${sZX4sP`8hy@mWGl|;?jF0 zI|l#LFn0};#%?^}V0U2vNa5S5k2qJI>G5&b^3*^Sb#9eYw_ZN~XbomGaaVX|284^O zpkBgggrGpRiCaiY2-yAsGVz8+0^_@O3KJv&=o7N@G{TNYZbl*PJA05ZaU6GMb&I|=z-EwmCJuD?_rB!64vvA}~ET@N;^ARe;8+&T2Xl-@g&9C~S z{Fw*+U88KgP`V*%NmPum9rID$6)jY}_R=JfuC9pYEYSZj4>EjL!Dt=Uz>pqQnW^WdJ>4nZr{ikNdR>aGJyo+D zn~dcc*}M3_=Wx(5a!g9FI(Z@zODVEyNzK{5=){C@p^6^6Zfa3O+pIVz?W|;)qVAg_ zKfF^wyi#Tx3nQnTPm*deec9A|pXnj3jr+a&B9yW#X3(FVq4a1$noRYiq+-z=+vdC$ zMPk5KBYjXwkZEbou)x({C0K#3p<#~s*U~-pR<{K`1DmoQcoIby*Mv42Zs0X#BM5|J zOdjbbje{)bB&;Z;7`VrPUB*G<&E*zi9OhJbHb(V`%P~Iv)n{-_f=Y;Ww=545eSf8+ z^2+d}FCx<_ZK?SKAD@RjwB$U5saOa}ZcGwfd9~Lj>R01II?<_cJZUFo;;TQ6Nv6>f?US4#W(t`h2^jVVV`;@o1XMZy^c5!tqoR5_ojk7E1@a`>Qc zbvY?UEtcn>h5ksAS|ISagK46F<8ghy*dpEiyV=16BZ1#weyx)We0oRVY+N?b@S6`M 
zv0gfS?xmh`^guMLrYQ@B7th<*2m7s}24?A;&>=&yUreI<9h`z`8kg^^{BH3=vTW!{ z4n`~eVU~_Quts?kdBEiKw8RFekO9sCs;)+OBZGuz;C@&5yIe&K7*rF*bx@1W$X>8- zX6E{?VS5E8+vYQz7>(2qEQ+Ng$o?koVY=aghH$QnlUhP*-iDyR0R4sDeah5*f>sXm z_mVKH5kvovNUVmR#`#PgaZ67s@Bpj!m{>UQp9*~=sEeT`Pj6BvjqMeHE?m#EkwLiz zvPI9HR$;$m!el0z3P_bbt8{`5kjZd6#Jk?qFTu#x`=v;|960{Ts(v>ttL)>FR(?gJ zvv^2L9sj%Xm-)V-72^)Ii%hY85cfF+G}UVQSw*p;QSbMf*3;8*xg);PL(F~Sg7dNU zAAn+au)eO}cyNn>tU?En9dc@T+V$Iko~gNC?cRKi``Kh(WAW;4E##YoS}jxnwb9qg zc^?0%5N>k!hxsken6PK|=J}uXmw5sJjloaxJ%ny3CWSJ`E<61V+h&KrI%bCxkfyz96UTTq47duPYVy zjKsk7po=p29d-YegX8NFGoET}K(EJ4xmUr-Xn$oTxaZ6U{JpYPJo|b%Vni>#Uv6+j z&Ba+M;_`lJ%xw6$wOfXfiu36F&eYGsgBMCx#pVu>ZS^CM=i#sN9px$Y~ZSHS3>@aMe{oAK= z(vg2IyZZZ_W9;K*M?E}M*L-5dGEn=uczC42+;8B>K2@?)k6OA_-zdXa$O`X}w4G#b zsHDl%IPQnEEzO&&A*)NO+<#ofyK=40J#K(&vtDJmbi*~o2vYMjmuFS0-*wuk&6EtS zDO{2@7Hw25J!m91Bw=;bX+Bjm8moW21E9tf~dcQmCjgd8E%s_ zVj>87mvirakuDmmyH+d6bcR|vfSk|$b&AL@XTIh4PBY(u!WlY;Y;hn@#oXo?pM$Kf z@U@q!s%8wo1{mF+tfyx4)UUa$dtvjWtZVCvjUv!D?Lwc7E(-v%_MuPinKi$R=}3%r z>zRV+a`x8~9)eZb&feK8AWj~{W9ofcR1^c)AkYmtCgf^lwefVI#(k4Qr>-BHCXjc< zhRS6Pq-w)@F>^K%v%OBDyWXM$AunI1z1>%TZnA7(sC{z!$YhH{P}gho)U7+ zvpV%}vF-lw)S+pf=T@prd|eASF`s*Ba4R`xJHRNYzEyg0FApev`wwk9zIrfQqT6dXS*@ ztfEFc!X7NPoEcov1;mMxr&xzFr|z8Wh08y?mS) zVX_cRx2V#y6IJNYgVL3u-XAD#MMA!G-UlBA;}-n!3z%Hj%K%X(R9{oUm%+mqcG?ad zE`!kOHDo4<(<@&*PI-mRmijXhhwxpmRGB8n0ZJ1&q*K$1^JxZgH*`AjY-5- z4-1;#)my0yUTmZl5C8Txa$mz+qAx-xQgZ&a$d<5L~i(AB(v3z28Y^f8?xy+o~MeE z7F$=guc_(sxF?jfe6`rkd9W$M1Zse}0I8V@Ys}pa`>z{4>QbkxPd}`2(SGq_HRKbn zms8=xp2y&%sOoh($msx_Rm|H838ndk!IA8?DjDyN%&!^9t&2}}$(HyjBt{N`8<6c; zx(e_g%}4hY;%6^iwxT?{s_=lFH{=;^d!J#*u$q)>e2w!T{=^{mJ~boxiw+Rs!AH_$ zEq&soq=f7AY=Ix$w-VK>$iKH7c6KO~llSkLiV(Q2{^_V1N62m{YoPj|5i2GP+^38g zbPMUH{T4?detC^AXCwU$POVj&3p^vWFg9tK&2Jta%}$a0yNCXCeh*iLyupL%uVS)e z?QS%m^W-(TLVBtW^3#)UF}dLEq|%2xg3}?X*Eu6|pJPcA>;9)R`p*b+Lx>n4A-AsyaK&^hiTWn9rUB zJtL(o`E^iks`p##>~$=&e&xcUm`%qOt-#7UwyCevHuu|8+#qkWf1-8@8@5WpLZ`)K zLsL^OkjLjs>`y7kh!_`T84l6Vs`G%(c+#JCjV{9bn|dEpoX5tszP(pg&RAvQBfR5t 
zMScv36hx>@IG8)8v0S&NIF7QPP;D7>>-df~vaw7=pPrOa!0msDQ2$wXQc+XYv%$LY zbX7p5-2N2nq_3w*^lYu2%ev#ixHOXbxoj-kBS5o7Q5QzxAz#h0kbh0-(7OA)qD0aP zHU_j+BalZp*1NUT8lO|&&X0_YoCd&fLD-66m@lE-NR0n#do~cEpL^S;ydd{c48f-n zb9uTgMs{s2{u>@Y<1_p8j%7g%dkM5b$x|twA-|=+R*<{duU;CEAX$#djq1ECK}cmd zqdqzl6r^1SJk&WX&Yy4g_P)s(%*t3+{Ax~N!oX&#!vvhFgQ1e*A0Thd3CBO70cb{o z&8T>7(Slk2HYIYNrPm>snX=APK#UgTT0vX$tsox99e8P7T9LiaS&7ow8vB-eulI1yAa)(yUs5z*!V!8E@XcX z0ip|bUdOUh(io>iC?fjJ1u(d!X0qA65^i`q!;fglzv3zzuGYt)TTz&huf*-5HJbQc z0mblFgaeraQEfG?<6=)mJxXN%Ap@ZWLa|NOc(ph6JuZY_ge-x3RqMv_;iAa znHe1q4iv9a4{AvtkPUy7A~pEuszof2C&aXSZLCy-m6Ev*5zq=qQcBwoS7*mCY|(e~ zx^HG5>Q1GZFwz8r5>-z_W*-VWc~|@=jWZ(o=>!QxA!-$M@x|fGJXJ06OiNKOx1gab zNRm&8Lq<8Ns|du!!hc(st9^K2%fQ_`=4X-&Kn809_^%g3q>)*`jzU+9<5l*gT1l6GDF(iZsHW5XQ2RXd z@3(D^gOzl^{Uw!mS#5E6uT%RQKlhz=dmG`JHm0$Exs+)9nyYRU-HydwD5=N7iqqc) zeQtW%rY`w2>I(tIseEuCbUL7{+a7BulmEO9?~raiUZC{FjHpF_WSak-?A4~#*vs+d zj{yHpCF?xPCugc&iz*FVSRk-pUVUYo&uJ62k#xg3o6cf6Lm@w!}ZEPEr$CD0GQLR ztIt`NnlYW7+rG)hLvz-Fj273ypUhzxgjd!_HS7j!>q*WlwpBARJih7`M({h~Zvz$C zEm_UAlLmXH&&#n6HhUBR^vDPsJq%IFd-`!3 zR;g&8F1J(jJ|2+f6%^>NwYfEIg#hH8dgG*g72tq5@_Xl@DLb?j{5)^R{#xPfYBl`Q zw8xiAITkri3Qb5t0~o6iYIWkkrOq5PAkbe9Oqfp358{kYO5AGZ!~)XS0a2Y`JwZH@ z22N&X8`6YFR;|Qv3U#4>bPNA$pj1cznJiihbD*`vHKuEn%m>(hCnBcLLu7Q^uyq-2 z+x~s&W~{$7S5?J++ZuxLa5^nYSRbcyWASNpdN^fKj`)q)(1JI!UB<>)WOyV&X$iOxw|7j^3-H>rh ze(y<_ryz`6M$NZmlO8F2i!7pE$fD}CL*8d>&2Ro)KRsS0@?=E8$H%9waX~?Q*W(rF zv@iexnd{SOWBer*UEo>6zo~!fH*6CK)8IFLwCl#GU61L^_Ri6bG2c-Y^si5Br& zGHN;FW!G`5IhJE;uwk9{=R4~is)Ze1GhXym-jsW8jb16CX(yWXzff~Qq{fnsL8PHQL81A;&X5^lUE$Dx_1(k`FZ!?JQ{O zm^&DOW|+zm6-f;=U$(ioNQAFIG zhL)AaOS_~3|(o81;%XKJM=#lh6-z8nry5H(2Q*9gnQp`*(9Q zkXY2!{{fpgh*74$zEjjbmb}XqSA87dH7wlmf7UK)4Q9Wbalb%oM_u+K8J%NnZgt;S zkJSC}Kl-D3i=Qqp2H65@FmTnJ+QHboe=g`dR-SMYzIX=Kiz8H#p3?Q1*pNrs!H8wE zv`g&Hy$Nv~d(B~X=Nh8RFRmu}3?5?QTcZe$g9wCsgT=akej|73_3{WU5d;Vr`%ilN~(HuG^a!d@s7_&{VZ; zCE*qyfQW1Ch3*i`7xZM`FCL5mJcdCv_!=4YhM@mF`FN}EWndvm4|rAaPaLpMgAU$V 
zpjw=I<03YfK>Zo^Ew3n!>%pUQb#7(6of|0yoQb=70uYoO8&l#`I3!~J>n#J2f&*}0 zxzI@qoS{S`w&U7BVA)EI@bFhI)zW;oCO18a1)p31NgGRI=*5XmPcJ8NV#o%BUqKNO z4Ua`V@MRNHoam9X=^P7`~BnnytDMI_Ppu69EX> zd#j!Dl+v$!9wrckbJe$-KCECc92_+82VS0m446^K711zn6j@D|)Ue+5cET!}#dJUZ zQM`%nA|3UG{}0>Jm-RLWdt(>UDnG57;f`J#MhAeiz;u;nw0|;2-EwZOuTKJO0OIm| zz8I8-uT+S&{#N0DSL2xiM-L&2Mn8u$?FQI4-5asCga!R*T=^EaJ(S`G?od#grx%1L zG{oKx0}dc|Yu8m1(!D<7*h}8yJj!i-@JUtVo#i;Xf>HAGD!F*(>r` zET6RWo3o~9CMKI=_DdW4L$>;GQ2*+E<7-Qo?WJMv#0r<8Q zO&-{``&uqQ@-vRT6{t?SHk08u5Fqrdn$gp^+7*VxW1%(kXH{&!? zNeAxB*llJ%?q-G-NvhYD-MI}I=w-M30}>_lLRZnW3$WA&{0kZ4|?~gv8AMYpFfBNp~ z$B-`XvnIa-ZW_2gJp@@7^fC;DJ6j?#8H*U4sZqtasCHlj^2wnA|CzX*J4@jL6y)1X^KQgo(+x=w`G+v(MwKPou%vh^AS1{1G!EFTr00SQAY^D|s;=riVz# zi$0B z6|c}$%&5vU1?wM}3OzGh0-4?(hu{1cTjR-4hLd9^yVKRgtvi>7(Tb_F(1(SJJ~!F? z+|dnHXb(}69yWsL4*V#TDU5W%$KJ`_GC9!@!mGV;|EdbA%Fe6k)bnM~tFR@%$uGCT zqRsrgyx*s*ttx!18Y@$hn&dC?h%C}V5?Z{>-dqlDU!)(0cI+oti+#rN`&xN|ZVlMZAAc^rx|rA}W0sC5&(7c5!Xio!EM z+lxGD)i=QxIihi1OZ*NCOMfv)}%$*@Ff7zFg0p=HQK*%QjPy9}iK<4#JgToF3{(L*t7erh0)_w`! zP02D9BCkc6;L}N3keumzOqJCgjdQ4N#Mr#@n9ptFw^&hBb@}?WxrxE;p6pl{*L^_g zpKDGC>y8!8+Kv~1Z1>y@sy5H?YaCh=_qMiE7q$je3bF(}KlONS{X&r;*@9{zTI)nW z?0IkBYYXhFqk@?K>2IWZIZLQdXh1#k2}w;k90dlBq$B`+G}d{D4$Vq##}*P=LUL5$ zdX$9g@mS6BlgE!;{Z3y2u{64t@DPQ4%W=yGN?g9N)sir&=3=d1I zz+;BD->P~%f1b_H>j})$#IQ4MjR2KE9)D45w-52V@kJG7{fT#0-8})dcJpc`__dja z*=b=??LZ@0v!)ZOvK>HpChQv1nPh{Omorq=vDPupBes3^shIA>o z-n1N7_Ldk~L{PR#REQyji$*DSV6ix*Nd6+T=?eHK(ZHadf4uC=dz!T!1|U)d>eYgp zzF%};Et6W@%s{QsYstPaaP6kidAHw;Rg&|=7p2`)n~v_kFHh5@Ds)O@jCq<+8x9P&SD z-&|kz;o7wMt@}LXs%_4xGpNKEMSB_`)r{Wi>Rb#Muk54!^A$zWii%4y=T%j*Bo(MC z+^m1r>B@xIqDwf0-y6>3Jin|F9E804_#}wSu-Y5Ak-%HIb@S?h1t`)>M$*Xlinxt% zRP~hB$Ccyn|C3q&8>W6}#ODo72PUYLR`Rx`4wAEUQvtR^TPqcvkw^UpY&7z_R>Ou? 
zls)8)e{+AShzFSr;QCKlg&F%zC9pOjz2)kzWZ0=k(aCevpooy}Wxj?jKhT+CMu)vK z=4QIZ-yYq@QTqJtaO_V^?}H(Vfg^Y(X@ciYQ%E@ZJ??_19Y0b%Tx80~NFh`8Mh8(D zAkMX77IL1sJsOr=DUlr?A@naGuU}Lh066}esY|Kx=jNW1x|$agM9j>l{geOP&Bh7z z0J&Ci%fbHVpjMNKfe<6=sJJ|}0#=-GdtS{Z+w77B&Ar-4l`r z1>4Ly(#uhnlJ%w7ORW{|ZrPt!X1!Yy*S!a*r1IDa1dFp}u? z^n0DfNZnO>1-S6|h?kpW`G=77Ak<=klfs>r6^RG$oJ{2#R^cvV1N3{3wskO@+;EzkhCmr)>uly zmhj%g+(K|#%a^*c?UPMs`Drp8K(=-9V!&vhafd4<)taVhM>`L|iD!T3#n(8*8Q0R{ zW%w7hmxv83&i;RDf`1~HT#()k<`AOGy$I%Ej+2DO};`pc~YBPAd zjx?&sN2?lAwHa<`jrXR~_N~yDNb@t`iJ|i3?~U!mTHrxB%r6DIk2#tbretOh?4xLh?J4a@>tY$gd_0HwkxH#+Atn# zKEx<{bVxs8fO~ei#`b-WpbZpmSDj36)h)0ko-3taxq7|k{&eo#vq?bJO)H?iWBDw} z9lGvOW_`NCGVoEI;qRT0VWJl#q5z&Bdh{y#i+k@hx>`g`)=Doc$Ueel`Dn444dVluu?1 z&S9;FmB6FH!^6{q>#Cd1>Ps2ypHc4JpvCjo-(}z99$*COo{8PJSTM3IXnWB7y~=#e zYEV|)MDc4g$Y3qS?cus7cFDBw6M-zlON;dVq$@2h{=Kk@8TB=sjxg^lt<_5B;61kM zW6|MF6Dd)*(kI<~T@-;alRwcR*Tr?KeSU9|IOPoMirq%f_?J8E%+{zjJt8=`yXTB= zVCQ1Cif`MO8HdGS05M65OcU_k7}s_g#CM^9P-bMU2bcO|J9uF!t@p0znz7HIVyYI8 zGm!kH%}FT|&hSS=(}hr8A&6KU+1}`Z&OkFDBqgCi3dfk<+t}Ni*Qu0Bh`#2>jGq77 zB+1toB^j&qS2cMg#=Q7rdV9I5a_9+47=q48co%G~?__nwzQaF9qsbRtTPEFGp2@AB zo3A~9mjfs!=96APJ@)##*E>R=7o-?{#hstgjjfsEtArRn5RVyG@t<_dDN00cm8H+u zb5OPAt9&=VsC|lvA!hEDZ9X9$-9GMQwi#IDv{4ZJY;k$s-+rveXdW8s1-Q>1x;W}d zn|@?^1&Yb6@K0j!5`qg+el>T++5X?{W^7b`Vg_n^UI?qn;7pigxD{wpz#}PV%#?}z zDc4||`Y-`ZE@(y@|Hk-73*e?`1_qL|sIwi>QcWLqlzw0wZr?%wn6W@4gKFCG@xNWr zh9k5#8qZ^6P@KJ~6hGXikf`W(LH4BH=4%JP@U*^KY+IMp=g&*`C1esv)>M5ahHmva zA{nPlUkT8_ht|8`+ThWqpnsA}9W~nzHaUkcFgX{l>D|3DAT@cIy|#E0W^7xhCKeD)H?MwC&S-%mD}+DzAa*uJ+x4Q z&0cyE>y#ShHO=*3ZD{TV*Mm@36+JvyJmFit@e0WXdvhv!%@+pO1T#vMzEPK=Z;m(Z zxzVib@4cRG`djLa{c&?E4$IJYxxLEDl-QPoOe^?kD99TfW`&=B^o45JYI#%~U3(yX z>srp>^X6vtK%H9Wy!pN7Oie{)8mkiMAfg%Zu9K8qGb7k4G=x9p13FNiMe*70bTj;o zIzK9GEEk@LaNnHEBhtJUAcPijCETXs?SCuoZM`8$w&o8tL;RyOM+xS$6xqAG`09X?$R ziM-Y80hrb3r%>5&<>>GF>W>@GN1)I;@(2#KW)ruU>C0b z7jzD0X&~;d52gj@?=M4Ni&L~jMY~sf&6!lwYsU<2=xQ*vF%neNT<2m?_JPc6N)0Vm zMfQgrX;lD5Pi{;t=Po9SyQUP 
z?wEAOfef-+jH5aA(aX{*A1{cU7HIwBNsdPzMWMj5&+$j^)0X3XXHj)ZT1u-Dj0>n{$zjh74{|TrM(%sdrR34gG`v*3qa$0Z27u z1REGs+^HfYebn$@>TotwgPPvyF^@2n2{#rr-?qp{W`kqZ|9EXOQ2;oJTH%8WU3a`q zJn@&cwoHp9+nzog;;xeTDV92uR7Pe2%qWm@=#(QACWZ`=qz4lE^$#xl_RfR^>vZpH zAp9;xe-{wgj)E3)s3U@w>m&pyK?Bgwl9G~hv=^TR3-#sJzYh(qN2qixlzpB43Ps2L zH?kNu*%?adu2LX@YD0JsffK1a;>3w+?fZU1~Ob@k&cpOOiTPK;qJ3(6z$bpVQXxqMHl__wwainn9m zuR(%np6~r^>na^q8cu{CW2?EBRwJJP^#+@QqHwD6)RRlPZG zO*4U5rz=!a5#95RZq;V{d`tVQEz7X>YU;KZo?q15K6qJg7u;c8pQDJ0rkyfiRJP)xmyK^2m!s)<~?i*6(WCr#u`;Xkeq! z(n0=#!nX$|3<+R3_I&(UeQ+`Jt0cepW$Ms1!*3-8B)J<81(P{16~;6S`@Y5k>x`;| zq($g>TMh7OJ-o6O1<&V7)$-GSO@$X$wwB3~H^5Pstqv4f=~9t={y9NXQ3!?POwLcH zD2^Skkm`>Dbe4cEn>HF6+O89@X}qJQqcD74Wx=y!UBcCAIrdNjMUrLU^V^O1ro%?v zDb$dGx50G#94W|Ned-;NqVGJ7GG^X!h(5Z1m_JL0>0M`1-CNy>Gc244G8PNEd1R=S zztl!0XggQpz*5Fk8TDEmUB`yD=^V%bX^DV(x7BL>!ot__71mP;w3VM8Q$+k;T1IK= zrHF;RZU6 z+-kWTT2PM+x@AY#R<5p+`Ji?J{U`Lq1Sf=Ibc8*L^811IWIZAs1g> zgHep`jtHc~3{L0XHXdBG*mg}Hs5p?QJ49Vb^*7?QCA}#bQln+pn<}6n{-oAckF4vI z2&v-LATArjDaJSSZziV`;&ZBRyc{_aTm%-Q*vGJZTTDX2=t~N%$hsze)`$Va(zqk? 
zJU20QN;loqt9AjrDSTl4>4M*Y1dgs(h4%YLR^Dkt5xIsd5#rS{qjKaKNPff~SPD=e zNi`se-8vHN=J3m^$Hy2a`hdo*z2qNa@`B~m?T6*X&d}&Z&){K23?!OxSLPJeB1lK< zmVCJBCXVm6po2fpRq654*d_wwIB;U^DvZU|E){tViGozCJ8CWD$|_SK3tkEo8|%K< z8My|fyRBj;>UMBh)6}?#0>!G$LbzD3&sQa(r7->?dRu=8o|U$ZQxXW-J`G-K%YC83 zTE=y=Gfa8eHj)t$FPhu+DG|}yEjArvG720!ndQNW3=P(q~$uVyLU3MQA}t1(9p|x*8h{`6PpyF+r7K) z;_@n8>mo8L(FY)Ms)aB;r&cl-9w1$;c#8_&Ou|`MFbP}Z+&69eW_x%igvh%SD95oa zq4XYQ`_Aat5ipyH1m?uX6(msww!X+Z&xP*bE#C8cQ?oqzN|b$!yZUJUuf*p z_5Mqi|5i5$3IhBZQ0f-Llk%jRT9t{$5-;{Zgx zE4=At7meKczERVLMN_Nu-S0cn4~-aDw92d)g?T<8o8k}IVxC0GEqLlec`3Y2`v@quO1?$-(W~gV82(K(m__&lD>@m-m)BS z;Iph^=yUjj*+0CaE3TW8N*fQ|ycqJBYf!_i7m}vjkIJX2EwYwJTwH1}smb(~UdJDh zx!I$8*NDvhpFNWIHgD%$K2}&Gn8hX?mGSQlZfYnJ0NqXlz_1+UR*4Uf8VnhmHs}{0 z-mBC7*t_k(XG&$eHkf#r-hG3+OonEw2R`)hGa)|S&>1N3bq9#Juk_O$&PC95Q@E^C z7iylUygl3sXfiHLaY5XWg$92U5;Ejq8rUFuQ=KA&vL=d2*6e2<|6DO_v!FQV)RT-< zZC_aycx57}O;&=)=Zd~r3y!hAhCIF9F?TT9a#4p4zv&A7Laow3e7v7w(odsmT3h7hXhYr2 zZ7S({on*F3c8hYaf^`Y@-&p4#+*uIrd!ZqBr0MtS)h4lW3p8*`yqD=Z=nfF(Dz=Ck zq*;neul5=&;|RUg(Jo|*X?t3>ds%jjEx<$k3-XW)QT$6-^X)eX@|RD>THs1bwGG?q z{B~5DTu8KNr&oh^Wxm|dKlO6KAFIoe`KgZx%x$l_-hD7U!3!rpwTUf2EM@FmJ3a0j zEGe6p*(X!iy*^`W5vH&xtsT$7n?zQQFUok^eQ<4+vFlsEM# z7A$Ecsbr&wI$VzsW5F5f8(;kcQf+VSYsjL8w5UaIC`zM6cZ6E{Jz}zv7tF(Z6CC>w zlIKZG4fH&|!%T?HsYXANkpJmG!{^^jF#S3CX-J~Wu<+aNoe(|FZHMe|5U;4 z#k3FX3U^u_rZV0+MU=z)9UV;rlN7DqW{TfPa7EuBbv>Dgno4wNXlDqK4{=aR5f3XX z29nMhF5lWU-34t8eM#{Xl0yTrg;X35qSV>IjL&VgeNq|CKQ~-Fgef8^n34~P=iw;| zXpirpUcC8~1P-)4Ctd@}NmBe`tQANXGI{OzYu6Zgj^L9smTuk#Lim@!^b0R0UbTpB zVT{2Ac{=gu+ZGR$wDpkM52n(+_!4;Ted0L9zNMAUcq{w2@3eZsOT$r3{+!5vmgH~j z*GWw(aHKYz_aleZCa3!I5;P(rB2-znI@Cj39C|9nRy?`XS>I)v9zsBuuMaOTf;WMz z^@pVk?Q~!x0(mP*@Y+mC4^(tbTvJ|(K`FF$nCmdX)X*R8A6-xTkluUGyZ+M1ApQE8 z%Z8Eb3PDtX-$D>ymdGU+KCCmEpM}<~C1nT1Ox~&BJNsiQimqU?IwT`Y_4ewfQxor` z?_?~k3XO(xi`pIfGP)%xP&F7%&T0K8Y&L;4}~M!F?G4(r!Nb7OTvLDaV3MRq$&(?doQZPW-%3J;sg-^Z!gZ z4b05!Z1(3y?0nM+fac<^)fMrtngVIHNr6vogZ#4IOvvH9ll|1wVcm{c(eaeu`V24^ 
z$P^GZ?wJ@iiY^aG2O%+UQ*x?%CBH_LJ0-q3^2#}00P%q~KcjH|6ay~X_{pnxUy#N86IK3+ zqX&oxykKH!dBUaXm(vHonaU)F6G7Rpk>*r_Hl;Bq@-E(~V`TG;rGTZeI#zGfls3j+ ztBj>d#MWZ}I9pBNet{Dox!r1gRWf*CEs;yJ0Nl2o#(0|^!Ng?aJQ!k*@B*J2h{^Yx zMNNeZWUW}{j4H+t4h zvs(M(M;KE^A@MSl-msZ{F_*7n=V_F42S+X$rI1?|o);iT~ttQd_;VC}* z3WTX*GI(4$wz_XM*w-3HJ>AYbZ$@%AYeuz)#5zqusvYzaWo z&iZ9SSTF9tP=U=^R!HBunL*hj;wQ=R@bcQd-0#r}0HifGj!S!_lIz4A-~X3EW)I2Up)2*sCY4_h14p zcovs1nu=0du>rx2pEpN($|{-frH)y69P9^DLPv%~==110&AFw>I}d>hie=nvq!uN{ z(n+NOpPd(ss>!Q~nK^dpaE=1MT(l0;J+)t9@jUN-a?1$RVa)h|k1~{&q`PD@!TM+Z z%EWzlMYcXR&yi-Uj*)8@R#Kow>uwB(l!n1ae~a|o(%S&l!V)bv;<75&DfHYQ4X|%i ztG@HE&AOg+&@TlzVCmHFEqGR@32#bDg0Xr#WwEL7sDQXcrDd|RJfb$@i-k9o{HXTYcH?1ioD;CmMzJ{T@Aj#fsT*Z8%w7Q*Mep>2hpf*Tv+4Ol z?a8bbCOdO|rVbkWvqd8%k46D|o<}$7794@7@65NPrkr(bB0~mtMye=$PE=xgDXuk4Lt>WrB7&9pf=> zd1+W8$UE#igql+VmV5D9%(MYo=#19O%mU_=w44hlQuw6ZYl$g3Oco)WHo0 zXZCk+gsz?EA1&DSp7xpbDuRpP1ghT2U?}-FW@u#a8}xI+W~xg77r^A6_i~dXPzJnM0X9owYwAy$Y0kE@=aZTlrU^LC=g|C{W_wRFSQfsgUJ zA3vqu=6ykSvj$piGoU4S4_E=nARPNk2A49h;R z&p;{l5T2Fq;z*+zym&nT)9)a$w#%?#|7S&wpl~Q;_#yhOyx-3uBfCPB-_ilDdqgV+ zrGcyGuRN2M9|M=~@5I%bx*d$bRQkd`{1<%uo?Jvy+Y(bwbrH!WhdVj=@`%vHMO>P+ z;(gju9ILZCRH>Tw-kYZ+4!YH6+E00fbVdEQ4h_XpgkD$jSBJAw1d0?Au2zj%BfPG* zlO~O1*CdgV{6ql9OR~(4NCLiN_LKjNAwAywHnq13g5~(nK$>@`#B;l5Joo~jYH-3 zm{nFAB`nY{!1bhQZ-xW)t%6;I+zYYbTy~Wr3ehBF#)-<2aWTVRs8uf1xvv}=aY|GY zteL0?v*=O@Pc1x)$uvANVz=H2HZr}F{4TwL7EJ11tbl!a$UMo}>xN+-8%7|(agYlo z#s<9NcykC<>y!kPS079^5)O3^YIV6JQD$+wFidI~T?V=uNkUTscoXOe8OTHM3nT16 z>lAA)!MMf(TuS#hPV3x(CcB2~@x9~*8oVrWO&&r%3Np4KtlY3bprznKRc~q-RHv1z z`PPvX&he^rXtzGERM{xEA4cW9vH0e?L&dzCQ+P<_{d|node#kBozc?5z3TJoX!!ieUf>dlS^qD&z6b%P1uG)EBvK$N zPdR41MQ1HxVU%26cCFaaR*ja|B<9AU+xcd4RoJ0xp7%{#xA>5$dO>Y7GPyEPk$C*w z&PZ{DVJTBmR%s9RV7&cgx<{w)c5LTDIjcKOjuP24mx|fwb{>j3=Lj?|8-+1Z;DI)b&*v=K#!KUZ9C~KJK z8Z8YcJP1=GXBePy8^M&s3Ue36w}aRDgv~%(@ZM-UARc@R+22EjDa1j(Y2>{1Fy;!> zTQ+1x?aofoCnBn`R8EO~Q7AbA4qAQjF7uh$^8p2uVQOHa%0+c&oW4~ZF;-y{voun9 za+|=FyZKOg`lmI(%PS=WTwS%_nT+w5LzY-YsT7r|CeLQoi+$Y_>JgU5tP!WOd-4SW 
z8T*scBGxXdW!YATXM5J}cSN4Z&yTmN1KKC;jg3nI`*=>cK0#aU2_i0VJZP1;UDbWKjI$QN+j91TL6z2uUj|_rP_rgf@4enqqh6M zTv9=-Kn-{)!lSLLDh!)C(T zA+g@h;Q9&y>qjWQkNNzeK}eCpR#v3hUVsc@`GdOx3-2e9#Q-AfEvrOLbAuu`8)63f z#-R}(H@lG7@+p}y0KxQxEIH#bi-@B5NwZlft7)o|?hQzyjm159el&wLMa(lT&8tP- zQh`6UnAxwmH3F75hA z9a5QOzdjd8C*0o~-&4yAdZ{kiQB8!_6Jo|GAX)_qjQCX5$NAZ@%ff)Uf6UD)Sd@Q! zY>|dPXvaj9Dl%|$$FtSVyXR&%_XUe7<0z1KAmVM-NLm3LnAj|)v0QT>Hu&rsa~|=x zRgDcix+D$JZIZir?lII0Q5akQlG5b@og?#F=G=^p24(mR>Irv%*jEJ?DC{&TBrwrl zgaE0ASH{5F8XY4W?px8=Oztmp*jCH4R*FEdG=ax|;?p26E^*X$S6N>bmyr=2Lg-#7 zPe3DqKmamuDnnT?^02Gb=fCJ}Ri~=Wi72sWwK64{6kVLKB+vaA`1@`Y;0DQo=mf~# zT?=)X8U4H;&@(Kbz3Eh?9~60*afKMT1CgyFWrukWADXKqCy|Czu#d^2%11Y5jHb#B z|IsG=3#<6$!=SS%<0mYtX&4Wca3{SP2ejXSEUzM;f!7$vt@B5ag#>clR4&YdX*U~L zqy_`!ReB3RDXwGFL_E0g&w6t|84u>y7=VU!L&*;H4zdEiUVJYl!Na zTjTTp2)z+2w`IfltE)t3tZP3oBG=ZZ!~3}s=p7QwYYa9QEb_|*UtujVV{xjw`q0E| z2~_W405(_FvF%G&sQ`_HH1Iu!&s^$p^E=6|6sicuWo~|BHq3W(;BTuu`L3`uhjMPz zuH+@YSq5W??k49n#mgV1)gFm1>{!pFS&`ZUXP7G3SMR~@tb>oCr>J3eVJV%b%imlL zS4b~DyEQ$y=Npu~Ro~ctr9+mMbG%+R2FubG?aApKs+N@N9D3LpU8(Zy9AB0_(L6$x zGY;nod~;Nuv@5-KCwQ26xDm}Cz1|6B^-Rsdao5P$r^i=?jp-2`IU`YaR{+qF%V?(7 zUV$Fq*?pg7ti5tEYiy55Yiss{zLYi4T>WOC0VTvf2NgaqWxxI<(t7G^A6)NG8j2*Q*yBrpLfDV{jq=X^wvo2c1KY8 zWlqzkYr4h_u2t#B^m-Hkc8rv;Gz<@W70_U+4%0@PA6sCHVCdP*2hBs5OMkGLnaWEt zaD_oq<`K|iW&VVWg;W{;&ASIeFo&~;opvilc6^6F9aUpkxrr6KBC`3_zYt@NxAZMU zOz+wce&G>Ni;T!~2>D*C*adt6wOhE6jsy9Gf2}bv3SCiOHe}Oj`D6Jwyz#+kFqNd6 z@=?)#^Rb*bpu6C2AMbd=4%-2S4Am8;>JCias!&yk95G$CdGg9|-@UyahB8*?h8ILo z^lv=YNbDExi=XbZ0^#{^ach6|$i4$QWz>Nfo!q7b%7+7>eEhXZ4m1DLtg*119j||> zyEXINIuO(rRlg%_C)&wq8XR8>1~L1S>+h%gn>=#5$*jlEo@gV3nJmhims!n+sL`itmgk`y&dl0xwvZLhwl0%W19DXopCk z1sYBg{4{I;ZxecNerCuMyhk^|-IgC`WU{38qW?q3eIj>81x$t9PeaR0`;#Og@|@|u zl=cF-mfU2R6k{9uTPS!_rGh+2YyH=O2~xTKWNi!e%w6>~gX=M(gE?`_7dad&h@l|* z5hXhNIMD=2yLJuJ8;3ua86W{S5~`vvHehm>4Ic=$K^oGue&&4lmSNx5F#F8z7_veC z+N6Pf7DF7m(i)S!`Fqh3x~xo3Pmctw;|H@=lG0AIl4qjLRI&br zWQoO1RK!QN!{1^CkLEV75|>d-PhbxfH{yL+ORR(@l7NvhH3DxJ%)s_mhLx~Q>h6jP 
zB4%ggm1(H2T_p9y9zS4#%V##q%1QD#L2bZd6ELjIxQgbz5K zIsksy40CqV-Q1eo5elPkR*lQ1!aKCdVb4F%ZV&^k>4KVtQ1GL2yX}smuR?+`Ow#8& z2@C7%xg`m?;J3d&tWy~#S2hX@bCX%#RYDO$?cz}8ieMh{=bPWMly}quk`5G29SN8Dsp4ks zxlJ`W8fv^4Dl#LY8tOL3iRj$sM}d#9e{(ok6tRl+NTlwcL{?cz?yhES$ifILK2&bE z7^SjVrPY#E>-U8R2jc7b)ot+JrT&|t`VHvwVPeH>o`tZGSNiBWH8d4N@=m|p;);#_ z0z?yHQuNY5>&U;6@non#!=&Fn>J$;R>J=o++OL%ouK~X2t7!#aR(g7A4Sm)xER5+n zVAb(u>e{9=iyyIKEt;@Il-RisFtxsejX9S0sckjC)5HfW;Tsci3Nrr5_MR7AOH4%l z+uqy1mxw8j-{hp+vh#qI>(h^cogn_#ry6t(X=&*`pcU#@R9z>ds1`hkTsYCEQlmtx z@|FBjkMj!EubCyqKl2GI4_QrQSi#6UHI!ecqzFP~YrLsnOg6l+rRvsjwcIJQvj$*! z>Mzi+0U`o7{qW`T`Oo2G<^RU4JG#C6L%X|U!Y`X46jFGphjp5b;esM$0)n8($WPCS z-*+9gm{er1AMM;N!J}5C@L(&gwbP?|xXlQVp%LK(mN_w8?XMlFYu$h4-_!lw8!Ti3 z-A`Z{B(BF{x98IZ)PocAktKYWqaPG}ojQHNbO9tZ^Z(Dr&!EnzoFq;VAK7LJy{TPh z;gU`T5jju$f!@GrNYbN02eRx{27X26=0D0a4GChv12lf@`L%UemuVe914(hPY}U)6 zeGym&-GMOsKxq#)tEn4`UTSfhlj^f#PZ0eBDSJTRfcXZibPGfCSM~cIHRJAw3dCZl zkg@&3i+^(De#vMOFb5G4Exf)Lh{~yt+n=$c(r)*Fa`7+~trxIR2)KdwmwW7%cLF`& zSX`@&{ST#ZQMakWMvr>~z29Z3--HWw;o!mC9bFHHk6N>D(bH9`;8~na2wl3^Ee3TB z(yexPmvC{&{GnsnP_aK4K|frd7MKGQeo00BBM@I%&x?X?ZZXHC=hB%=`}10K#$wTe z^ID`w4@!S-8dk!gc@Q}YT**#=aWCmeEubL46WTKPy@&+hpksFqfxgkd8Hhs~R1K{< za|{XJy0F-3kuN3a1Vu!J8O_6uZs~s)^*`9W%*0w z4PV`)jRuIP)%8(!uj_YE`6-JW!aWeF@4D>;sY^TVuv)4jH9Cv)w zzG-`V{WHY&b|GAa!hARClM>QApt38rAA2x2lzhX52&=mHmy-a*K`3X=x1up;dZ|CW z*vdaXB0cf?qu}6#*(nB>K<$oYhIHqEQ+F1_j^Wf$_%%g3+)(~jf)L&ieP|ejSR??m zc@KJU)}qPz(f`BDrUf(US*Q&eb+1H{%_}n~yxZLE)6R;RXpzHzqz_BbKrGQ4Nm%pa z@j#j5xORG56cQ5B_4#_i()p1uhDI5iJlZcL9AHB^+o20*N{%OlLYh zHjFhB@Oz$rrK4k{9mtD`>YJU{f??OgY#A?Am!qL6T;rIdPrpJjyPC~#q_t`Rb7wUB z;0LL;H2Qrrx*p`$(AUO@b}o#<*ic1egmBd7%xNICEEPmK4~mQaBBYDqnpljJRP^KSY< zp5I*-;_|7~T$KwfJv4OHSpmt~i{7Fbd{RYiD*IysIf{+T$l>3tDU%;2@;>^qc;kf| zvNxfieN|z93qF>XC&{XZNrvT41Y>*Wa+AZx>1j+g^YwHvX2NGTG~;jBuTiMTum7X0 zu|@au5$WL-$xReC03Wu6D*L+c0b9bydddo~8wNd?Ni0IWUC z(n+fXy&gzBNsCXi8V7wHEto1xx4aU~$ZU6Ob-Q^R!yu{^RnKxU3g_^{t#LuJWL;Vk z<_D;``WDyZ=_fVVoUWc+c%T}!c 
zXTU8}pzR9pHT9^wFlFSfHRcuQZP<~><_Zll*5({O8QX|G19(SOd!*UdBDpJk z1XlgGzXQ?{QkK(}&dRI=?G34Hqa*!Ru1tCZ36kXUA<+mi1rMqD zP1K@_mi(?fGlw|Ll+cV#$fbZ~H&({i~RAZW6`WNRI8O<&ug;d$x8pPA+E|24^uF8^&P zfb}ndy+k6Xs2Wh52D`DP`lu>iMD_gwApAZ-h0lU?=%;8>%}eC8EYRnb!kYEet||M{p(dnTj5<^o&p0kmE{9kpQC=^#J!=V-nXyh_w&m6SUn|U zCIx={c4(5{it6O#m9G8n-fR2hQ9Zc{E&%7iM6=9ic78YTH|AYmhA{7o@vT18$C0fq z$Yt83zxhRfdp?9(5?PzVNM8*VuCz2zpoEoJcjddd{A|;_iOA!53tVAy^_vaP$pMoP z{D$SqY*C}-9A7QAdxx*v;CXqgR2E;9t%d~(O886bU+pYbP+A$R`Nc&fIn~f&JJcLE zl({fg1#3FfYQAHmQ)BZi3s}c;{u#(cTrdp?ke;Gc6azgGC`k6>n74+=GK*6p@|O>ya;AZ;~&rkwe^GqIZESKB^H?sS1|)c(-Kpw%p)x7*RKU4oT8awc&H!C^chxf1x7+@U0w zla?e=R;CJ?#VnIM;`2$d5I508(d-lK(VJwkR2 zvAMjsp$9NjE$)#YbH{61*815fQZ_2Ifc{Vd<6KL{O`qveKO1e#<`a3S_iUy{Z{!qW z0+gdd0T)t)F0Fky)bA{tsT71n$rpVv#D>PDW+N480r6@7dPj9)`nvc@nYIxk)KQWW z``SVgaIOdccn>uHN>wc<3_`cx9n?)37xCIT4FR#k&faQe2cV0buZ)e6P4j&@$t|d7 zF}*(8&mmzaWnK(5(e}nEv>`V#4OD=x$`dUA6BnCGgQqFaxC>4{j3M*OUiOFTv{?`- z+??5x&bxw|6D)WlsG{3??Gzm{+6LG_*0?XYEztZOmBLoN=rAt1n0~d{2jU15Uoqu4 z{s=|?P(go|tqh4D;zJuM8d{b4EDg|_OBK`2-92aTCt2KHo6a|sKRF_>Z*SDC==0R9 zb|60SuNX=`Vt1|T^J+kE0{$owAzhmIfGMW^NW#0V8ib29-jQe!)9|~XkeLSx~8U7#xbw>7S%r8v>oGe4ZyQx{&^N4-r0UBm{JOs zWQo7FK>>F^CzqBRJKiI zY?VanV|&`$w%%Kq;rH8Um|W6e)1? 
z@j8V~nnkLNY*})cs9T`~+4GuN?vOU7=#z$35-hP{TT-A2E0~7;-giwI7Z(e69O9qc zsAlp#lSaM9mQN~*&D_-`wR#<9@56v>g~I=jx^2y&$@Uw;MGlm@Snr2=1NNFzncgyFMnQ;6R0HOLXf{Gqqk1~TN8!JZx<$(t`jIcC!e4O zxAulnDN$u;xXGieKA?zBQbZL)*Gnq$TcVPwH6`uV4Px>|l+%s%^6D{bn}gA5i>=3} z&TDA3j{`18bJES3R2rv2@!-<+vy_u>4gLNDdeX7Ms3B;zy&Y%~)mhj1lsqTebr}|~ zyf6VtDF}|o3l9*aL8>4Ev0sd7e1`vZ59f_nWO4zZN3Jv+LNzq85YjQ^ZMd|}V5i5i z%K+|>c@lxI6k^;>Q_~m%;6b#lx*Kzpqm(L*R|mcvujBhzPPom{Pnm#ryS&s4`U@T3 zm*-Meo{<4c;ZFPa2ZoCq7XPCjTciZL2o;cr0_v)H-Yl3rkmSo|MgysOd1IPxn|FNR z)H8K`l{U(RY_F+Nt)t8==N`HAh%gzeC(yDN!izXnGK^GxPfOe4k=$P4Fs{z@fj)&; z!>}xZ>3|D#gpk9$L$GEN>`r|(@^XM2*-V|3nHkk#Z(Lb4MT_Tc0#{7`#FtAsO%o#& zxmmlLPs*YQY>eaG8AeT1d4d9pl3!lu1FX?ZZZr?lUn;vRcOL(g=x=YF~4v8cV%Kuc8L}$fHSP z%@!Q@#=q@sJf3z`gip)F*c1X@0K0$KUYYba&TaAI@GpoPJFqG~36W6DOH<@H%W zfrfG_^t~_2BuoN4-Dg;)aW^v=x&oDCXKP%SiI{{03}{o*2%5KYMCa-C=l{oS%)tph zM(rzC!gm6;&&Gk0!H3spY?lCc3kd0p{V1~ALGcSoV)1R|ut5W3|B~}i*bYOhxuGo9 zSzuLHvrNEvdwc&zVn*UJvgv*@f^AjX>y$qgYum@5tK;ANY;dU8y;ZOmTznB4kJbmO ze!Qa=CS|(c$T&k#fCBrbUpcx)P59O&CknaHp+xlcL-WQ22egaMR7P~H2+>tO0Y((dhv7G6#J zt*wOqgs91ft1=t8gnunYzFS}{c7&_G5kXw1^NBz@b;(8+B3>L48cfAXB#GRdTa74( z7BEr5b6`N=lhk(U0?Y6XGaX59Z*Nv*<>X=BU2uts9y_L{$`;RG?&@clz{CdwfYCkQ zkTfhE5+4xUrq7qR?PYwV?9&Pk!zSH46LZ=2--RyhYXe#{0p4byXS~EL!TTV?;L_TK zS^hEqOFKn)h69s0%c2x>X1E{S9P(S9M}MQGb^puPET+ zaO-O$@~*LoJH@{@EP7Qf^{t{Cin+8CkWgNs;xzBQTT+y*Z-HDTeMedaa544iXBj4m zr-es^hj;H~`XGLnzl{Ebsb0Z%gjhOw;D37C=69$uHYTDOP-Aj0U&NZ;(SbkPf1?)| zx_lq#MC`)%UIfusGAo0%An)(%o-kH#)T2zDTK1UrrH`M>%vdMrp1DVW`z=5wH!FM z2Is;@d(rDJ^=Q9`CUxSO-y6ZH#4A?dAu_slKnUY9_z@-+*&;<=-WW{f$OvJldgf!x z>7cb)3vOC`IO%DR)xVOBok=jUN4>P7>6Du)C`%T1n(&eRl0{`*!{A3YQa%dv(y2^> zJM!S? z#kafG{DjBniE6w0;J|iaI#E{bb=-6fh`7gl(mf{1HZD2ls(i3W;ZKC_@~c{$F?D${ z8x?(I0J8*)^T1^$U%tET?iZ8pv9hY4;+3joRD`rjvr1V)rJ2g1Zt=Uru-W+frPRWC zsRm{lv-*&Q8S)~MCFZoEG&vdJo=#=*qc!v?C{!G3(TqgPS%Io(vMrce<37k4FWCc! 
z&R@AyRW&1iVd&^CJ#p(5Q}qLhD`J)(NN>k&y!PZq*WCCdmGvjArjZu1?rT<#o1Bou zGHbR_PI^78>F>#0U2$Tk7!fsJ#}iXn?{2L4b+qaIx}ES)00LV~0+6TvjCxbV^AT)r zf8Ugr)_yPDA-Z)aHkJB!p1_Cj0bldt1vNq>Eh~D(eLX997_Q`O?AI93l4(NF#dO7Z z=-b3xrbBM!Qlhv!R}@99Owj!i8HW-07}^=ajZgsz%JA{e6QicWqH+xWeLVUnv%~r^ z_C6CC0*TZX`WD}ntBM_$2ZsDaBNkS8wPGk2{WISma zvy|q3O_^721DO3@&2G&zwzm-_d`EFYSxQE=`{-+8_AXxCTX69aq^#3@CC}S!%o*0z z&~vkQ!rLT&!`^>rN)ARpEYW!xG(hiLz-_}W+ib!fim8oeX8@<6a75uKZ5QqK(PFvL zj*Wfm_$0)%!w|=+DHOs&LL!8&K?QaLpE}Wi21}?yl&XwoCQvts*wQSiQigI^0y!B% z_vo;cbtA21{t)uZpjugz1)>a&uivfLTNj&--QtFYi_Tsac=xk+rEcyturB4DFHVqZ z_jVM)?iitfT=WkTy>Bl#15_@I)Lnu+pFX&z2uLZS=S2bC5#(f zRyu9#UzUjF9O0xS222BOy$MxM{A}J&cW5UWDJ_gtgtC*f$#|)U#qvNecGA9UNiugy z|Ek#=g9% zHhx(e;f`D8heshO(+-n4XgoAOI*Yn%H-0HV_efx0>S%wn7H%S_&9S5@=V6_BI#5Z= zJ?zzx4{@@z*l6H>4+ExBHqYAKTrCP+Gw3iE1XKB@#0qKc&?m+!#kR~@L?DIbhoWY% znEBVkSJjLqzNfQkmqV_p!ztlL*>u~FwzT6PzmR{v!xxZQ&^}1c>P#iA%?t`+t2$Gp zSw@Y$srjI0PCxcR5{v!*-S9O@CnX)>318W5bs?92NMs&25$fdgS@3M*_6`^{vnluS>9-7|P`!?LML~2FD)s?Zgq?_Xj=b^S?C*N%q1EZKB39u z3YVL#I|*`%RjFXZbW$dtJy%3SPQ#j1_5+VEe5BH9g|+LC5Dnxr@<4pg~3<`s?fPZq34cC+0I>GIn{ z?)W>g{oGLtsK8sb?3Dq9b$4|Xl++Oq?vCp!UG>O}en&jIpU@gaGaH#s3^ z5Lo!rTpi?Je2y;W`LT@d^(T&#eYW`mwCA*MmnGDm1MpP0rc!z{Gy@%eH64t~k{vHI zW&<=ZMMsHju2*|L3Rv}|INmS*aJ9*O%W25r#Nw6$u-XC1F5*B^S^np%{0a^+YHGWE zhrDF-ITco^5|9qRtfW*4riyCG&|z^XV2zB!&D&9~ZkHUWqfeKee?BEfw44iEA$^oK zPw6g6j`5|dzHNOVw{+d1Ik0{jFlpGBQiak`;mo_g)*%uCkgS{Ru8-so%@xbr>hDvq zRkXjFo}b6HiyGMuURY?Mtj>wsWos14;3_lH-uz0L0n0va(=Zi|Y0HL#`aJnyUHa>MhPLi|4_LQ6ax`rtqjHDoiM5Q?Fbl zH3{;O!rIKqDPFJ@CF?D=5ZgfnC?ZeoG7bq!pSc3IN1&7HQ~VDy_hS0n>^eGrRBoA46CDJD2>f{}#^ zIl5gg9)YbqZ=nn9eboOMDZ>@a_N6};s%vBbO*v^6;gM*$vJ-KjBZ6%|C|p4mQPQ}K zUSK_?FquD(?b=t9uN70v?rWj+T6{yq&kRU4GfR4c%vs5iI4$xV82U&8Zp_tH%KFoz zlU*3304yGa)k3EV6vpAYLWg;XosVx+9!-3o_I43nP!L!_un|rt%os*>ggK*cm2)s* zTI$Jw+JD-4%II@m=nrdc$WSr$r-$E3-fG}uJHEQy?JZo!bsWob8sk3hA91mlx=Y6V z{@j8PEp?43SsP6_)Qgz_7gKy5&$rh-Y~RvoBz5Tq;|++xRzMrhdJmV+>u8gM=4M~9 
z3koW!zNQiuFUQ7-X{Vq_Zm19mKVH&j$CMuDNnXofY3GqdV!zK3y`&fmddVPRW(&&7 zA6KgQCAc`11|C|DB8@w>U0x9QdF=<3Y#YWEb6mks1*v`3d=1a%I|{wLFYqOAic-cQ zpHr=E{Fd)~5jkZU$}50N0jPd4(ct`jE6cXm(So-^{m-t=bkLA{dMbAk`XYrU`6mhq ztQ4T5Lum*B-afb}2b)!Y>pTU^Ijc0Qcytc#N|fyrexKs{rA2 zXyAXhgfGk+Y4hoK1z*kpUrV4%!?S%yR+(=xT~RV@<&4hjol6D$ zgr()V?HU!le0SdfB;h3AT>%fo&v%Meq$W+@kbRdeeB!>b z#nJ5jK9B-Y#XJ264S4DAk()LVJh|tgxLH2D#ckHQAd3(B0kn--US1aPUjl=e9ed5g3bDcqFL8nq^sNMmwKba*f*ugWxrz2#A=vZ}j7#}Ks#$rk z$=FEF*X#XkOMao1ls)ziU|ZHrG+INlq*4k&qxdA}HXPg6@HA|Rt{f-kSU!0OvND|t zXF$i?qN5)$v-T`y;0t=^_Bc{`@g3ex{4^GO<8b%cnYy)@G*1*9cWde~#P>7l^v@vz zH`fpfR+{S({AA|WxgA-g{%I8NK|9E%qUrh~SJ$+yBs_x71t(}?kSx0wb+W`TW4)VDRiWQ8c zZ(a3F>-c;+Z6@Fu;xviJK;bljl>!-EUs9;kRxl+=lU zl6wFLj@~cj{r|_+S4OqjHr=)qcXurgrMO#*ySuvussba6@9(`CsKOG zL{;cMFj@`lo#K(Q+~$ecK-q!o_dgg!J+d8MB8eiDw<|#$Y`PSM!VNzyz%*sr4xA-1 zm_o`&i|_P#oWRF!%dtUq$1aa!)<-?laBKUG`CD`!M(t=m6==IJeVwHzs&TR)%qR@Az}yPr#M;DRNcb?Q>#jPrTAertp=M;mdWC-t(eA)Ygk!EzbJo?_)UM z0QTM1j~;ldZ#xKVd>mgFLzvgeo?egJ5PV7d-S1zW@!Ed8LBx-RZ7CL$)i{gZX{4#Q z6s$Lh=kxd=7%l%2mci!|X7BIfUmyRJKJa{3Yp}!TSZyI+LjzI)CM@{8{rpCvKgaOh z%h~AK@z@$!XG_oN+Q(Bg(cu&aPN#OC#>F(R-dg+HnR3k}9)W^RIRXE|w*tZ71kU** z;TjlwMu1S$nFlr-gdu5&d1hnxNUPgv%onAZIyA@WGDxMlS{<|Wu#jSEn>cG~x8a$y z+0>~DWH&!2?w8+w3K`OuSP$!BX3O_x$G$46BJAB#9{0TiWZ{X17aP<*U@u&FzP<}^ zJyWVs5v`$B8(6)le%v@1ZzX+SWZWr&@w43tFgo-lspVy1$v$@rSqz zrvjim9ek$+-E4IVZjAff@6I1y(Q=(h#JdZrXg+XV5WHr%Rw57mj5UN1C6fEDfP>tWtcAybcopQXlQ#hG3MTp4ODawnG@HQxFB>WismIQPcg0Wb4( zZ!T&%yZuFkkc>O>qDtI@kkudMs2~cuGVtFUPOfnFd(Mqm(ldsii*5zc%8NC zmoR&I)4Q#VArN5isGN84j#e>rD@($wjRHhkpOyfppmW!aM+Wc&&w{OIA06t=lj=&mfip-PTL+@-&J=x`!- zYzQgGc!BGvW{In%r6Hj|*`tu}*+fzALX+pY>V68}Vc3P+*=zcb166UC7&;qyT8D2v z{p?6ORoMU81t8RFMK6sr*()>B(fD+bGVL>+vT8y9W}@zdXTN#t4EkzbwAbBfq0_+h za$)-)o^AmYx4zorO5`D>70pa-6dnv!g$-Sg|9}3rpm&xNUa6T&vv1o&kL>zwqVcuh zd5QPKftS{eSBWK{7=}R)qku8pQ@CZ`g0I-f4tkI8JKT6tVU>rcvkqf@w^DsixVn>X z;GOZsa=ec%Pk1UFpCcB&Zt(iC`L98^d6^f!d|7zwv~rHM(CA|B?Ji|gea#uMyq?r- 
zxEoPg{`GY3Wg_8)r=mDWCD@q%Pt>1H>9M20iq@N$8&^g=y^n2V-DoGd!_wr!tnaM_ zW&yhxDhe;x>31*9+O3?bUZbTV2H0bB#gz$j9WQ4tqbv+V9A-5O7ZU(*4&D70OIASA z@i$AJ62-Nm8pEof7{!8z#A>PbphqF6d7S`vBd$UmkSC9_Ekl}jm!J~1jFo+vUny^g zPuBA$(JFR=*m8p2FF!ha9kWg7NnNt8(0MxWI(s;k{i$Skg?{}@XR@yQASPhzM3thX zGNJhFcW0^W&C9okI;)JN!-$KB_0kM|Jy+#?PwUXN!9)FcuXYyKbxJ3akbmy|f1nRWSAfSZn`mzxdzmHipHfH~}uZ+4b(3 z%y=hI9Zj-UmLy;PX;R2S;5NBSg1na5`h|GW?4oFH(9MLSfodh1ULV=pg+4% z_IpkEw)Ubz>;1#8#e94X4Na4)a*Axky@L&T?^dv7x1JE5Y?fq0)BY?F!{HUath@y_ zk<}O-3?^FdS$k5gC1mSs!=1dgT@kS-ttroM^Ig(<{=;kqH`mL#O`QCR2Syi=UFR@# z&oSmvSsG1iR$KLGZ5^zT(`;Gw#Yd|YQG4(Um~F=TP>|N_vqHMu2!*&QZk(v_Udbfk z@r%=mQ+95FdXS9sA%viX3>o3`|KI|_bTgS0L$Jlv_00&m&g#u4PFx-Rd7mrZ;R|{M zlWS7Rn;vHg+Cy23opL}fpxH)kZSeOKd%7|fWV&`OhMcRdSwWv~M{dhkf3RnwIqcPr7JW7Lwn@@B1daA1*R zAy023b#6TPE@}aR3tzF7gauIz=ZTaU@FM~;9VQ6i)x^*sL#nogVhm&ZUfI|~&zdUa z#ev8GZVgzQ(%>NzG!3}J@2K7fYX(yfH%Gx7n=32ntp+}gt}$0;IFxsDbpt0!fpuOJ zwjtLqvVJye9%;vNvb#4&l`!GnN$Dxy^I|eW)7Dlk1rc_k6RPk4yXS$tKISUWfUQjZ zwR}`sWhm(OhHcRYm*GJlJ8uKRqUUz?2(#;g4OHCWlMOFg;$!TWCuX)CDZ??h!rN`< zcxL7)La25h*7|mvVm)gwJ6J=?t0=E4+_UrReJ65Rld$7+IcLK#go!RzG61i`qitU{ z+UUhX&wn+|qI&ysXTD99p-QbkEKD7_Y{dAn7%xH;+`hM$dN<3FE@Rhm8p9PJw+}eH z+N5}SH$rPxg&>UCA{hWQDzxP&L8gzl5i8-Dfpy=svO#Qk%UDsb}N)e`>@RE!_S+#h9|6_JuH1`-CDZXyX;;Vku%Nl@;YOiT$+skr>iCi{wCfsox zkhY$&*;}_S<7+9xKBz%p*X2@+rpKh!eR=p%CxFUtKUslU%Y7hsNwp|bYdH4KN|lgcqZRHd*`xU z3&Yz4xW0=co>->YM2%5%y%F~zuc>LZ!V?mNt=vDugtB;yao34ADaAVuQ0-~!+e}gju&HM z1R?ie0Qi|Iwf@}8KIGr#kEZcjm-3~aP*O0nz?5~SzFyrXM65ow^m|%Ro$q~%#Agx> zSq50}XPu~oAj}ocYGQ(}=dwt3QRVI)@kUE0L3zyj#`1p%b?Y#H9CS?w4xRwGt$lvN z?>hO`g?DA?i2^?4hSJ!yTw+jZtlA)IzNbCpylJ}|q(H&{ZK12ZDHrTt*ZqsN^L;;o z$xy+Kn@lUAb{K(W&G*8jZP%LbQW5*e>9M&yNjKRJKD~i(F<>qE_FaG zbtsh=>yk-qi#{sPhAJwA0(Y(m_2cn@jjoS+Gksh*ws*pT;j^;PCldO9WyVLu35;-- zMk3J<$M(Y!+(#&=QOwL;6s_liZOa0So-&-@Xqr;%*Z%NakR}x!6tEZjL*>mY zUUB!-j^wfN zb`@o=`sA+YO%eYry(<^DabsL+hAkJ}cVjdY4xFQ@10!AzfJXCXkl>ae#KsL3@|q^J z!LmxH??0{41;N-W$`djjw(i^{xAQ*4NWe}ugOkLoYQ_w|Mo=npdb83)L!QgSgL}^& 
zBcWwtL^K{4|5S35`E1U{4JvN}Z=|X-((Ad=K))sJkvXm#;4Z-awhm3{ZaXrD)MO&{ z$MtU}x6LSEYf@34j9zUKI0W=YARXCOsp!eF3vPL_&Q|>aLL_8zbMJ*W2O(b>AnH|b z1lp+?R~Vc%s=rq2gOnp4M`2wlKOvV{w14t~)^jImc5XI9hDq1tgrS2u$(Z3nHbC$9 z@T(Yp;%o=PeCWds3y;(RkTg*jrK_U30vKMPpyM}AOWKs;v;0oEuw)rHEmKHjI9rGq zsxE(CaP2qM0s^fX52=WczNWxTHCxwWLR;uAtMe@iYd(n+mXHrCyREQaIo+FcEe*jZ z&_7A8<|ht_AXtq;&|Z)=*hCTMH?{bmmaRLHQ@1zQ^+h(|jM~_rkI|)%AGvJ{HMjDfTE-T+ve&vDc{BnB-z&n3$WV z-FiwWEG518#^~WiTI`jKPT#{*F%_bY;3wAfsTz<&o^OidWp5k$SpJu2D|BYSiu^Qt zCUP`(EBYpx1k#mLSwB@=<|xT00`;uRL^X>GEtdZp?g6u?M90Zng^V3eDjWxOK8=2j z=*nt^Q)9Ye+aP`iWku)qY~0vC+z|S*w{pF^`i5R*ARiLb@kwkRl;Jd$qxnJNQH0<^ ze!M{|ygKdjVDv}}=y_|&oN+fds3cXxeB-pvMcZ3>wXwjz#ECX3J6i2@k~(amEhkWE zSZjFJp<>cDCIUT~m3eb0N|!}a&`J9RFz_K&fAlbdD(Eb_`r_>-@b(tNX)Y^bHh8f~ zH(6F89PPRwU`3B=3nM&LkD_qjX*j3J4DLgv9s-%>kxMx>tf8Ud0p8A{*u#nL-75ik z+uH_k<7)B^vdF%`Uzl5_-b@-@;bWOC%@6xN;&xE~x{3a7Cu!TESuN(*y1D{)3vQQ~Y%jk_wMzFKw_iIeIExqaf+!;@eB_`@C z-gEpPuR7~osHF4hjMKGu5c#`3t6^GJJmj)#33Q<;G;T6~!gsFsvBGhxh>XQ@tdzL9 z^xO0WnnMMln8cjmno}~G9YzUnePH55opH!(nT#SKvXkA4R$7c_Zu;xb<@K#wcV>%S zfqxBI2+5mZ$h{bi%Q2;&tK;WNNO%n58C8(Tu8K?B1lo?ngvMGxKWfCu$#TPw^|Me5wXD)?71**)y?ybjy>@jwuwusP<7B~5Qh9ph zcy}j~qF1HqG?@0ejS9jA1#K0R0>4y+8ub7mME+3flm=jTL+E{wSb`+Z4|KI#aitDv zzv@}}qDBZXaJSwIKV^b7>95dRoBddPz!)V_ruqnOmVb@cGY$q)uRa zrx0q_&IcLoCwYFlb1@8txD;W28YlZo&YF!laAhS1X3aK;-}It}wpL*k#pJ%+%e`)l$+nO53CJ zc?HVZ)YdcSEQWbjbZ_&G`52FRbXV7(j+?ksbe&vzX_dBoFrIGuIIbT7XnJTCD*9QrJjcc2SgVGNrC)f`c&E+a9rq1pqTq%bS{1 z1Gul$^T@(ztv4W77@f6Y>l1GuR4Ij>7jim5+(WPOeG=W#U^026S{VBmzFCCy*H!r= z^vi@RmKAbiz15n0-o8UWP;>Ln!(%lq8lP$y6he|ZKLzs&Z|V+)5%S%LDY$Y1uBP2m zUk8gqzP?TU$mBpv3iBCNCIEu&Ar5a5ktt>6r_p$?IQjWM;TYsvy zu;2^_y`q>LolJm6IX=-;;nevQ8RnWf0XM>B4m-Pf(MMEb@M`;un9pnF$+ok|H7PVD zenJ=FNR&dCSj{$CqM*Pwv}=f$C2ss^+8CEEP?&soM;_8E>T8 zOYMv?;`h+g5`5bFa$EawJEuQ7N!iB^lej#-b);|W8+h*p2)UU^bh1m}ZGEZ{G}S#q z6d zC5(@>pPu9%(o^>+k=wF)NF7g1+84zTde=#C{<%@$&16_mLdsl-JpW{1;~;UNhAARk zfZB=BozEofe#}1A%6$wY4sDYXhf9e)%XsThue4R)7TcU4HBEjJrV{bn!N3Jf>K5#) 
zaiuwE5PP#S=?%_Ap5Ds~m!N9!IPP+U$yG>8Wd=Vg`H{7^K8{mWDLg9450-V{wKQRn zCn_rC4hkhzOl)VqIO-KAts4;691-&)Dc+REuX}*^g1t)6c>iMqg)w#dzi9hbmU%dd|PDS8lB383(ZnJ!;+0 z6PyN$Q@3R2w5z7k&cIs#P~sK&l=#ohVE6Z)2SU(RV{ z>zR{{`%n>pvr`X$wj|tc7FoXf(=2z^((G|tR;JYK zCE9~$z#$diW4!CxWO#FS^H_F;Xrs1`56vS}311rSBJCDhW`05?Qj%YHZ4vBf=7#ldIS`$QvA6CMi5jF2^Jn6`>QPWR4ZLe}1ee)<0`W&`Y zj6j-Lc2JJy40AjyDb`7}4G&*rT?|b=q^I%0uk)W{u&J7IEKQ-I&Db|VtE0>xRiupC z|EwKl^WDj1{af#rG{a=oc1ZbR)~ z#UfLlPlm@V3C{)4#S?N&B@cKY|BQTol6Iu9 zSH5KWvM3Av{j=EJfaLsQ-Cp$hpFhs~BGD!Kluz-MIQJh*$`oed3QAY9n2yAoO)=!` zT~YV_4_JWbqdFu}>Is0ZsZ>s8<~5T4Ayh zaf)WdD_Dom906Y38XxqxyuC88V-sX$1KjC;y-8I7fd`p|uf|^_GXAW4>Ueo>tHk~> zEk0v86w|z=-&xDcYbST_+~bG66Jbh>{5wYYpHpjMi$h6EDh#f#KitV?MJ=_j4_6c| z#w}EZSaFEJt2e6t&OLy2z$sw~9O92(O3d~}&aI=Z6B4n{kd`Qc6_dKe6<8_3B@>K^ zcz%mG%Y{F)eZ4_+9?V2(u&g%mOwb%AbO>jkN=Z%>=pu-xqb>(E>OM)N{}IhDYZ{bs zvyzo_Q_c{~X%c2-s!}7|u9G-I*J7?JYVbpK2MWH5a{ha9;GZkL4v7Ny zkUk|flPD!RlQJjx!)0b6gg3%fAdV#08I1$5;Vu&a^U&c^G~Ugko^&4xu)LflYDoCz zdlJ0s_^sHyiE*tJTA##i7qtnW3dD(Rnc0yo#Fy!dmKmQ(D%fZ1Ms0LLOCYM5yGr^ z6$GgXz)HxYsIkl)_s1Vvn2A$~#x1>662Mv!{ja+c8cvD00d!zqP0c3e?^^eh6ZpIg zXDX!wB??14I2Wove}_yl!+AWRJ4YiM!lyUMox@#C;H#c)cf#KTW9TDlG~7@c z#fv!Y^co@b5UJBbY#F_)r`O`VQgCO_?!*@Fc&wiciVy{Xexif?2do zGWPjlL)#y*WTfa#q5b0T+?Ds=zaXRgUOi9LUXdeiuIxs~tYT<792xL>gpi;G8`)Ck zd3NT)TxC(@<2;qPtBvJrNZLEXF{wu&CoTX$r|50#UQotqV5z3b)qb3~Q2Xa}Te|bQ zWZ-P>SF@hB#D@|iH){g1-u>Z9LYFZ0`0;Ml5sgKM5-0rvCL<1at=ph74U$$Y&})42t5EMv zS`cFw84!+L4+go6AEi_#PGPlZSnL0mzP%{ENx(bjp3B*O{0?umBQ zG6BbM&X4>*-8O*-x{6>ediko6#S$*tZ9z1Z_ppm)-ojNK7jb_WgMU>yoQuopeLd`0 zUN%hBHC<7q;7@Ku10T%HoqYN^*6dyZ(s_&7%zAXbh z_fr`-ed`$YP^Ri1QpKWowPKZk&uhH?o@3%S_c&$xawSSce}Cj=Mz}j~Cs_ya31Rm# zJPqKB4C%l{Pe~Y|4mZyTe%P*e!!7^F($uf7nrcGZSn6Y1QZW|+(?dFfw5OIgd6kTT zl!xjNNfWcPWk!}MR53*;o2fM@?j>ZK75BN`8b`I&b^0y=Xp27IBprPkxVXR-neh!> zYwcDE(ZqsZP8@+8&+Citr$|M-#gnybGf>CdE~^NPA}%bH&a~&OOKVbbyfpGnhk-J&MDe#)>Apj zbQE{6fDT5T?NKg4^AaB`OPfS7j47lbm=<%+xWrG*J43<7G|ASh(+^a)19P6X~bAVhnIyy@99y0Va5t%7w)1s2=5=ED2rG~<)iX2QI 
zDXJF1`Yp3h#+Fub{wRX47*${9TPu=*Nhd#RTp7>Pc`J9F8Ciw$obf)@u2&{x97Dkn zw;I|L5png#*IiJ0=c!bT{E6lMh_o`Vz;hPewI{Z;gV9}r> z1$k83#n&F6{b!-ae<~u%fY;wA$}W%aj%U9eN(wtVqvJwfeNpoij=Z4HJ=g?$t(XxM zP{PT^H~o&9^ZAn;KbFQ2Q4EGSm+6Qn{=36#!jR8Oql3D_PGZjo&0FT5kP8zuFe>twE13cIO&+Jw#3!_=YKsS!<$%#GQXMSrSW7dx~Be$!~bxo2H)Vux2%Yzn@ z3fi8BI$Y)Dz12Th6NVMPV2&2Uda9rf5k8)J66*F0=)Ic}^h?3|a~Wit1)%PY;Ir0q zJZ?hn@@LRIvT|v_ke;CzcgR|&bx5-$+;AlT=dUq?p~d0&I+Tf0XZFp7>@~cyJtq&% zuBfO6{r$9C0l&MP7T~3=eYcSKhnHpmwOhh|ZU)`_L%WRYPnbcak*NM3+z|Cd8@&2hV^?O0vatrverQQ_!)!|%m2sq{_fx}MKXv7 zsqAXapCvw^=THLDir5Nogw#yPQu*m|U7{mJP*&r_E3|%xDG{N&|Cm+u@_6n}(&nlR z`YThbURdS&bolp7d>)N3n7xcrS)5{wMqv8tM<=5Y_4%wKt1~=p4YW>;vQ=ap6xX9X zVA$$mz^ZSEm~+h0TR7pB*ZY?fPmN|Sk7nsbrO)G*X%xgCPi+Hcj_e(A!&7l}c2?A0 zV8eY7e4rPz0r=|^m5Xgxe+glw2Lb>A$I`9wX48iYi#AOgVEd0a$YmE{Sjk=PP8@7h zPCFc61Mij(xjis{5VWt;=~KlN43#q&SneG;Qy!uC3a8W*0W?p7SXT>C*VU z@|-ZF5cO=8$=^d1=eprPA*_+=zH|=T%H_jJ!dX=t)RhBcG!3CwNLsV+*mGjv|BBUkJ zGVu1%+2Mc`X(G9kPhg<*unV<5p`8M&r13EBYi?0&uTAeD3vo4aWBuCQVZ3h zjA}!H<$}n(hE)ISI6)=n>Ymnr(CB}Pgzx%@`&V={6Mn0nJt$j*Csd<;ar!}uNz_=} z{@Stg9|hG6_blJTz%yp5g`4ef4U$jcNip9#G=!(WFkhvi6}3=2d22xx@;iWLM=0|4 z&S=Fp$}gqFMC?gar|3(KF8(WH!i?1?YZo$GGZ}RgO<`Ep51Oae&=W1?CTqU1S~AH; zV^)iasN1reC^*<%K{GyRiv*tFU;MN|t+Q9;{D`n3l4btfJZGu9;1kn(y(%12bWX}| z(H4oTV;R1&V5?@*3wbIm=N+D|AQgQ6eB$@sv8Pd%%=+bFYh14?4caWD7A~;H%m4mE z-h33TUv7q;_-uZS6}fYxP`?ZSfuq!(%HJwzO7Kv3ilCS8Ly^Cb7+kAqSbI}@7Z8IV z)2Ur`(9sojS&+UKmC-`lAUd8Ge=SzVESU9E1Z7?1-1EGo==A%z=#|y|QhP^87U?s= zw~dk4;G#!=IE0YQ@6%fdCYX5oMSr#=HH`{HluUmTryQ$@#m-I4NKwsAsPS-nP7&)_ z=GLg~=ql)zt2@}UyMyjH06?kR8dEffHW*%LWL|FZ_MBF>!qNG}9Ka~tQiMv2R1x{# zwS%3{U!TEz{>MdG{}?d&Ue=AjA9Mw;SJI7<1xSnfvI{*C9GR}qpH3Ztn<>jzXsAn% z8d}=J^`>FDeN4b4-~4YLcXskF{O>O_J$s6>{NMSyy-IId_rIcf+^#UUhNly+hlWt@ z85Fe2xhiL?{!sAn^&B3gu|-pK*fB?Au~c^}>QgeNM9d*R8Fw%n*vKEMS>U+J%F4!h znq~hSjdLZRuug=$cXdo)gr@9wc(<`#*>2Gc>EvC2S}~AXA*%(0y`F5P$7n%r zb673HcT{p|Tvk`bWyk5XOStO|uRja-CBkeW=r}VHLzQhUrR{^z^;TPFeh>`J?D$tugw5{l8JbqJ1X6+ppEjXrV`LzQFE+3A3$W5^s 
zVNRvMmT>guCD$k^nf+_?@%?Fqt`}AUq|P$sVw&}5J9?2&(tPomV*mtX+TmY|$ZQTK%vNHQRZtN>SdB@?xYn zv>rvl)CrhHQ66mL_^2F4=XNL8%s04ya_+Sf+3*`>Uc1PshTfHel-vuxaIytN@axc$ zQAO20V!#4Tu>{k8{|gMiEm}E;s;*^xX{@V&rINn6a;FLd^5L^=7B1NWO7eIh((0!)<-&U6#)i z0w&qXxW{5{j4a)+$z0~Ije+6bS|;v@UexvCII;}4G*)!iav|tKOhfDjI+5v%D76>GM}12Cj=!=!yeQ`a7Bp1W z6|0d1adoof!almCd3Vo3*T?=x3i+EJm2oEa6pIN&V#-fTkP|>P*+Av|dUbV^g7pxn z)coUVR3`+vG%BrPKJAi`TxaKOhZDosP*gei6rtm-8{vURhFyvt?QmzA*sXU6`ooF6yJlxI~ z@J133r+=ZOBr_QhN}~IUQPvgi>o{Cs_gJagA(-DF&#Zrw2VLUYPp3*-K0n?3helvH zC!g4VqMf8)2_j-Te+Pe6L@x@c$$)~JmTg;m_}%;p7ab_~=1FXUJO_==qZOC) zHCWW;(0U5}#g5RT6zE)RjXNey2Y$JT_t`dWJpQqcaA>`{bn;LQrI(*M1PacI2*yO7 z9G+F4LrSR(DizBP5m=aHQW!P3sxS4-Fv_BZt6QoIe4O)y=JUrXAy1hch&a#cJu~-T z5QF#J?QQr|a$JXQe#8yC+qFSEcFC(x{RmdqQo|&I*EM)>L}Q_c$Qu%x*#>Uum=UO5 zBPtCxW{?}Fz0)VGk2$2APs2bdHB z5ZYRI3;pSfkGE_WUt;_aevxJUH89y;rKWPN7(fZ?RF zoZd&)Gi3yIZkb_D2v@-m-|U2%4XCQ59eRd>Y>ZVbuU0gYI9`6HbMz@T*XdpuC_VlI zF3agw5@_pIH7sVPmS!(k(;A|$hkU}XWRQ%DhtxFI;n3|8vyPP<|eAE{WY$q%W|g0iT8O~ayK1nkBLZ16{(Q?8<3)#iIKDf5Kq_lID1Q*H z+0Y<#wo|qgD;p|RBv&kusTgf2YbMPM;8bg;)7@Bcg-KKA9ui7|zrLH|fEp%2>#H6L z66zUH&|4>g=!v(OAJ0UJYA7}NGPt%$l_x>0)#7o!FcpoIB8!@l3xX%OgXa^y=yk>K zj$5Ta;EqLJi+PE+Yz7drIyGH;JkvEqXXJk=b)ACq@XGnP0%Omlx2ej$y z2I2YsKku@F$pMzkZ&bgJf(e*ElLgKMs`f;nt;LU@c#OZ6A{94VO?b@OZ?2fH_xvR1 zbavdk%f7l>NAPP=NpU}JZM41_K821664%M`w=fn4KXh*ga89vspU#(}Iux>P(Zt&DrlE1o}P0 zA_QgeSGNvaP4%GKW_Ru6RF|y<#j}!t7|BP6?MY#1@!p`nR(It{K~dKGTzp1 zxAH@`^}BY;=p?WTURh5$xt&;)3W;5yw}C#?5Qst@+jr@~&;#J;-J;8wEfz8gZ2DTr z=vInFIKiG+ISbS`%&K58aql&tz+rHKvBD?=qP#Y`kd#V{R54s4+<5xP_a=xImQ4AU z<=AqBmF4iRL5L7FdBbCcLEK!7HtEW3Ch^>LWJU~{bkM2pHzn>M2VG6k*&e7CX#4OP zyCU_qTQWVzS>%Tg1!k5Xh%P)QhTaD~j5w`<2nS>-$Hy$N_`ES+%Ty&m1iw<hm_eJM&D5CuiObMe|)1PZz}95zedwMM0a{bR^f zS=xPy*opWq1EmA<+8yss!`qGbBH*BjYxY6D>9}{ZG9ioLF5|!0j}p$tT`%d+VXyfY zYyS%4j04Frwt-56Ii%|}x2J8#O?UU3;Wd;Zz?G(plY76_W^A_dcoT;bF8`MlTpSKNY(q(S9ek5(cRD1M@sM$Q6_r z>YSCJc|pij^l+XPUs6;g?J!oCm#Jd`aXNpw+q`v(C@Y$AoTuBVnh%Jr%|faCuC-^l 
zv#{8>BX!QWHdMlTn9dxNF(+&YE#~FjKdyd+b7ZY-k_|Nm#Z5hN)n*|*^mKQM6S`;T zgtFbs5P8N1AUIHQoOZH&h!g^SrN-cR!3-hq(o>bb#orYb;4}}{vR+Dgt)TVV^*^4~ zT7mZNenSneey|3WVd?t3_e=;#$#o*;oU+$y`xyl0H^HCK{k3YZ>bUE$9c3&n@u-~Nb%rXXo2>6!qR;8cKMF_&9+?VM z93ln|L@zt-q~BvyalrM!GkblhhE{mV*{e@DjZzHph9PHAw2C)+`l)(0U|UR_@vD); z_41NVsI)M_(dJalTZbFhY2eYWNu_=g*IDKw1lL97g6>l|UxK|HUw&-u!q?klT`=-= zw6|H@4*j|R65o#WC*y7P4uq@H+;7?l2^Zh{O0T_?nQ7Gj##+j0Q~DAVr&1#A&u~7c zwH|Rm>5HkymG<7$iH=+LS{w|!r2o~Q)U?w&8oRPl3m-F!{m*N^2LVBAln? zY|DiNfhIa9;RTkJdRq^;f6-^7_#gD?C<}$iWA7U5I3-CY)rDk6Nt<4-m=X${7h~pr zzzpnD zBjzR|NF1#Y6ZYWp-j~iCrUg|a-%}u^D5C3e5$SrUC+@BJlo7d`PldNy6?Hn&&HC_M zkV=18aKX^?2S!?L_b=GwKL_0aU+XBKcWmUs%%1;1Z*)7J z*&Uj#l5V;ad1W4+U@ouviLW)``c&47#Dwgp9bsHv2BemqTB| zN{EFBjPZa@sK#D0M{i(D776)iHZD)I!AYkA=urx&)JL)NCZO|83%tDzd?NCjpz$pS=mee8a)g zxd}HU-^`NynqU3+X{CRw*i>lHx82JIXDKk^drD-;(Ef+cW;4AcFj5I5Iso7rsZbk- zBrJifSRJgCWo#NPD%Xo8l3f>o1T2$yh(`7t-YPcan}gf!I{7QwV>6*q06mOXOLlpw!) zWyr~6HKogKTRY}CjxGE`dTWmYtzpdmdqBhlCW6XY_s}Y3Pc2DUwAjsdG0|nmlpU* z=8e8k71P>5a>q;ZP2lR2-_YL}+h4fpE^FS!|9V^M=R`;F$jNfYOevX z5=o!;NuMUshX#h2bMQXzA3y71U=*65iRCEWDrS&y)$R*FO98WP05~C&=MJm_vwYF# zx#_{{IKHP71~m^rC0F4D4eiIvogVZHO>o4#Tp9PL9)KFWKdsp*QH-*8#lorh_I*?wc_iX}Z3&^Os_ zN>;#>w+!UUGhFTCPqLVaCV4wFhgh{l5jmw1>jusqat00FlPbjYX6Js~PH_aSOJA=# zg!00%>WkB>=bUTAP-ux5Ko6E;3#1>Y^35YEL!K%<2DIODj+6HKL02J0YCOW>rL>Nd zOO~eRk1Xph?#G|4Rd$AZE!UhbocH$!Di zk&C@06W)W5_OCOx)h8ma--`?Lyx*lCh?Hx2X{~O$P9p82=)Hy3Ebv2U1^PwpE%sVY z!w(ZHd>(Ri^Am)R#z;vsi0G~SfAq0E-ki967I%6U(+j*}Qro&BDe6QRiG zw};I=DM-?Mr$z}~3J_x<-tsva!Q zf~PwmpvHz~bIysjJn5{dqncro?@;6^;QgXbm6#5{)Mvt0gfI5uYV*M<^cDoJ0$FkX z^&{zs7p;zL`4GpxKRbjDTT5F|9x}pd-8>GQo;kAWi4PA#zH=Oy?CeEBSQVxnUj~CN z+(FTXDDuE{UDnVmMtR-P()7zCKU3{#n#teC3<|%c-9X=4^LT^wbI|(7#!}0x${}={ zquh!C?jMSV5_cuH$yaom>KkJED1dDz&L{shd+bc|Hud;HdZkX5wLwC3ovCG0lkw+VJNJ+3^#$G<8AMZsd1AAQmt4D+`@q?SN*mqs ztRuj!HCx&f(|y8#?!%xY?o1Pn=Hgc@%~VIr7dlcRL1?XqyO$s55`u|OdVZLi_K;!H zzqbxkcctah>LjY5Y-;04T@6#()s04LNFaqKeG1>@8rOBb8b#i7B3J78sLuDg!fL`p 
z-aFtbz@hF2RRs!qsArpkW%ngdp#bxCY1G)SqBh*E8p;7)%d~|thdl{y+)@E&8ErpE z-JZ?|_HqPWF>_VbX~t|xXdnV}5UCaa@5^aliq|7FoBc&#O8{7(>Nf$*GHkzeP;UY~k2$W|07i?W_tBP2*-YwM*(sA9iU0)_p!J+s;vHdK|2L)`tp>I^W z&S_&q%rzbD_TpRGZkB9Db8~?G(t&MResX^t(=?l#f@RwjA9NS{jdF9rfk}$Y{+;q= z$?&?G#&2>3J?%y1y>GOqTLFq&8#=YYCr{>nl zEp+ncr&aj8RvF+syKjF5Vjot!o_mL9K(YiMDVl2sK2U8k-N)63sR|NL|k_ zX>@L-2cu=A{ND}eVU#tx9LL_7cp;0xfTcfCvLLob{7Zy$d+VWzvxdR@g?iw?qWRBB z6H0Xa#)h8RN-gTmVQx%p)Z4ttZRh3rW*C{stANx%wv3w~>fbd?CVR^()i)Z=JK4Iv zS)u^C`b*bHVUy7fcIQEoG(HBdLFrTOJ2o$HM(4l9J=5NcLym2P*6qQO@rkdyvHXf|h%;l_+XNmn5KIkfaqAQ6O)GL0CD_4*fX6u82APMD zHsyyg_mnPD{mBJP11^eqZk$O`7`uX6Mn%W1&2udjvDk-wDz8reKvE$vnM)#D1d-ek ziknLGZ`C&MHTh$IKnpX0G0_m5gi#9&iR^&?2?;(QL&n6u7o(jj8e5zU#wz=*Sas%F z4*F`iq9Q5CX`n%YH;ji5oHgc-bJ?3%weUaE1(09ajSen#M!%%eg}kUW5B?UC{RMW6 zgM01_mgopIFOL=+p!bb*&w-xFpsKu}f7Shk<_HzyJr`E(E*#M)W9vFN8q8`1W#XV{ z>d2(6a@uePISr#voWbXt5{ufEdNL4FGXmau>g46pz`X?)f(5LN%42CbA5d66F4x`y z=#@y86$TrHW?#v($+v1QO}oEqk&8)ctAZRk3-8*Ab_*$POqPfY?8Th}92Oo8a4 zgTGJpI=xxU=Fg+#l53_cZ3oGUC68YfI=qEOo*08aiS3i^FGSRMdJp1A|D!*B^!)B- zUuO49>p}l~Fni~*p>PIo%tOG0Pteb)eXC7^eWc`MD7i4E-Td#Y0^OwCKYy{b(OCJT z7XOe=JjG#wEA8}R!E<5vX||(+)oN^hNv@B6%D^>7&WYsv8o>V8=g_qE%U^xEOlwkv z7fYb1XD>ctIW=lxB5lkVoMHZ}CxrOwmA##$bUT(e9sK0P1dM90Q1Jzh&-s@!fSA-W zyR-WOHs`kmv~49B6<(+Y9Ux4w=nXhEiW6U9P)rfQy)kDeXhF)CY5f5jkl*x^LRwy; z)GB_7A|epEE47+7Ra5DQ>i^#I_TP|5LIauOR`K!STUT+x0r~)-=`X zZo{q2=zE{x(YybF+hCgoFC?n+3ft5r=D}@GJbB2sH!F-hxHCQz2OR54K$ToG-QLge z-j*T;x1Ew&)_JW>kot+_yb-DtiQjt|Jtf}t(xFz6cd7fN?zWl!ln1VO+LAbt4H|A5 z2_vPW28t7mRu|Eyv0I7}6%M8AeDRF$Ie_^_?2Phr0^o432=!gskfJhnEwR1ru!!Mi zH?6N&UB?r;4Lyi@V zt*^H(mDf&Q5`l@wG!bz!R6#wBXg;MbL?fl7qQW45W&}u80^)jQ<1}zwd7VgWCw|KC z;{0)om81811RQm$SHg~34z<(H4|u_#G=PtC&k?Q%i{Qr(89Gs!HB>)UAkZ_5Oh|_9dLv2X! 
zGT*T%Mu~Fid%?oTQBr~9V|mKR#A2ipdTbS~C8HQAt`YSKSi`(M|0aFAA@=k;GpdiG z>V@O0RA!GHuc-)S`^>Q$E?0={I&QBmZ$8$fDH?@K(Vb;))|iE|h1tOCQr2R-vc-2s za0i$R8`%RRW4d|wgS>#m2agt7is!ODKl5Wf7PhM3JMqDWE2_l!1$C@-oX{c83lr;z2(t=6O{LkTUWItY22K2q+9v{)@PP-oM0lb@o59K}Am5sMXsjQ$KQ1t7h(c74wX$Mq zY1QoA$aG(00pa z3}~5{=~PxwYU($$QZ*`8H?4_y2bkPY5TS9#6a~yr0II= zW$Aj-9+J@G#T3ZxBV>2dUxw;Q4ff;0usOTgEu_O%74B3qexPSy|sI|9)(A`mv$Wnr{Bl0WGf4Gv2z;`*NKF3lBeNaZtvr;yj}% zi&TSAHg^dLo3Fu^cb|b2eKv$UM30@|E-(&bR5V>~c2G7=DJ?TSvnf(bCdEgp&boe7 zDs#Nr;@k*8ZRfB81NdV)(_io90Ivgo6rYlXgeY=ghv3O}+hsw$LdXdT<9^Ab_m|-dmrH6}WCcH~x(AhuQ5FjYNL`mdr3fY$1YN(j2 z{G;@(C}i^VqsC%05L*wxX`*t%r= zM)3PFM_}X&g!cg{ObPdJU$b`Diz&q!xX%L3$M0P`wl*N;Ug<#ps4ZVxX^y)(tF8<$ zdVVb?PVvw=yyBE+bw#Gf38grpV44T_L%2&ixG6n4-wn>Rw`zJ)&*hd`+IOQ^6%53gNpz4f6hW-{16ASHznrrJ0 z)JH~gx6iUuv2x3fR^O31NyUUe-f3%^59Rj_W~YsgNFV^G+*?wMFaV=ST1Cfs*Uj6M z#FnMrmvM4bB%ERv6Pu{?eBd0cu(G=qf?#jW?hU~j%69Mna;nH6x5dU;mx;UtuC-xr zbgjL@yzcG3%E2V*YT;REQavXA+4=-L{)a=>#E2V{Myn^5IWhSJ{84-t?=eE^n~*e? z76PRe7s0Kf+WDHrD6&Xeh?GTRyEUYqo*>jRyfS}(&#a`R#Q)S!#NdpN<&A=_dZ7)a z^&RE)5;(;w?KC{LV}R|{bFERP4SOTm2<~*%PQUTicREV(an1vl1D>-#GMu%NN*bcd z_|z0ta(WOdqS*<>Uv+_GmYFF?HWdS46sw#*)ff(8DCud3!n-BJW9X#0w1i%hvqi$P z?dK{}Zb93W*VA>>8blGnig6SpP1)O5=hIi;jL1W)EYOh}jAW2tFfvyf8HpYlZvgM; z5)1(3>yW%WLEy;@JCFvrhv{)!n1Y2&Q&l)Z^y9aigmqhe`DOnr)gtl%Q0wELX=pOe zxeeb!Sxqj7%z}b5n;;_(l87c>JZ)yE@HCuuJ4oH0JjX7MuR*~eEXyq#Yu{Gh&Iz3h z%hz8Ne=bf0zcMZs?JXQ-^9m9HWFUIS0I$1mUQ8(dg^ap_sfOX}St@LU`r2s+jLBoS z`D?rPw%yQ?Fnut7HvPZ76 z-jK_0!sn<%T&?oX?9?3%8vfOnrD<%$g4$qxXfy2;-j#t$0#UDKgeAxl8H_}xl^1F= zQc?u{XOhkhM0{(KiOS=PSA3_NNyX=G(S0_v%IW^}6;XuH3!zjRb_X0bhA z*Y{U}5tbDox8_!QX6!83J^an;yxbZg;8d&Uv@!KR&lXV`l28lX_t*$ji2`p0c&Yh+ z>Zlf4B2+1VKcsqtwrs&p(dZ@nX8cvt3psYsY^=BTsB%d8a-+#}8|3ad-IoBO$|D3e zwALl}fXf+^$@Y{OCFzElVpr*ZnAoGxmSyGU|UU@)%yg8u^~ z}|D@bTDJd&QO3BEC z%fZlfKIhTGQDtUfd4RZ+)8lT|BO^q&=eLs0xUjj=WnZ!l!Zr=7NO2UQ;X4O|8Uj|b zYk|zQCyYm53vYx!f>oK>J`QDz7m-=%Dl{|?bWI0TSd6MG|DLaPV}sfZne)c34&Qw! 
zNPqg{y|G(=a!nzeR&t_-foA}9_XTP@iSxCMke&d>6zNI7Kg`GIFBfr&)P4y&60^0m zZ&?&TX%~3rZWVQkROVkx!qOrAv?hf`vO2yWE_gg|c_Y5vT-dRfy=G0!Aq$XzJ-=w7 zHK$YLr{p!(g;T+({2M5mYEbG9*&d~?#`dMgneF_5+Uo9iCVkG4?7_)nAX%7?P%&?o zYxkfgK5Mc2CiHdscaYu#WwK-(5WPP9T;@42VEeqh!NWiyq&c--S5eILcs$RQ3cPg# zyxZ)sQ}5i%X8IdY7Q+hAUhDRSOMJlA4jMzKz(f5%IR?4-(+oT>-^*-)9nP5d$_<;P zL|v5_qdze2Pq~pc+~Xp1Ad#1uzn1taW}Urn!~lhyqM`#Xx>^;Xc$Vj_I6K}7BA16Z zXozUm&#KazUASgn@cWG)XlhWhU~Y)eO3PeswZ4>|W>cG4b)gZ>X$4@M?8pHdyOO~gE>pr@Qf^vt#b`7)B^MJrX8{>^@-a5Q#XF+cG%juXDE9fSk?KQ ze4WU1kt}EARCY`4R6T(S1X-Y|>JmKnQ%XAD>g1FC;CgcQZ1mY-kF zdlR>t10_E_R5v7>0&;XG6OlacSN zi-=4>zaCRgy_%ev8xW;Yb{cc#M5FK4KdYi%&M1X@!~qrI$@~lQA?kUvYfKd{l)u+y z4Bp?r{(edqB;-4EPEGdpWYZ_`_Fs#1yAL8RJdJ%0mPt=2x-BVkO6A`5T}kNp#D1v*8UE6ahtkY0nbDkM58EwG%X zm9j3Z#>vESUw=?Il(qFetnllV(%@?ZjX$Ift2i-FxdTwEe1I;5$@cDi&X;h_@>w?S z5}UTK``_5-Km6C7ykr(T{i+|)b2a!|b!^ii626DRGt|;Be4ZO+?&_{7RKn4(NZ$2k zg!+!=EgJ54iUICSAXFYZCFHbHUSD4UqUdF7xZ&bF2d(yHjOuxs{H-z)rVx2SzRM{l z(0WgQACG6F%x$va(S8d_s(->Y^4#)@dAqJFuvp{*i-q_4jEeF?zLv?qhi+2c;-D~= z=P{Po?+o!Zjojy_TA4UltDjt|W2b?(LLLOO>GCVbP(Nr(tmu8dP`t!LE$T^(1RlXh zA~q`Z6789BEv^5?fTQ`V%Te1z09u&@YN~X?u>S58tSTul|J}QoIgM!BLkpTc5Uv}s zc64jGUES{eJYMkKoqT8tv%V-pCK4!O+Ep~nirt_UfWpJ&O7%HZbd2*U8~OEH+dV4P z%keH2SOtyHxF0$ue=w?ooBN_EFD>aJs5Sz4+uQG^wxgT8Apw9t9y4JaXv2f;%7E}N}AM?37~>;cq49- z%*TIv{uli%XhKXEZjp?|CWzu~ZRwv6#7fX~%Kq95@vbetbSd&6bde`Gl4^jv&F@ST z#GF=C=Qr`TW{7x9h=H~?JYpswTN&tvW9P@MJw#y(YFv-l`(Tv{nz)Vp^3{x}s4}t> zqa*$mF`fJ&yGGt#<9Nj%=ivXbV^B-^Qn9Hkm^{7i#mE2$@OANAC~v71zuMT2KolH; zw^@9Wtf6my0_j)->f29^+hG4Ttl|LsiICn&#r>j|77oIJ5(QcNiQK|6Y)|haS8z`O z$|qF;w;Wipl_Fv}_{0=dNb_7qHK>Ii_Ywpae(7qE)7~F)sOG)41%oaZbXlKpA2U03 z>!nw=Ju;vVrK84K7MiUMA79*Qqo+uT)GqO&gXYCwFL~=cgoVX$H$S24bzY!a9=%4i z+0sM%t_3b+E59w-m!)60tWLd0M#s**D?>{F=ZH&a9u@z+9nS;t;tB5^-E zv+Tp5q-JIp6-}kP_XT>N0BMHjxpf$-YI0Lzdbdk@B^OIZaO$2}qb<8oS#yW3Aj+L* zFOb~tLZd~>RE2ZY-{){em2+e$cVhD3skY6+*-IRS-b(7RDVaFhhSLFt(L6<)hO6?j 
zwkzo1_hGLq`x(zTTEHf@$Tlbt6Dbc4&y(B!iNS7RdlO1h!_VpdSn(N;lSr++%X5ixnvN!72Zk+9F(ts)!gi&bT$2#)x*7#Zg&W~b1oD=EZX5f1)|2|lyt?nA`N!HKA}_wPqz-` zjR5?mhZr~Qd;Fph-pb2+N>^%S{QE)>BsQG}Er6$vo~)UT$5kU+&Z+l0fIK}Ac*@9_ z`JT&SL(J}+CsNO99jA(Hb^Ax-`z7->DMn7|yHMf9EbFtsa)hZt; zUObRTR{5R0Uy3_4A&64J!TrJf@;NuB=pF_q^#ulKk!(!>w$wH|QGw&B%mAHpdm6u7 zS=8;W_jR~+Y=j;w;^Z!^>?>S7krKc9$1I<&lv7rhs&bCf%JKq31N@?Xl*wFzTuRw^~Qj)DTTewd6e>Ly{_Mytiwpv*la-xk@^i)il7LX$@{7|_nI>sYrQLJ0(|F*X^#<$i#iXJ z)$-LkQczW9NU9(I9RB{f?yC0#NTTkR_=VJA=BOph*xnfuG_+uTe&7A4vCla2%ap)1h2KA^k9xeIqBr2bx+@Cc z%mBB8E(#-DVQ4bWUQY=~&89~@WM$p%KuY#~$j{9Upr5iXiS}23;IF82(Hl$xDjVm{ zbJ}A3HkvgeGM&_Nox9Xb$zCY~1xAig;d0$6yZ3kjdPR4zHfeZ-xZ9XYn6bhmm3o8{i8=*1{yP3O zGiWCuT5v8YoqH){09NWO;V5gG*k6?;ILAv85PlUj+Wl^KOpWr{pvne3{kl6Bw72a3 zL=MfF7Ckw6Lpk7A_>zMkd0t zC}erw0avc-b{y~m9qBJguDheNo?$f*y>TfVeo0_P@v?4zgHTBue!fUXfGZJVhtV7b z+S*<6$73h7Ryvpv1;4Lqh87%?P zm6KOmKvOG}55=ezWdbJxL;UqD5HBcG#4KsX_?k1~E%Wa|jxS7ez!!D%t^i@6btEJc z>Wx2Ubtyve-0hF!gKCbir~&fFUb~{zH<6#vez)eu1C-_-Bg^%bfr=|gC=$>zF#Yc! 
z;R9W;*(^83W|4g%_?UMWojD&C(lTC+x4%qTkwwYcGj}oUQ{?9?D}*9EjYga+l+I7G zIgL3tl)Lb}?-lz7F%wmGqPUtHQ{%@A8+qa=RrgHg&?E$+JfX)02Qk zrmI)p$C_lN-JI2LyrEI|u%tNFT9C%Z1}|d7wJfH#@jttB=toee%eh4A$|*{arGrGOcN|N zsW3Ys@I-XJfp7j8E~?~w68R+nrRK-t6Po4T{o)dRRqfZ^XP!BPPjLMklx9RafS_ZX z!cdXkpRXjHwpHXpW+b8Xs^2#*o(>$C*+bC-e7IytM}+ZhC0P;@NBNT|k_Z9_2tG_( zJbKoz;Er`vS&VV-@skxsiK6X@J)^IC!}UEu%_#|f{6#VrOqJ>d zLrlqgPN`R)qir%!)u10)59gGuh>Bg%)YvP2dV(tp%jbgHE+kmpKMoYm?LJ$j&N0?z z`HGv(Z!rxubLS4qQ3%P^;inukO>c*dkY=c{LqvrjGqpG%@e^)mD6X^G|1i$PJ$R4~fZ2ri$#MIhOsqE$q z>5Q1<<^wGl-bqwCqzRzWX+psVL>tBk%PMrpZo<*nxP_?pHwjO676af=6bit3ur2K8i=43(Z=2h8!B@FZqiW1{(Sc2MUD?x0o`ni*W^qwNDJfP-!L>?%xThX>#_b zeqYf;<3b0%AEDIj+jBYsoynUID&Ys!las}EjE|nL8J>9wB@Q-aq3-NjbJ#J?TE3nwnmsmQ}gMd+)19C*bj34|FUSW zY1>;s*hb-$^%mCnTJPF{?U?wrk~TuMhkKC`H_fsA5GKTj*;l5|MTVtWf z*W-4h!G;tW%q$y1s0=i5NN&qFwCuG(@)UPV++MjVQoO&Tddz?s;!bkgvN?7<+ALWA zIJp&`(#P_YcTf@v_mBo|sj!LBOZB;VuIlnmmD7uq{hdRxtSfYu=tD>#IkLac(`mCL zE9`T|MV-@9x60(T#q~^iFFM&x63s2>?lP-SN>k1G39eoBq=wxEndx)AzB^0If5T-x z5P9p5LR|$=+GKY$S8y$gd$XQOc3SuX@1O9R_42KsJa~h)_TW|?=)j*43c+AU&ESKS z-C}T!s0yjKV9Xr-mEQgQmsml$U#9z`wRy-Zuk-siA{1aq4M^M=q5aH1+Ogb+#>L%) zkbU}qz2y6leg^DJ^(H%a!2!JoS!}0vd;;V5Je-iP+NKIbB~T?ImI@XurZk>uxFrs= z6C9{xx>v2r074xP!}(yRm1Sc%HabOPE{{7lX9o(@_dFkpVp7o&v}N2GdO3|l?;yY# z?47QQ6Dlpcd4(K%wt|e%8^L1R6}iQ+nZ1S@Z#M24++W5VqO%WXr_1r&^@`~xw9C~S zJubEzY?j9*2Nu?Jm%~U~2rF0VQ;88AL2s51I&fpgzg$e{{RWU?tn-bsY_vOE`yy_O z45JZ*9CA=8?E_R5+@z6c-IPO+TxySy?zta%szN)>I6ShSk%e2I!rW=SMWB@CFh*Er*TmpsTF&?vNlND+#5wrZXC21n z>Vy7$e;JdA0VjYNLM5KHEK>WpvF0+ay3m; z&m5La$NcQdmY3$$ZDG|sM!`K8D`pBGf6cS%d{E9F@kGSYbx&q5YWYO;TEjy8lX2=g zuX9{R9hOT%f<~9a_SH_r({^BWEdEelQKU64&BCnp`LlMj{N%s99=q{Drwachw(wMZ zkJRuXY-8a07KC1G9U!ao;qbiuSk<$}>I19O@JQSIYk=V6^E?wu4{c#_NR|_(?UwepdY^=%Iw)C`4$J0hYN*1 zD(gs6>dNOryvNaGrpMIFL|j`c$CDOk=f=u>+W_I|baQbdYCvj+mTYqo*8ur(6;+e> zhmtV+Zo+}->|quDlC`#_C$s3>S*5k%u){SX!KYvH@zf}3_--eJZpiy)h)1{A`(#D0 zd{JpG?n+-pJm22f;@ujC>(h(+|oVM@u$plS{D8*Pb7yk 
zuJ+s$Ol!`N8E3;Bx}(+_liMYC7AMPz^aSdv0{4}fQ7{@FstCo#vZCUEvWhScuSfS+ za|rE_`$9f5u8k3XZK%eA%Au2<5Px3V29Rz(&-XQgl?2X5mo5j%ENL?J(uK>Y55%YO zk8#8OZb`>2JlP-!*tR5P_3FA%Wm=fp)hCBfx9@Qz>VdvaNru6gB|~oh3vxO;I}6bH zA#vS=PkPiLhp``V<=1HwadmzZ15$@+(cF4=Wag zx~SQ4>!~vkP}0dp`k3{RPr(5@dvuyh?;JY12~?V^j_fJa*9{kvg}n+gm@&kDO&UTlbi%iQNh0V z4e55=*(IpRoe~hpH91^{X1G}6Qj$D8JvI1f?e=?Vg<^KH?dvwMZ=o5V^;qxX3~-D| z;>}^t5>&!-fP_uoCty3>?vFSws+-CySSp?_)_0MgW>mH3%?>!2fJ`M0tZJ!~JCH}% zWhJ=PWgLk~yG|xJC^#!6w4B}73YF0uK^)NP3N2_*$ZVaNoacMt+)_F$d|kb{qnSUO z6}9LX9HqEZTvEIpdNL;$9{M@shWG$i#<5Mn3kaw>mY8G1-fYqS?5g(05ae*(oJ>q; zbE;uAn#!#xHO4&iQr%?c>I)sO%js78wtZW%u8WE>G6dCZQ|7!>&9^D2$AYUS^krpG zs3bXpx~ZlVo;5Q2Q21k{LyMB9uyQnaH^W}Sy!54xwfRoZC1##DE2^s;?#Cy2q6DMM zToy;RoC_d;6D4k$npF}7vX^)s=H10&xUZ)R!bW$lQ&w?wS%Y8w%tY~|huM5yLKhG3 zbV{Ik5H)?kS-e*;MUBdf?*-GD@ZPN5SCWfclrkl&dm5zJ=XES$3s8n8oqm<~NP;>H zT!(yI=FW5|n!Vz7#`QnNS2mQi?4P0 z#FC^CZ|_ASd15-xg)R^)lAkGVeU?7obW zj|$G>*ayj`OBRKR*?nxyD}2_+;%7xy`L)CpR&SSX=4{8r5|&mS*ZWQ&M4G%26>%uo zLvi%bs-U0OH_7zxT^)^KnS?Zy7gtr59dR>DN1B!xYh>zHlAvLrvD{;Ca$(ZXQs?}& z2!0loEr*#fsO0S_v9Y;cJu+FbGF?(yVl4-$tLv$XNdj)hsjIje$J?0X2OO`GD5g5g znOgaV`6u-=9cHTzBdbYjIJ{oIkbLhvc1G%F>%Q)0tuB{G33YC{&V|CeN^2ua`DKvW zpa9{W-K{v1OJ{=ha0M0oedQj>60=WM^Yxoy$PL*t?eCumq$7U{G=t)=1K(ji(64klFH;m6z?W5pmp|^)3Z0ii_ocfpj^fQ(vvq z)_?4}ytq_fteLVL--t6f&cgHbx`3Us*M5KAczgnxQa*2gjXU}hB!`UE2L^BYD|BxT ztrNf@&(6k{Y%-oAkgV;Tii^)024)V@6)=tY>pJ7anR_9xR^r|s!;XD{q}H9J?i5*F zav6%>XUcim@7;`W29wU`hCS`|*xc%Ao~Mqlpu>T0!8=5Fp#Nni;3YlbpeVj>xoud- zS&M1xvT>UrYbx5)hV!FQ=YFhiMB_SiU-mMeC>mh&>0*6HKGP%ZV=?WKxgOm5)eJcc zXWCbu?IYE?UBe$>j7=j4Y?!|wl!psnxEWs*lRTEQSN6Ise6I1aaB{w0gondkY;6#1 z{m5e*^4@OIQ2BvbjatTDi*fzOSiPgX6I`;gs@J}%IqnDSb3}C+z9yV2WVvB3LzO@G z-sI-}+gZ9>p(02Hz6)OF^ZwUxE40o;Rww3MnTgDIIdMxgO|r0yUP3LHztt}9dBVdd z{gIIBVJf<02;xaWs=XmEX7R}ZSRjdN)D`lJYDmAACp?yqj+_e`=4L6KoDhtwvM0)Nb+;3zWtSp?&}m0E zIx|?MXPaokr9+Dx@aL^HmzALYzKT%cyWP~kmVU!Hj+Zp`K5{@mbgPf}$nWUD9P-a! 
zl|RlmD+`t=n@zjoc(2*Cd&k{;F-H0V^3Ts4a*U~VEm_gya+X&ChGpLU zt~RceA>u(ok)rhq$OTUz`$7s48EZuT*(SE&obEFJ8s_O$-IzUl%Fn28^>F0~9Wv%Z zpS#0u26gJdTy`WQwm-vk$*usGMIHl!;{!P|4SZn@j6o%yG(T&!+JJ+3N~bI;lP z$S~KG9B1Wa@L-z2smXHbJKYgKAnueE^5vQO@>4tsQ@se%?R%YbR|vXCzTc=<-;xDF z`Ro)$wx@k4SXtSufTn}W>}Kyof_#P`l;o4eT~WIsz2a#P2zVtPUrOf7qQzAYwYCiP zDek7O5uk1*m?i;dCMulcTvoXjUW<5h(rQCD@3f>;cQO(a=JM!fH z`9<%KQ6_a?X}$&qCCdN8? zWpB0yO5Ebs-z6`Vv=gRB;Q|Q4ZD#+FFZ@uYTYEXzWDjEXx}g_MU(of=e7csOZx?#b z{OIB1CMYJ6wU9)SCwDLd8?}p7AI4Dp9MaEyX^X#Z9$FZD~i?3DXzXidZgfE z`Rw_UU%1*2xBROO-_zsV^fN7)PAQ!hifQJwQInB`D%vRQY<;5~`}tN=WccKv-}@Xi zA$3+$ooOd_hqF?Zx~~e8I&Jacn=iD@7x$X4DbWh#?_RXiA15QN5=f)g16*(F5@%a= zpWNmnBO}~~9FSUj_qgZzlZMoY7yMX|>YMB-%G9HhCyQjWWzyMfio8Ew?et2uUP&Gt zG?H0VJzNNnCbEp4>g07Z$}fbQ4dpQo2q?)`AQlm*O5JdsPPa0EKPQNExAfcJ2Vbdm zzdv*Tt;>OC(rLQ##3m*m*az_l-AG?NOj>>B+5Fl&`YeJ)U?n?Q4~fdd!0Y>*tf+M1 zvGT${Fa;8}xEKzLTOalB9tmo_YQ;njox>RMj|-M1yNa?2e~&_92WGy$g-5nbTFD_! zDy|zF#R}+wrLSZglSHuZF_UWdW6G-Ht=kRZ-EVqal?r9IurE=DD6ORB$Tf;ObIlJY z@$M$%N8roT#;iR(E0okv3n=2NiJYY0@f2UwXGYf*_^LX4d?H;;SAElFvPNPtH;6)? z*}i&N4yq?y=WBLGMn6hWbkri@eCdiPv3t`YS|FMaKsP?m5s}R(=g1#1zh3L&U19!2 zc-NQL+0ARBO%+9$+&ICVG}Z(ym_|Mx7b;|NWY@{W9E#ux2d+7-{2-PQ6U*^>dz3Fz zc_qk4`K2s^%%Rp>mOIAO;HF5NI+me6erMFIJAba8F0{qdp0@8mMMX`%sI&I6e@=nU zBXG|3%Sbkk64pcPtj zh_YAo9Kfs)gQsp3>c9J^%h>kV0&;jl}N@bV_#j=0nTx@X)g?{UI}>i@@j-{&Z6#cOKiw zJb;$9gTg+byl)_vl%9r{7og|n)^g~?WIT%IemB8J#UQWygGlq)a!OO$1CDjhFHUKc zTAXQvn$B)N;RlS#y9=0AstdoN>C%PgYt~E@t>>OR3$*RBLe~h zinruTV5Y6n5$GMoOTgBYPPOS0nmlW9Kl^BnVnbl}I2w$B->) z171X(*_Sb*Z2t3gYJLb{+Wy&K{9_#=Z)_fZWl!B8SGLZnh~ zl(^^2HMsXls{;Xvtd{6nXUy3AdHhL7I~X90C~A0tCp3jR@iJM_IF)HR(s_ee)kV!! 
zwW*`&kx>T@;&%CfFs3$>w6!+ZOO^fuXG6}CAJk8bl(6l1^Yw+VUm(tijs3+-dnc}H zU{O{a_>GF6CXVlg+9e`Kq+{6iuf;t3ln$ksEhuOPQO1!qmAm0xYI?6egtmGWZ%`Ai z0cYMUm0v*eWWV_zc2m<2c3zWOU3uV&45?k^lObf8INkmZM8tj$0nqARtNBoG({+0+ z9hf6zNe&|92PS1wvd-Wm!)K5|kRm54pW0i(p`=$Nu>!ItpGu`le?WTWA_yil*+%Ez zBEeg1=|jR}GkZK=@4RezJ!v!5+;&%T|M-(H>{_o7z9)h*q`N4BN^@d`soleO?^0_w z=GuCDu;)tCuB@bTTu}d_JKGtgNLeGz79Ji3*C8l?RrB4Hk3+J3Eh5v-!{(H$+zaU^ ztaWl($J+BSKVQv0gp07`BW;8tX{i=puk1fs5kG#E7$|wRI?q93d6A1WjB0&@vVA`> zU3-e!oYqW7@<|m_+xns+E1Z%4yrUw}_gL%U!ZG6lId)nO8n1mI(Lzc`w3)2M7aL8z zxT^vYkSIt{i$m)5=*ML~>yi{(%kjG_sklIdrkfMoD7K&9`4&xp5fdai-(TgkS{9c) z5q19Ta29DbCH9MnKGP=pv5^aCIyQ@WeRF;NJt>2uk2$#&fF|=b+pq#U#JNkn8WG$R zIaKy{E^-xOUMiiAp*O=ES;oSXfXCE+B~LcTEG zhU>aaM*zv9W(w`=-;(6Zp99_hb4XrLig;j%RGuUWe8dpPp@o5Lkkw97Q6k(p%;F~y z){&cQPMRm}L`OeFpBdwGdMJrYO#R3?`m$PZWBOk&0EjYF_HIbe0sAs6Vz{ehuXw@Y z%~?r=*#E)7-bo1^OzWN()9R&;`73ao3Y^7x%r>KGN(e0)?R7TBHuQ(s_)>q`GqG2{ zx;#k}NG$O&P5;RPPuPneJzI%alnzx3s6R+6~-!2>M7q4HzzRJS+ap+22}UdofzJ;L4r^OS4#e zVZMqn8^LLLPgjy8!R9y z>LT%T0D5(>+UU9E`?ep`sLPFb6fx>j)ib!3t6d9#3wF2w8#?r}t zFd}hbHp;KNF$2q(F|XHfaQ56fqM01W%W>^< zKklJ7rzDa9!WTw_Nn|tWA~eFRm*ZV7hAZw_9`#W zhifCflgXc<<1b0Y2hDP~mbluyCI^U#k9XLk!32<_XAlaUMyd^nado=Qc?t`1i1Xy9~s!dr^_vtF^m zsN{q^C9F>h%)&erOHsg@A1ct^z=P^$Z(vG*MU>(+Q*Qw;*cDLbgi_q6wjdBmv?ik6=l3 zC1A^&zGnjl%B1~Zz&Wg(EdA1$#t>^6J@vy*ngZ)lb7tFjh zKO_K#u&s4y-G*f*{LPWL%r?KOw@@)<6>CCte8V>((ep>=9&ivJkb{FR_8hDQF;6Lf z-;H}mR*s0yuJ_|2Z!dy-2znk)ADX~xWztFC6uqKPPwZxpa3+%6stc)s1L|7x^o!BU zd6s#jU)K(v^Qd|4xV3*ViPA`Fqs4vJavKn28?V1dH_;NYr-Zzz{u|9Ic?#&Zl9N%60io>*3$`qGAHF4-DHJ1mVytm#G>ddgF`~L%6qCFVQp6lBMRiB zMh2v`y(0SQjx&Svfl*3uOj=B!aQ?JxG*wVw|k$im6vxpZn!sSfHf?1ox{w zkWyn==Pt#k=0aJ*d_81b53~7?do5U- zTnGo$1RPfS#yS&FRzk;%E(Gz$Zh5ztw)IOS)Yb8tZ!dS!)^l=lYJYk`6urDt5xIHgySC6G0VozneT<*Dx@8cqDe$kQ9uW)qVoxsWt zeenqDm!>Pq+x80h(G1EDu8kxGPCtamy6v^7fb<$oe$YNm{t;#MYq7U+$NgMj^VMtu zg)VtEB?)EW1E7r;g<3~^35?L&@n%_s2%Pq-T91n|5>L~rtG-s&#p9bSe4@cDv0(Cs zz8nzTDvKasGG78dKA*$u>EhAXPPL36Usx516a|Bp=hWat>S|4m-b|*$NNzn=Fk8Yl 
zGLODILa9HySqQbQ-P>_rBrF;}J}vu7ALOM%AM}m%qfcZH#t5)Se-0FClHtyMgDnOv z!tdJRrXJGSUbv1?zpdjV7$A8*YS>!-ARTVx4$_)Cr*+;g$P*oIxAw+1B(QG5$irtK zOSNeUGJhvwfuIU~R#Zc7)UI(6SXta^Je-QTaqz-ssWLC zuKCt@k~)=L##9)$iN?ys?9+(amTX*|*y<-~C!7iB(w#n!;s5KIKrjfVj7%Y|iZ}yK zF8GA6t5DcXaT~hUP0ZFFTn08Q&+*hKu&I?`x(EMTMJ{gchUiokOV*&IbAAjsJuDccbs&B}s3Hg6b;G@G;C#_#9UBvqouZOZ zUZOMCw@5ORRr;Bf?y16E{5B41@$CYm9*vfi6U~(&(83la$i-6{zW&ogjvJI#C8aSr zS;f%uoSv#z=n-78gkXv;B&929P0=AWG;%L0nYt^`nfgpZYS~!w-9s;rFwuq{_h`>+q5U;;ixQ(N1r({=oueUTx z6OuhR&OnOgH5&UF$}1zWeG~Nx54@YERR+A)OBrYL$-yens>*G5kw;I`YrQ9KH`RtP z3BENkOr7HF)K)!_8wuL3SH>ElABWs5@#_gHSm1x#SWmKLt2krZt%$jv(O+ziBWdk} zRqs2VTrT^dDPdi7(MCc3io}7b{(Za=(|v_Tsnbu1rrZ$5!>E{gpq?acnm66_OS<|k z>b;GKlGI%&HIA*I6Ju$W|3Hoa2IszVB!|=I6E(qyB)SB1fQ6y@IQ%)G zx8MTOn*MXm9v<`3A0lqEOpz$9dE=hQ-0~4v>eJ0EnlSgnVW%RIDG{(Bmc_5c9T(X< z$dk_&eOW>XLHMW&Qxx#vnD_7u~LMJ&y3eb+L6PipRS_F!uLm84PEgc6>!c?3yV zpar1Af^D;K6_R6{9cyAnw~F;^Hcn|hkd80lsTz>}ECllGK0G+Y`fHYc&(*ffvVF_i zvv__`hpKlJyG`D3*{7_F0S_0B1?R4X*pY_lqyEYQ&zzB{B8hrPWuyEnJE8Q4Rcr!# zfgovo=37EUs8M5b`~9|R?3pb5(W3Jh+hBq#Dn52Lko3DH-z&n*EAN9r-WRy zO=@es*&h}O3%=7T2<8mEp~JAojvgqWi!R+p=DN3e-{acahaKfF<6RB3>2UW|5PF3& z7~z)Nwuwt5?~*dH5hs${8gR^$+!A6E1>T&#y6Hk;y$p%Tm#^dm`#m^F!8U1KsUhae zgu__MDA20rBAR+y2-8v-%2ZoOjV#itAKs zUD2gCc|+lhm6TN95%Zt23?{LKWN~KszviMR+zsp);@l0)=2|Xe310&^JA*I9j+j+m z$5jOCg}ZCMfntMPHP`7VXXKF{coxOty6X`9g5utSg2fz_D{~h|{U&1(o(x+bq);dk z;|DHLXF4XPJfPC(rCh8kv=`%SM0Oe=RpFB{kT~14l_mhGs#n;aKhAZz1#Mg?#yqv$ zkL2bDcr{VGU%&K!eR{*+f zi_|onCldMh@&`V6i!DELWN4ff51+7ttvGET1Xb+3fknQ#y*glZ*j07juisc3iTRMd zlCS29FU=cg4^leJ=Cb|>9%5Hemf>(F}QN z`;f%KP07e+Ye`jBlj5_5JC+}BCuR~5>m>#YzBIMPf+*k&Mttn>N3nx2)Q+t6#h|HO z%g3zC@5+qP3QUHlpe2HRDVS<~fNhWnT3Zzg>D;zxM(MNtCp8oSdz|h5RhUr@R1*Kvmk! 
zB_l-wG4QdeDPWA&+xp!MD~=08f4&TfXTOURGB4%xk!jU`1x@%F&yPAYC1x(2A@H5n z9{W8$Ec`QI{Rs-A1J=gfL$9(2;5j%k5$~QTWuU)?<5U-iK1SoUx;TPD?igulM_YVL zwW|F3aqKc1#ROJ=kkF@2hM?bkLlqeKQQ|6DRpJ(m8bp;P_WVaLnM^ocJDPvlLIeWk zQ{a(ss9uF+2gR;A6I?=t#y%k@>xoTj@gsy990Lg2F;u#skXE3#F_YIx#$_A+jvXoPlADUo%p)0vb^@blye6 zX+`**!zk^%11?;%5(Wsl30y$;ex~+lvjw1V*i@Lf_96n8qT6;23V`N@pPvk#SL4 zHG}J#P&9u)zmpp_tK~*^>MHiAe~H_KvVc0{-0fk!v&>L=vv#V*V>}>&Wc6%oczIS#Q22d#&@4nl zE#GA|jAN|A0FtlM>dxQuoO(QTWudC|6SYCyZjMjRR;)_AgcCGTJradlU{3HsQ|Pg* zY}V?C(>0|GL@pcshxleoCFW9JkM^_AXr7bRy3wKK)-zdc7?wR#^bub>YH_<7v!Cw! z=n0MiEq+g{O*(sSmOBuwWIi+il)}g} z^EY4wu?7`Q5U3eM1%ilzLe{%TMn#_Qh5oS4VwHX6^li1!E}EtP8(YO;WJ1y*8Ad{3 zsa4tIqW#U_1U{I0zWele`4t>RQg8ZdQxv5B!VOdqP!EdD>k`U*9I|Gmi$duX3V$!X zzYcU-IEu9`#CUAbtjUtMH*CXh1%#D)>f>ON6X!1N{5#~amb{XB`l|ebG%T9?MA>r? zalq98E5~wl$%)-^p$d4M;hMxC2NtocEdHURR`eHC+JhZx`ir|?yBklZGz!Ebi0qE~ zs7=8lKt+Cffq3My_=2De7MbTc^}TyOqbfT+c+7gRV2(-fR@+-Lw#ig&s*)rz!gHQH zZUA#cc*|BC6=CXaBtcF4Cm*!A@9QWO3PX7yWOEC{{6WZvjY<4ZBgQ7nlv42{`SRJD z5T;foaKNBJa#~Zx;|7+()r=MqAudaK5{+Mg$q)J2!Z}w0A&$G>U-xX;nJi7-^RBR{-ep|vV+oYZ#+a?f zD8|dwn=G!u6DUxlWdwhs=T%#i-(BX4scng&q5O@jVZ&n9Unc+sHK+28##^-FD_p8^ zfyI)De(7AI37G$2fg0$&y7XW56Nj@z`j`FM>;~DUM*xn)ySUekNez2!gGfC%Q|&<1 z4&>W@o=v9T59i!V;9E`edm)VI5uHBOnwa~Fd}>~%s<~ymPivXqbX1!MRFGw-`4Dl7q#i6DQqa_#WRzRmJEOTQ0ZhxwrGPH~Xm5 zmB4A5S-BQ{YNE98__WPNxCU|iIn0LT8@%^kgGARJwlUsdT*uN9{88ylpnTfLblp;| zh%PcY+>=*0hGvh{256iMLP?BuU@;=*F%Q}T)>f3y>BKY9p6?{)U~A=rtb+KrSMyC4;QJBOl7L2e}8Z;LRjr;jC>?;!!!#ibcs==lA%|nJ< zsZiLMZ6mSjHNuHO&=TD$GYiYs35!g&Db=$N)XKx&*7kV6Bj0$z{T9r7o}Eo>L+tVP zmDw)m93BB)2hr;LG`$Nn2qg36SW`9tyS2Lb$Z6bKzbSfzsk<@Rh>3+2V8c4qQaA^^ z|1jzDbVp1@n-)38iIzkx+~KqK>vKdr@4u(h z6O80K5RCS|Uh(DOLhaJ%x_7wU4=gc7rC4X;lA(t@66OlMcV10gfWPYiPxCT;p=7Q$ zDKgbsE-NXC&)$|k-==cMl=`McPfuGP@VNcr0)oT~7r0tH?_X7Kjdse^wBy`s`=JDh zq2+7o7E5?+eP>v|+ixfbKWARV*^^rFam1%?QJ~d4E&>pL;b}jDSzvg+v4R*prD^!Y z#OXz%ajNs=V&Pxz8CqYj?OCn&_^KF9iBeRsN$zb=4LM0seT%y;WgHGC+uUW^i1~C< zy+-M)`b-8jfgo5qr!TpDW_864bNC+~836u)o9Pj)%If}Ht*q^bL5998$7z6rih2#R 
z0ftAo7l}F2u4;;e?cz-CZVV;KbOlLDG@1ABWadv&OP1W`W_EiZ3Rm%KFM@*uzvy>C z0D=`57M=~@6!}u){1arQXwq(0d~xeMtc~i`Ca3rSaBk#8{$7vNCsiUN3^&!V zjhV|4x*ox~b_}XLNMbmY$BT%A>TatIKYveq$t%13@8~ABjp4cl&p|7Up8cin<1E1}#aGH43z@thXqvKq zGejGcOW!#!c(OV%Mc+tBJ;aE$$U53LtG>$LNXsGtU%&cQ0KBlV0UiJ1f;&2CiJB#I zlU!u-8$tuZ^~9W$Q#X2Afo!G9g6RyP#-7B*bL_Q&UlA0xcQ7RlrBa?uD}+L!ZXNWS z!z%%iwe-k$DOJImgW+E?r_bcT_n?7_mLmvDV+AQvm&_bIH#t2YKpHs0-UfRYW$ux^K;KFF2pRJR5HRq zCoht7P4n=RM{rOVRd_J64e;w@Ku?3S`59z5g-7@1YR3Uep>%~}!k6{I}0jhATaD_qXN!pA)RnF8Qcg$L4YKq3?_eIK#gP7odmiRSVG=z zNay^j1r)P((U~3_8@r&;So6iu2S3MCNy9a5SODto)Jb1V_QOQ{oY-bqUS3{`j}4-? z8RJ0vngw)`gig{tfS*vLwf&ghB61b0Z;H3@SGYsJR@AVe9oQO0Y6c>$$EL~ zg`(7s2Jp@hhA6q=6m${u_n4H3K5LI6|8XzCI`NEZyK*fH6|v#TbD612P~AtajJn{x zbpkc{`EXDU&&n>{x?soQIN#PBrU^eF1Mj4kl4qH^suQ|f*jl`Kc6rlIvL(-wIbwE3bOtB)7rUbb*zOYufd1m2n zp#xk+iL55_Y&}&N5#BKYR9^}mF$?DL6aDk;(V}y)yBpf+ z2~i;~7EG(>xVU#AxSpU5uBsRXD*))A&9nxc5>9D@N&iSW>)dCeby5N#47?_g|KhCP z>dv=deWMoZBDW}DDnUe}1zDLu`}to;3na8mQ5#QJ>pONTXTzd%EX?;6xtkK9e%w|S zc-=?N6LGb;Y>`;}+wGqK%QUM{^e>xm2xLX$pI__s{uoL9Ipbt6AtdB=-7;S5RvAhu zHz6pPmF|Ca+lL@YhVI_C64WWVE5LVfvq#ZLuGK3)poc|~&-)M%sfGxC{Fu95$88vX zZM-azZYAN+RCSE{l1NV;kr+l;_cVgf1A0gSK+-N%s4_xCAFm_2j^)TXJA9j`MJ!No z{57x~v;gSY8oLvu42YS@miR>yORUo zo_178VdQV{ABi>0GKb;h5Zc6TItAjsIT@#=rHD%W4V%3?VM_f?@Ux0%dp=pl7cmBmMti(70^V8|j=&Ktv$ z(?cMH*?4T>SN-`?ewEQ5o2sHd%6@-?%{YFy__{Eh8SqdPn1?@2w=Xu*^Gr*d<0Yj| zyDIa5jxOi({e_6@(`lWbzPy+MZ9yNwgkVJ)T5VLkdp#>3t0fb2 zpe1xHFfW5Gqm3Q}(iMOdo8Gj(4yeZrUd_};ioNb26*-mtpsgt;ohPAHy9bOTu1jcgXX}8!q5r+=5S42{gfPMDe=b>A%y#qI|dCc zZRSpCYZ{T;iF|Y~**?FW@8{&)}XswfdxHID7=*)EuQv8B?X~3xgQjq;Yx(P&(?Qk)x9gh^ z@yT#}z(k?S>O$ff>}HMgMga^GtmlMu{9I~s$$Go^;&@K2D@K(<>|co6y^bO>!8ln_ z&#^$xkLidwt~l1msX3T3*~ZVJIYWL^6<#8)Rp{NUxu}eVPERXXm+$KjAK;-@tZu)W zI$&YhL?kqm)#cWp8LnYhs)sH*Xp^trXalJw$N&wWa?l>0zv|PjS*vc;s(W)$GLv_hCTQ4g`wAmKuYfNxt z%A3KeiOe3@$i^0u$oR4uf%IuI4y@TwY+%A*!;>u z5QzY~FAas+Yl2A)QryD^fwkL!B49{vYvenbfkYI+5Gg#LQcCnu;kK*QGguOIJlKjy=8&exCtD0sn^!&4BmULN 
zVUO>=A@r18N5Btnh3R{1C?D7<5p9l!-iCd{BiH5bc~VSm2iO*LM%X2#Iy%Rm?ydnl zZcPS0$-~lg=u+<3nTVR*D3F0A=xKbC$$Wr#evp;fO5h9c{W%)1G zj;Uo*jG!fmyk{A@&akaz{u>qI`-ucGhI@G@3>S8NERXZGN^^5LV4ngRl`xK%^iIu~ zJz99?WRVI!%2GSRN)sJd^eNH{Vk)&IJk`I^r27OP0TxqYUl_+J%1t0-gU9qT9yZB! z_$l{MRgK-cmY4$2oPELi;9}v@1+8Vc7%ogWN-p1{aX}0e@%Ti<#5SxZCL1p{O~HoQ z*RN}!MDexe`Z0vkQJa2xs_uFhpL4K7sBndj$T%pnwn691TFS3>^kpM6%0mh3P)IZ28-8n*mz;;J4xXvDr498=qcf0asl{@N&aHrn#uSsk)tJwTX2mR9Q%nvF9lz zI?Sf0XIJZ#3?WhTX_GBpw~0SjV=Vl7U@~-f zKz*NXGUpAHaxI@`e4E0tu5MsMUmILM!SzdZNUAwDbxiq4GC-O7bm*y={ptNe(0vRW zhaDGN!5t87xTm)<`Iw@S=7pk?of2tnVw-(wxvmC0dCEvU@yd@i{x7}`s;gqG`^=@9@V13|t=uF1lTECT*G+XD}GY){gH!pCX z1>Hqy2k)#J&vg6h%B@Sao1gll5^}o&J?##f@=9yn`A3fwMI2$s%EGU>5)J4ex|4Yj zc?pm3xi>rs`s45s#D0FnPoDvakXIyTq!F1^>r{4-k_&BR0(O)*>iFA2^e6uf63`JT zydvr z(f7Vhv5nP3<=N)^GLXfyycy0_Sj@F!Gz9E2K@R*Axa!b9y4^+EL$aGsu917sAW)4E z+`q+~ZIdY?1GYf8V%8;92{%aG+o9MNBngEZ`?Tv(uGz~R4i(3dwc#XSecukZvdq}& zrM=m5wVypME3jb8pOi)WIyZL+lAqR#?sVi#n)aw`td^e?)y5r@eIEBY5fZG8rGR~6E7bJMkEWgSVMQQ{37F4w7J%hrW^Z#8Yt2>FDu+z<;_ z-<`hrI;Xd99mer}KIu&(P~nk6)mcRnQ+_M^;iZu;iH5Pb(V>0oZXSymkr*}Vfka%8 zfl_x-`fmU}5)O!^vcUrTlPx5T&*px0QVFZ1os{Zv+}_+I3Gq1WmhBMHFfl`V)bw50 zC~UF54{irx70uHJQLRV-!XBlbF3FHTUlg2cgPqrzyxo;2fk7>b`KPC6Bvis>cYZK%F80jaWuHNy&KK zm^k!}nG!))cPGHsR(REb_9;D%SrKnt#!@Z5d(*a{uxO1-iT?Oy^FTfOu=9qxeQ|2( zSh(2Oxzc6KAfxxC?3ytAsAa&<)FiXyRE};{%2w+inME4M)>`m1yOS zEfwOorPDHv9^$PgAjdfNui8`oC3{UKMUL58U+k>Um%oljoIJMFv((T_%`9oWD?Au- zt!A5`&8f$?yj$ciny8<1E{%6sB)8Ofn_9bRykoAi=Paus`>j@nZM#@!qq)KPZacI; zY}RE?87ig}t0GB@KV}R$YOac-G|+nj=5+J03=I#L*${_X0`ThGJh_8CRPU`jMN$Kw@*>XNAn>_CQBmss^My(4fd--}r=iX}7J^ z*jQl|9XGbH7B^Nx0sTiji<=sk`3$a|7-IbAmA6Pmbi=Ep^z-d3vs0a~z9p>kbvQ{;rhtL5 z;?0!Z=7`t*_DT&vVTP89D)(}WuIC8htKs6r`n&U{f^}KP6Lf45VF;yUU`jjgf!ZN~3=#T-$O&gm7n;7+n-Tl|T|?Bhgk36gBA=&9>%M?>N#04Y zH?5nr2AQfA!Wka!nC!4ttk`1d*g=W-x@{<>Qc9mFOxIcKaj%B5Y|h&bPaZ8cy97+U z&ZBBBRywe6!ProR3X>0UG?0he^pB>Re$nP?53UTesffx`F4W{pu-!l2bQWH*QnOZ&&Rt;$~gpRnZnVaSx zhpqa`~8+Cxw<#4BPu-709s3BAs 
zALo|DCdU+^tzYHE6j9=xXzb5B(of9Bc@nM<$++6;FA{XE1nFzv_)}6)jBstFbc_x# zs zv_IE4nFfM7?!Jnz2Z?~9l5)#_s5FMPMjbN^K5M@!868mf*Qq^MgSXa=1%jhgpX;g) zT#-^XwuJS=g7+X>wITg-^=QlL;j&&s&iF1v?)A@ln`d*D(*){s)utPj(pvPB?2k>P zh^_0QDq7qk_C2$w2^%QAYHmEODXWS=#R(>VvC_KYnf5^Hhf~QRXk&LtZ{q zoNf#}Ba!3r@-9}GZQdo>`w)-}^o8XY%cYTmrJE7#`%R|veh!A@nq5_Iza*IX>uKqzGoTEy*(rWcOxpdvo@>O%R7lE=5ts#fc}K z;TY9us0ej&G0!~T`xYSk+hx{*I9}stZPkuz8iq_of zXf=5ZlCA7r+HjCocC%0zzcCu7ra%mB^4U~ZB-e9GW;f3V-Z7PAw>KO2L9e-#QOT|# z$!05$e`A78xkcLyJwVh!bi~FWrr)%3FrB@wWqmjve8H6}V=m;bbz17KrB$l3SF}-K z-XKmmj%mS!ipy;Iaq_1+Yb_cK&tPiFubTNm8#JjBCMab_cR^lSp$IE6vp!;w@702@ z8^6MjYGHQ)JfJ_gkwB$ZWH;KVBYG{RKd0rg{DWL`gdh>U)`Y5LL{j+9?5)+!q@8w{ z-Ws_*y~@`wd@g53^Qh+?dsW;Cb8dr2kEJlAD5%Qs@Nle`o7gd7>de`>%NE=0brKWd z2~=+yd8~9#2m54np1Wz}PtSG@@tX}vffCA<03r*A;vFrTKse7nr#V|yabP@CSH&1a zrf*X<8z3%NZjld_Eb0{~YFvL(0hwz2>V0Y`t&d&l(#29cslsoYdeu~{Fr^|0!Iirh zqum#EW~5gQE6T2B5}xJSqeH1RG+?cThFs)b2{78i-%-0s6ub{H_(JUwqfSa+WLkY! zr6$a@W51vF%a-C!V$7zayp+3{t#Ql`(@v8ck-|p3OslfHGO-GwOLys*=pF)*v+nd@wmb;wn4 z;mbRDtVO>k8&rIM*n>%r?PTKdYx+~?c2H0cRf}hzIdBGPzElShb~w8|&Jw>(r&(lw*3;jFeIh2uj2{My&CW)+%=$UzEW6W8zKuSQ=WxwJ$$xPh=jLkNj|8mp2uCDIs-i4ik zLG0W&rhYe2xR-h&Pl=7pEVwkr)Wk$b@lLa}`)JLA(Dm$~sJYS7$-i)m#<_qyn}45s zm`i6b=c_u8{qAVb@a;uOLC@&exS08+cz%-aF~d2o`R(3<&g$N z5KVx7VXsZwKK-3x=JcSXVUc>31!0`0%sGK|YI300JOZi9Rb`}K8mTMM6HnGf~@-EAfB|g6BJugQZg&^ZNQkk7@-KJ(^t!-7Ins=tT0ovVl^0R(xH|d^2>^pdqI$(>!!l?4rqa)@O zfe(|k3>;|*Pj!0y{QOR!2|8ni;3A`*aKhNsRIMB|9fT%I)hDGH?nOW)u|tbT@s+?Y zZ(nYD^j|)4ylDo{A;%!FvQR>N{1_1ATOkx%N!#Lu###2=uIG@gcz7tl+%95s&w%c&4}2H5Qh`fc$_w(7;x-tGHY4 z8mRS=g%@ps#7X2*J$K~*!o9<6L4NF>(=Y=cC-D%ow*jJ57%dA+)v9O7m4;GMz z*HB0H;qXJqzP-3{hI#Ko_G;t^4sZXPffJnjOSwtY-cl*DtkJh-P;(Hmuy6I*Ya86; zBN~W@>Y7v;=SIrF%JI1n;F|{%S;Vj`RwXdws-5+fPZf07n6Juwdma_`mp?5920a;mOK{$B~EX8^pMyRkK$w`>B)_B4>W3)PKcpyO_umaG2BAv)A zg2gooj8#(m5Q9?V$4^P4Xieeuf$bryu@=Cm@q{@^L#?z+*GK%fEmM4o&L`2YwzJdC zy0UOKCOYR;m(XuQZ+h!iPIe*g)~-sZqus}BT6()2a+fc!SmW`>TCMT9_0qd-w61V_ 
zY>;yarY;QQnbg+HgtxD0=fWfv>fL=GNX7pGE0n3r!cfqirgg>15*jOd&poPPzfbdU z>e!Ekl{$OuSh-pK3K|y%+VZyfVoF_LZ@>LG2&pdRt~_LVFVZja@(VZC=AepM=`r}M ze)&B+(7jJ|^;xMadcpOiP=n&dSZDC&HImLSQhS>UP>*7!9=H{IsDTT)9#5_&?QInX$S<($uI~G7KM}~-BgkKon3AV zU#^M;wkb^%Zxql=K-IuBKbCTAtIDX$m7aqoRZ;-FBSyG5EjHVCa?kUY1pNc-iWN&b z)G=P*JB9?2Fu9j!lwj1AAhg?7nLY%CxDm=ZWbkvS8dRc+D&gehBrlb~$oGW^`yS6} zgR3b6G51n2Cf{bl>;(QD75|w%FEWxLQ;h}8eEw0id-&peswbz#Vh^(y)V&d3BgDjl+o`{Lu;)KO!ZILd@C;XC6hZeL%AtBjwnbW(3TP+r?m z;bip4C$4WD(VqoBeUmPW%oCFi&6#RpBAPnnc&#$N!Rf`YF?{>utBcS1K<5>`4kCFk)({!VMW|pd2sLja1Pc^flii>#+rDY#i>hBC zT$Ak2ry-go9*UQE5S3JchKupsnwCMU=HjDZED_E^dLW z%cB8bN=fERFF8qwz(m%s-bx~zhw_*kQd4JcU*mc>C~tQ9-5tmW0QXZSkQI!zZlF~j zd>Pn?K-G_~i2f*`dcwcvUPQWhuwklLQu~I4B;Ku70%Q)rt6N{qFdQ_7Jr}!M2Qkc( z8JucqxFxWzcQbSF6+g}}DV;4`ojtltJ2niRy{wr#t$KFDSsv3d1w~ zOEr!u z{XPWC5H))UAycE#58c=gUw-##`^3H~4>b7vGoJgp2Ps_tJg(T$J|D`D0Mp-t`1c8B z!T<&RcH!N4z}P7Ou>M`LK!kyrk<@Ql$@&E_5AIrhvb%>D;(dr$?B`=&->N${L1U8e8dT%|!zMo>dWf6p99JfpLu zvY)}|&02w(V^4?Rg8aA(8AAUE_t`Ojt|q8NcTZ67M&v^~&q?8)-kk-;zTCq@{C51%yNY zSCaqt8;pC+!zicfe*b4L0K3ZfbpPm;Z(v(BZ@BPf!u_5L4@m&+IQs>w2J%|Net&3XtdBG`#z#M&@aVe%6p) z_T;Zy{xz`=x5s<4Y}-;73$66G^ZsWgy%W)`)xeKKP~;kKwTfT&zd;uhY;!!RplkjQ6D{Ev6+s`6Q?Ku@+#v;tV^B zaTx}CA9H6kU7Ww>?Q^21`nNAOEIUsZ{yp@6e|{si%t*9P@{g33(Y(YRw6XbP<|8q= z?&1vcuU@?T>vF1bf^_t^cp+;T=|GdvlGr0u8h{x||7UT)WT{yo(naV~{fsi>HT%tD zD8u%8Y+>puz3W_7m4`o;DNBu2L!R14^#bqkX#$-0A=3?tJ8)EW5R^#GI zlFW{rCg)(-d+UbiN89#QcP0*8C>-XmWzatcZrKWsX#L58Dn-OZ>^^%F$3{gn#zP

_*U^nhzi4k*_&;I(@ z!FoSS`P49KC~kIqr6@fHMNV=vE8zbC&R)yts$2WY&~Go{oB!r;sscQ)h{hNwa_vzG zSL?Kv=mQ6W1U(T>pR^+##7r$tH_dWgg$JORh&LG=QFTz|JKAPaa~q?H(>onMb4H* zZHt&vd`HIr%P3@I!qL_FXsA(@?S8!Yvr#PxGg7Zi24-kk>fE&@*Ik>IsB)a==t24R z%NT-hUXv6Y?O7(A^Ug){Ssw+>|0s9V~jzF)Jjv(yxrw>m6m{I+nBpucDiYncI0*etO zyC3fsg&gf4Cw{w_1j(GgW=1b#@cL<%=sS+hHC3bBUY3N|TN7_{K*>Dvl8x;R-=8NJ z41;@em`JV4J^dY)`uFnPl@Ux)nOvF_(d38hKbanFaeU^gU9wsYWFCJ>QCAv3ZfCcz zMhm7`VbnQ11x;I&n_VJ@y<&4v{}@qjW^ZZGwoAL8;2)z$l7X?=CQw_J&vk^x$T2pg zdJq7Td-)Bs*pE)YdMS7uJk{|y%ng{2GNO$lm~As@!J1m7yL{yO(14(tmW5ZSe?;-1 zUxGjcz1-ydDZ@&akC(jakE3b4Ct)t^8!hY=>~}W!H+o{3E^w_p%pK5oJo9vB^K+$d z#Oj_tiP>NLdoO29BgnM5uKcaWab1eiaLa(zjH87H1)At(A`iIu>$C1hn=B^)qXEx( z{Ug3L0sWWT_qbLU`jxbZZoen2U2^#uk=sj%)cZ+TpTCxb=LI4_#A|gqV&qWtuqA^n z@)#I-Xe0)HQY-^_B;{5guhCX)N4?O_O9$Ns5vJdBi$g)8L1w8ZKa7B5U7rZQ=lM`% z1~doV2sW(#t=eJUdT*De1?HyEoB4E`?l|5TeE{S*`i(@lgC9!#NI@U#ljmUirYK1( zoMFW?`@gM>8Eggo(X)6g)qTStgMSo<1akvCGx|p1HNf=t=<|YCe73{u`@h91#OGc< z^P@8$`US#|PYU%;9zW=TE-mbWszwmS^jyg)?9i}YpCd8(ak}Sjescd2y%h2t)7!t+ z92Qocue~BqE@Vk&qp}xu7TDKH9xim0V^Sb65cZKC8?5~G_zP_6gop@u{NFkrNJTPK zBmy$2o<7Eqe%$?MN)y@?HIB!YWdXe_~f7}{e(Y&iuCs30Qj;c7eO5!>eW=<;@Ij>*WKxyI=ifG63hV+iDA}O##PmAIul^ z-6j+8g>|dH5BfCxbq2aFtd%wjXe4}I3nS$6D?Ig7+mVRn?>~BxqVEPsh$1Klar^&y z;QPWQu=Y=g5UzNpGs(05gi`Hdy4+|bX(7Wa%mkpvE==6Ejy%so4Q;si%id;i{N){h zJu7tYZcOcTo0}HZ{Wse;(ZhV+?@H)WBva~KT2$pZ{qFh)7Aq<#po`49~=nZ`KDYGo_-AOxscHFQ+2uWjA?(> zGM7D7c{t<@MCH!^N7-8kG`YqN!^eq&pdeBz0wO9Up)>=ABBFG68l-f$69Jt9N=lBB zPU+kP5$SG_k{pZ}y}`bFfSz+a&-1?T_s?W*_jUcQU*|QCeC_ip@84eKxW-|eqBEcT za$~)=qh6;zfzRwFr&a#b<)^vhUK04Qr+1>|Yjr%$;5kjN&VWd+zW58&b8DjP?ec?nvZ=)OCWDF_Bsgl4rZiZrrA>&=Oa66&Xpen@p25k`dTw<8VPE%~5(Yg>AlH2K z+2I#}7pM=}j&rw>me->~Qv`*7KMSxp$$bz{4^EsKUG3o=-tpWKgucGC;xZcDXQAbv zpWfmQeFc2yYNyDFy`RqLaQU>mPR6-&OG(&Aa%4ID-${Eu$pM|L1UQjSBs*{P$9s)` z_HDJZ#%qrheKP&`_5d{V`AAyJy)c>%Wt{R|R!v|Y0Dy{}a;GKyicAzmy>0lIp?y>0 z2c@o!BGSF)vyvYP=Vv%?*VuQG+E;5S^Qr)6=dMqPet4|-WN(1v`H8-hcVDst+w(Gj 
z%I10V7_Kx2@_V;AUD2EieyNxf55B=q0&JT>?UvY|r=jyQ;t-+7FT8n3eCEp@;s{(| zn$u$jt?*?^0wzrrCbmV5T_4wY*-dnG_fiB9fILA&Ylw{3qEM+;wU=p7B<B#wjdnwb4??o4lJ=bKIQWlN2QY#Lt;ulslJ5jNI#gYNec z;cZ;j{R(lAT}@IT7$!V-_>w39M7c!>16 zZKf5jr-UNc9L-LauWn|@`;oQEwxH1z|Jg;Ow<2m*=%kPI8n?8@uny;VTDiAIl6gF0 z&f~+icMs@uE3G4^=Wv+axjA1|1YWb(Ho=p^2dn>SVVasz_z7*dr_*~|^=elTG5xk@ z91=Y!tp_@AXlLyy)X@VknAG^lHr-4Joh8!jT)GWgjTTYDY=6{iyFwrhz^%R1gNie_ z-st?y@%)N&4To?K)4ay}E{%5`7>JW)F5~e&s;?*U@0mWHQVym(J~%DI4L`g6@e3h( zr-lz=tuB9DN%h^Lp#)CJ9hK26%;c&IIaErT)JO7@T>65Z0T|h0TwG6>eY^C zJD0qtUXLhN3$qj`#>FwTzd#0wejB4^6lhnvZ<}~X&wSDgzfMK9`Y=b2Ga*Iz`S+Xo zoqKc~iwEA=ClTj_Ok_2%r>BW&7&U4X*PRb$U=U5}t=FI7nr^A#q6?xld+(>dGy9A9 zv_s&&J_oF8SRS9tdzbeOhovXS`-8yQE_G z=slDJ`e+zMJZ&3pkg7t6YBNpI85f@s)lpE06w8(TNLgCW0^VvO7z#Lc@6Kyb*NavL zh>C$w1?>S>Aan2z2m~L7+jlMW;i*xuSKX0rpguvg4np&v_qf3aLdA7OpQKX)w9XEN zv~M)M1Q^{qHM#ZZ7^2Rc7e+v};+noAN^u2}dI=sa6Tfm%bxskJfyrTZcEH8?O5kz0 zf&Ch~<{&I~#d6;;CgOuuef|WU)$ASrbnEL6jIE_2L=P3f`T0_$ilyNW_8p}*9h|e) zF#{j{?0Wy6X83)8`)_isvMux08g0LMB=lmRnZQnf@@pmYk^xO7oCN4J*_hPc!>i^t zc%@s6D6daYN!d#akh>qSX_^rZZq*}gQh8&7=k1T=ovg=$=I}8~k=zPtB3sROqM(_A zKs$2Hc%M4Ea7t}N;<`o?3xYB8qBBu{mgsK+?P6^;;Q_B=}1880c>|HAIzFu1p%Q5(Wfx@U?V}U*l zt8}%|K}8`aDE&6`GG4$>Q0#tB4{f=)!B7p@d&e|X=2VrD*5$r0E~ARpq7Nt&lJW7g zWg7a)R}U9*l0v3@XUNsouuq!G`^}bDdD%}u_$eQ_?kCO~31u=AF&%@f&3hsQxmWz< zM_Knq>M5&7t}eK%0?&9F(y0-PibbMh7ec=7ooq0Lb#P{h0_DOj!v+Q#=k8sOjSCHM zPSSj!a%pssm!Vjx>%XOj>v&(6Kd@)U9!am-p1uDp$Eb&)w$ag$+y0pJN>kh8;+tgV z5np(cl+5=Wr02jvXNOyejzv37W}-?oJpOdxG=Oyh@j$^UnJJzST;QiCn4xFdi{Hnf zeSkj6I;GjO7)CScxS`jUZ%}i_>X2VmR)c>*uTGHXOxk{+ zcqN~9%%{A4AT;uF9^PctFH^#pJWl{Rqk&1G<~%!8KTs^;HM^*21Wr|St;^HXVsl-5 zFRInOM?b5wvM04Tvm*o1io-8b(7^Fs5VfKWjIZ%#V_?ZIk(A6Ptb9OGYL)G2ndpE| zk9>vnI^~mjZeKk&J=GGLUA$)xdqP8@<@R;hn(E&BJywCzos8z3Y=T7RpUoT?cQ1c3&&E>VLEW0=RHIp|aeI*@hfZB1%lWr3 z@9fW!?-(iXRGt2%p{^DvgIex`U1a(d)5{CvZRSBj?JNZW>1Ua;c8zY?c~*Y*gq%X9 zdw4(jDn$LT{uDa#nve7*EsTyMSUEg&03l!aI8!ooC7sqNb+s)^)gw{!N@BSq@L#@# zkw%%TuM0o@Os|A|TJ3lc{}W%nn^|8UNIknFg7-Su-GzT$L7dyHGN1Xf!ZNN*I)G(H 
zBx?s9N-yxj&oM!%Z6ub_wSCoXuu#R2L+Z%Tg@G8MeJR@_yM42)W1@|aU+Dhz+*MvO z5ZZM>)3<6R(}tpW{wJmOm{w4);|X7P>VQ7)haDmQv#wIcL6_UgA3m#_%R_H`kXqcN z`sR!xoskerRYd&Y2nWnV%X`t9m}#c52#nGGmnjTn&&)E?=fM4P<=)(H5_jgk02c({ zmAn(j3{=3!I^XF&l0mzEb9`I-mnu9`NCfzJ=|TAeCY6{hK)tydpWD9TV)2Anp})c_ zEJDHFKqBcnQ4IBtSx_?$7N;VUh}(VdLD92E%!U?A)z@C8)P_*elB$K^_H-IDSPE#f zs<>hg4KkB8k*3dqEq3!mJp29uCZiLs=5@V_Ci)jgJs#pi#!B}Y696(nj`|Ac-!Z$ZCsQRDKSs6>{njYDW8Pl5pdH}Jc1^ z_7rWyB^V1eF$W+}33=DRoG-^LVj%WArs0Rq+_5*veFAHGjUc~<`0J0sul#o4w-Cwf!M_pIN zNq(VF9)VO5{R%it4to@3{gkEqLRz^Glu4kpL<#Alw#u*I?ydz+GJP7<_Is7I_oChQ z#_gGBi$EP5gB4H2j+|6qkUn@s+)Q6+Wb=~?(IQFlK7M>9n{pCQ9g^RZ=zptd=^naT z2D^3U+#AN`yN|n)75K3d{^{3=(iD(NPS)ve8B<&Sewdz4Gj^ZkovNuQSPf082goN2 z%RZ0uEF)7mPBks_hN_gRYW#ARdhd&ifJAst3MraD{9gMd+U~av^Qc zLd#wusiL%F^xQs}t_YTWKlCppm59n+NnltiPC_%>vH z21!N~JiD0A>Jykk0l;hD!O+jlyse<2lLV=KbRsW)+0t?B`9Rb#Wd=*?GVy*uypo*W4vr+vCN*D?g9XfrgQ~ z2yyx@FVQ|dq??lt;gn{9N2iozfR^I%o;jB(4Tp5@?w|wRDbEkL-`=QSd-?2 zW{%nlRbOo~642S2nj{?;T2b43ec3Z}Dv;DSe_cg7)H==Q1H4?%_gG87wClB`jJJXavy2|2%Mt1kr)Et>>AS z{*YeiBa)o=LrnvB+$+d)yy;g>co+sGnhKY5XT(FRZ_)ujD?U`i*VZkxNI^*SQ_uWL)6Vyf70~M&t z__#dcOU_B|uR(HES?UFA<*;XR3r?(0V_NDVCE-M7zfUwoakA~toWxa1ZEk=n&e7Et z^GFZN*>bNO`1hYeA2x4ke0GqQ-tMeaO&^A}X&>s^0wiIr3IIF$6BK}lcJ+c=(5#TM zn<(ZDc@7_*@-Ki7?-1mvoys6>HZ!l4IQSL-f-xyUARU)~R*Ia0T)6`JCoTvqatW$W z+&@ZhvnTkyef&dsC=jKunk)(nw2-wLC%oa%)KXkD`?2K=-z*x^R9#Q zr9YSqp}MAVm@@cy6M8d0Uco?U8TdI)fE|EqS@Yr@XdfhxX{8kQyDiiF!!o7aG}fZblDpJMh733412D} zF6>i_jY4q{Yh{dbDmjoTf&skTV3xc<*y;vkQXd?(VQldk#A)FgqUkpLK=gh@03vXF z{2O@&)v>^dvf72~Qs?3IT=JaBdJ>J!$sZ}$3hYhv)tI#U2udHGR|^srNl)EHxDbYu zDhqXjYa7>R#${AkD-@zQrk2I=#0@pn&Z=lIF@^Sre)pht&0IAT=}RN2o3oMQS}HPq z93ixxHpL+btAh%dL9*CN$F{APQbzc<%(;WCUc3=5Z>kYiPc2B^gz;5LZS;P-zL>DB zOs7T=1UZnk0XVkWW;k1Epk1fjW-rcexfpZ?D*YBm`)XBaYWlBea_IJb)(G+($GLFe zg|<~8N)k;mmCG0CmbzZYgX7H|f%Yjm*T@$lrmi-;r9RmIobx#RCu^mbAJ&-^qJk2A zu&X*IBy?}2euKVwM8UqxI6l2Y`CV+k)+rMvjh?x86)cd3C3?OuU+|Rfep9ie;M}B) zu$U~?u_*XF41C=nVkG~Kkd(W$>+lNIfb5C; 
zw-r`Q9G)I2l&j~@bONg2Vr#9onri~)kn~=P#9=?EI=si{DP?;cYSRrOeq={12rjG( za1=Re+|Vtnni7?+wASA0NL%SRD_4tPD!nA=EYoQp3U*4v+69jB^PC;N-CUcHUsv{Y z(P8W$k9n-ICOk@Ry4Mk`@e#~A&agD!9#iRPKO9+5J)L-kih$#gLmXGy3>VuDv|Av4 z!6hgEodWlsw0s&H>Bw9`$NEJ-yqh{7zla3*OBzIUqO1?ta6BndLq2$yM+6q{wTW5-$m!AVy7-PQlbUMPD?ekn6OK$1%+lcFfpD4_l z>AWD%De~G9aRGq&3#vl{7>%3+_h3)jQ^B2g8@G|-`0r=Go``m^Z3-vTp|`v_@a6Vc zRFt^K{8(K;Q?H+do3~Asa0jAH)&iU>tV0>aB&8y4>=3Ny2=Y4#;q0X`rlmJmjrR`sf`>dL5ACj?n^L|uN zj7L_sEugao)AuvE6Xw@-ZZf|>=8zIlu?V1|yUc2?*ht7*vD`%$`U?3ND#d?BC~Ouh zNf&v{ADuiWL<_c1UR}CG-zaz936lG+1ZIxsM5a-&v*b|pxTg6h&*YmmSE;3p??mdk z+7(-iq40jM_T3kQlLFasTLr+6Ni~x-G4NBU;$$hKTH4^rcX*Vml^!s~ObZrIp|_+0&dEQ<=p7k!$NBuo4=Jb<}i^)+k=j9Ys#+@wIE3#y(!l!oB3no|}WTDthpX ztgBm4**5NhpK~65y>+j+Itn2)nAX(P74401DDc}h?O!%s zdllgHe8;E1Mf#N?$${l|d-9pacR)CmljkS;=!1*fm1n!J&bB(k1_!We!gEN!vLx$aHK2xfqdkImE14_8$V4hkgM$9Iy)N-9^u*rYUuDNjeVR0Fe~m>fE3*u(1hKayCfX ztMmWX^Q7kFjf4QhrMjNZw-5^4TfkwJS4~{7G7B3}0B2z6%e9b!H5Qph*~?b2>%##QzhEl>ybONJN0n!IYzF22Ias{ z^%_@y^rysg(^+r4dg{$q<*5FxNTH7}hpN48cpxmyyND3LZx|#79jcBIn-mfRzP`8x zuw9ildpLsa%A^2IG7Bkb||p50Jz*^onWmLY~shZG=mi< z>2G&vj_Q4o^O1fF4}I7Z-2Bkj*Gt=-j)HV=yY;T$E^kGRDI){zX*JjRHPrm{M%9-( zS^HVzg9E=z)VPv;kFkjS)VMA4$}n$Fr;~eMk+tf;rtVf7Pr4eNQsj>7$c@NmG9T5a zeNQ*7N|wm~Qqsa*)1jl)BIon7P}{uT_oH&2-p&5=3OCGgT7H(`von$v+Y=O+MSraU z*lfRY!SP#359#0e)MKxLmTWY+iS*33t)|ZiD`CziY*l2w;`#xLPAxOwxcU*hS9ort zJ1?ap%qIn4SoVap9lzw|aK@_b?J@K8QelYhRt^{wup$?#A$Lo{>CLBxaij7u^M_~> zyC>SO(-vIG75b=_uYs%b9b(Z}lm!zDdu3ngxQ&m3hxFq)A+rE zf8#uj(X)zi*=eKJ`>A!Jp-HHvF08+n1i@l>sV)CPn`ddJN2q0#?uKt9j>rT3ZCusA zi4Y8WTss!?GgueEJ<=EdLgTm$YIDaun= z*HVha=gy2?r8+=#(8A(r7{g6?8dzus*s@e@5`(hVQ}XmQ=*6xL&0pYx$Nr2D2cGOA zg@o@sZfK-dQu}Hl!nITNMtEP;@M0@AK|MzE%^kq$%W}vZvgMqMkedmG67*7_WY0kR z&HPwebWcebYId}PXrNt$Z1((o++PgZsQA#0rg{%O6(COCV&zM$)!O!Vr1993Ef=?rSOJaUxS zVbVq#nIF47(%Jp9?4`Km8pvNpbpO+}ZOgr#FKRRu0=ZPC$qO@GUSghsZg(;0@1bc? 
zoGfDqh(DsadBQx5nhC%0T=&AcLm?Y4Xk-9?r6IsB8z0tuL4?q_V!UPAP)K1k zkf2%`AZPW#=@U_)K;ALX0ZS~uej-NvLm<;V&Ku+^xd>%9*?Yz@L=N%Fj_wst+V|Ib zHq;Ef-IH4+#QI9iDV3aNd{n1)pB?DkzwUU z3gFi)=v{|QDHzH^pJJaRKXaX^VI~->f^SU{WpDXTW9%ETfP=IYaF|0eC5owbna%T7 z&=>N)Q|U$EVLk}=6*ka5T?Q7Oa=^qc(;0xI^^>hH4)RuJ1(7j#{sdehUwS3y-8XqP zG!?Az^*x+Bk(B~NWl<1Qf@O`K{3$Y-P2j*1bCuTho0zhoN??%7L)u$$M!DI zUvhnSma*0~)TLOTGv{3z`gwGkK5IhxyYb5Min{G<4kvNd9NclywuC#Uzcb8WU&&)^ z+C=5>dlFrIG(1%TW@xxuvBq2)15g>#a(Yuy%%ZNgd8?3$mJr-sx?sLOJT&Xvlb)JA zpqLlSw{fyQ>DqhB*W)|yu~ia9O?A&ocHyu6!C5ZbSKt#a+xM4A2=y-o3BWP_K6YN# z*0g-oDMFleA6t{NZRib+0o}M>HG{%u9()$z=}C>b>9nN&giYCL z2%Gjh_iXrFt~}gUZLj`|$-mP?B(2qwW7wM&D_0#EwD(VKNW(T+hN@&RCV~UdKHJPQ zO<2#Ex^bSJgPjiQab^AdL76ph11pVvWeBal;IbrrbMbDx6xyxMN8H&g-I2zOV|1&C zCYwVd>2(-!xtmoSWgA`t;~Es{UCk^qZdgI>i+ z?R83|jjxg6Bs+4dhlK%T=phPXM!d4Jvw@(p+&!!1(Cac5B(PYi``E_N{35{sJ;&wP zh5w7PKor5Myj|Ng&Bt%i_+9yDpouB(K83={`I!oqg{`j7W;~i#=w_C)cX>ERqcNMX ze9j+vBn!%I`gDL_>09O%b=_9=E#NXpB+zkGx7(&W2KJ0m+bVl$+85Nt>r4Z`JLBM7 z(OPJKw+#Z;b&(m=NS<*MTs9w77hjY3NU#Z}j|j$%YCr+Iy)YbI)o(u*_X&P2mMoXOAhjU;R8?e+$jL+#3 z0(=&XMG^qVVY=GQCkfD#VV5xo*f!^u`p;lyv2)(3{pom}HMLgz%X1d`8l9}L`F5?9g|`MY>i6-7W6OuXS)(;g(!*$k zbfL!GmDF1{fOYxeC0MzmY20X`LkQcneGI~}cu8mcNyy41?v~Dq^mvMq&wDsZ4D~^a z=Soh8!Y~fs%5n$nB$HfEEfJZ`hZ;)=Fs)t^ezL3IL`k~jwM1JUXIm=Ao?y3QfY-GuqNKAuI*u)6~-ceqx3*a3&(&ooX z$PkhRi+++LczlpAadjER)#6``Lf>eGWd&a0W!7ryAGep{XD?$@RZf5~)NVngWRMk; zz1Gw`alU8zTQZL-3g$f*=Gg9eCsaOJXorn(wkv-{i6Nq|DU7Q=&~0CK_DH^F;5wXZ z)r|jHYBA;cmDnOmh;>;cfArmr{&L5aPfeu~+`f>|n^k*N(HrH{Grku4T48tKT@Gq;Begh$mHoWK=51E4L0P;l0V?$1zkG z(1XoCw?B)`dH!H=-9PBC;$bH@yE+w|YS`A(c`5b!+w@9Wzntju0z!-^pF(gPJ8nM2 zT$28(vC6*@`d)^oqCDem%Ioqs`rt6ChHHTO{@Y5ukjy`YYp{>$e|SeWUW_Tzp_V7~ z=D=>@^`3D(FB$tcQ_v<+ikEh#mYSJHwZ!v4!Q!mPo2T7v@z(j=H*M8nJ8e*+3LzrM8(0r&{Tc z%#`lFLQ9DLevs{Ko5ysIze&*t8yEo+UA9mG%QHGcOPG&hTq6NBd1r7s=l0&$^+YS- z?DbYy-d73tkAwo^-6;bVjkj~@g?@_H_0FNEBj3S?!w;BIu~)#HIQ|VE7{51jlknXZ zi^i~$i0$UPB4rjcU%WozH2H)A{9AjKZlznK@IPT}q39OemMaAtwIF2I9p0jgope4w 
z*5g`9`02hzM;f9}pDtc<9#Z4;DH1_T*XiYf2Hf_VM>As*H;06dxsyQBsY;7EDr#>a zJ-g`KGxwShBzm7Ock2AkyDSxJ!SYY+J4s#L_Fmc#OWW6Psbb@PxQ@@9>7Ec}>Z_y* zTQ}4fXu|Ao;bqtBrXc)9u~UohlQKuJL=Cbk31+@ogODaUI()q8*~sy9i}oaCugoPd zS45GUdY`&D%8u&_O z;I8CADQR4Jn8$J+D|P@pNwIS^KRZqiwBHcdUJO8H_q|gh6zTuLrGJ4^NW`wI?Lece zZ8>dEHhhCcad;^W&p2$aMbvz!poToMf0-P9kVB)#$<0lpd|*pm>+7H<9bHs{)`5*C zUR*Qv?Y@V;JEsu7LiuxdEhQxZFk1d5r)0Kz@9Y$M5vpOyESpLLqXb*Twe@ zdN8Z8vCFAv4B7$oDV+yuH3N&rbsb(GeXcpa@B!`OQkSH4+r|4*mh|A<2llEGy-Z3x z8qv-*cbUZ{ck!BEM}E#KsYE-ab`{GaeAvpCb()#xm;=X67rnJAF|e67zLBc}2yegTyZpzuFHJ|1`2P1bV@i2r*Kh*-c+t3d(Dq8gjM(O-oQ za+-TP;Psjp$I1XHPqOm`Lue4V8SJ)m*T=?qiv0}K;=*dj4h?LTZ)IWovKW#^eZY~1 zl>@h57Xj;`u~i?vt0J^p>*WzLZ;~2H zPU@v&O^WzlzmW=a*)#njM_tQaP^zxS=_8_dMQm8M+fgAG&U*a_!) zlL+&g5fp1$z02B0o&a!yLK*$2IVM$(xTbC2D>&Vh;@~pVW>J&`48k_GUb{>E;J&x? zXSAAb`zL$UkVeEe>s6dPY!vmu5z9&eI0y6Dl6nK}X@H?xUO$p5objsWvzIcuR~4)$a^WWJyt;x@)t5eGR6I zUjuO0(O!b)O+Qafk>`vk`fM*1o&yR3vl{#G=lJwipy#INdgm;afs5s0CinQnl*N37 z(hp&!+Z}zM&AWs*fr{y^CLjW64V8>NJedTOL9NR=KR?Tx*6x+F!Oj*ZfX0R4S67aL z=`;i2s69RbA`$>Hie9WZ@2gOXW*TPi3US_(=|7q5%#Bf%<`&RU%ClKN!pA&J61cu7 zZE*ocwk&DPd`j%0(T<+Wi2rjEtc@bDn7bT;u|9ur6{v_56Y95+4s$i7Cju%!S!ff( zcRw3YEeAkldd{{?Sx?Zu#&IMzE6{50p7(`8TRNh7gewbAVLJudZ1YqA7fe zKhJ^q{)fk$4Mx=V2QC0nfsm_Df$>OX8&H}rdJL$G8`vi?klacIo~2VoeDcOd^i4vT z^~#DVvJUI%9}$uScr-e2A+NZOf?;!`#7vZd+Geru*QFCsG}%`n;K#oAXS?qmCmhTr zfpeX^s(K38s52CKQe)*DY<1tRZ^I)(uck9NZINAFN$?1W`140V#7ibolANW~mO{bP zR*Qq~6ZZ?M)0MZEpF|0gjncY=5CYJGVo998uNTXk^rWa#;1?whcgu?f7;ip766g4c zxE~$Qqeb#;pYKz?XwD*1TW59Y9bFP=$chMqC6O% z&6F|UfB9HMI=)w3+<8m-(h(AAWsh)tEBqu@+V~?fnqvaNE^f`z1uZ$(cPbCVHSD`X z(eyWR-qV>cTI2xqEeq8@TA`A;vv(87QdTAbgR^X8J7i6PY>Pg(b@JE|-;4qRu;fn0 zZS)@qMFWCPB9w-P+u0g{?zkRqJ6487rdjWtsWY%X8q6agaS!({I(K~53&&>_wb}BM zGyp`cKO%Y58bEhV&`SdFi5QTAcu=3Zcsy|m^ziU}Tt|?Y?me|zji=oS*PSCzODO$t zZ~q4|gZ9YzG{s|xSpgiOf3OBElo5y+Ig0z3*t6aXr z=P`PPz)PKUP()^uC%a*OdF*3@1{jQrMvG%HEp0Kg3etRJc?-@T6Po$#AS=k3sPrN= zX^-g0aqAO-os9XR3L0KY4V17V>i^{G&wq3P4y(9(W_FbjEnlK7zt@P~+5C7KK#wWk 
z;FUvugle9Osr&)Re>Sn)%n{{}>`~_<=*hmUWtv!am-(k;7Su<`{Kq%=ISQVqlqlFf zV^obg;^Du~5Byckg%p2X>YvGU#P&p$9ClH6>+6cMms1V?vC83Q8`6oGaS>=%KVxfZz>AGQJgN(mk*jo%r<|GtGrQ);Iv$;cA-cXepyE3VE53U$+BGd2QX zT0zA${=X7q#{m8J`G0?4oOWs+sk^e_>cckX5!mIH8F5-5z+xJl_)Ba2A0YYHN*8q! zG5d@QNYQnXXD5#s=l{0%-$zb#1l4%divhh!+q+43j}doABee0?T>$^`(f&m2KjI8t zf6&0$;DtqYvC=w)i1bh-nw(qlTg_-9)bkoco4x3{}$BFfd+VE>OZ z{rBu-Hvx(C*obG6E&E}j{NHoUGOs?ecC7fb@bc$Ehn=kq423{KJJ`qVIiVo%?`_{8 znRN~I{#n9EWPr*M_XSvhJ(D#5#AccvkTf3Lg@U8tXvZ&I|Msg7SB|H9ZF|$XwvhjK z|E~F?35_gQp#NVt3vT%z)=hi@=&xMYd!pl1760Qw|3T0BRAhc{R+RJSXZ-&~h%Tje zX@qX3lO~amh;S!we8lvu@PBsun;rj$zvg2kas6fO6r+GW>w-wEoniDwEeucji^eU} z0M7UStCSpl?=?82cHGHb^jo$WFye3-L*z_AM?M%d{Jehox``fE?z61 zTU*Rxf4tpy4q&)`$jPPuV#ec^9Z$*rC0K-~*I9l-N8>ojP@V$Z4}iaMY1=k34e(SQzE!QnO|8n{Ld zf3(48xjxFOdvj zm!_WZ>S5j9lSd73wD*hrUAH(~97y#)1n2u`6Yuz2fFA}Q`%ZuhTaIMyt3{j}E`OUQ z!x2e;i|_A)AE7s~1+=X^J?g&s!|y>tY9>=}wL`(^Y^s*+$Mlm{u}uC$*(3RCVg~l5 zb&yy8F`0We-=nJcVrm(PmVYauG|34b(bITqm$eCPmw^pO(%O;BFL?jh7Lc*imJ#55 z8)5UYH!lY>C*%0u#>awNj`8GLcajj94|KZ%+q@C_1Z_WG#f6JsJ$jp3; zz%HK+a~4kcx!?Q`oYE6omfve(Wo#~hn|zg-xAcV(-Tb5zL+#yrS+v?x*}j=1rjRb& za+Z(@*Pm*G?3$hL@}{Nq4rJ+otP`drgZ1qIEx`?r=OBQYjo@zX`f(;bAULZTn`Y@- z&%Oq9Fl;?E7yfgpWsgo044Z1z8&Bd~+^ zd1$$B3RA+eQ}RixbS0&Hde;hFgx^%$7nL37cSdSCyxOKO|3z0IaY9EVk^R7ps5gp@ zHVb3v#g|ETvzSn>JKFR!GNiA3jCxYwose@yVez?N9~`kCuxT@3aJWXkS^*YE#O8V6 zERU*QsTS^8=gkmHjPxm^)^kVW$^RhKt#V|8YbE9(Db(!<86-H{ zfB5ferxc+%ZZ&l?r-?432B=Q+5XC2C4Nmoo{Bz{?@6~?&HOp$3DTv+22|nZewbAd7 zxa=sb7ofM%{41A-$9Ng_KVwe1a*yXVsP{9%Co;v5wB(B+S(w`}TQ*H)oZ`{M@9%T} ziTgi4NaO*p9h$Z9kNGVDcwe|EGna z7PM|3$klq5%ZWVnNVc?Dx>2dk``I4OF9~vlwryv1llQLhgr?KPf zxQ~&2^uKR-lX;>`Q+(ZWzIsom&DZz>n9-3b9^VcH08cOxJRG(D=P{v*WY5$4ciHEu zs6GElvK}5gUxq1GYCT1KlEbTiP@bSvxX8q0H<_}vy$j&={NA-wkzX7JZhBvAofhN+ zZqt3B;+dw!TE3;C^4sA#-VwS$WjEKCLX%BSzC7OJ`uYmxZ<&*PFcIJE`DQ8*7JmoT z^tZ$uJ((Dc?B{TL_qgQlq?Evz9MJck?Pt1W_SwrfrZSOh8L(|+S@rFI`Zho$KJ4!7 zE3g(jW_6S6?={WuPE=)K9sxb&2lT5~q}dIUX!5n<^f`FHLzLCsE4ZjFJ|28w~1_DQ)bXa^|^`eXd@-?R(2 
zBpvWIbR55kxW{XNa=aU}MR!>7eAD^o|rm*VbHF+ur8RfJ(Js zgsfB7wRZeGoSiT<%mWM#3bpO5K!SZY#x;7J_NQ@<2dINFy5_ZAcme{yvkM>XKrmS@ z$nBL}zU=cu2<4FyxIa0#xz$s}eI4|B+QkUH)9gi z3d!<43#XD{cdV?&l3-ZP@Hbk!&fpbgFP`?+R(#odLYB5*5qFz|(iUE$TEyVPt(C-Q zkTbO&p+>qtkLNMxwv85qekM!f4(9e6H{l519c(@he8;eU9D&=1wBhkX%1m1k#7sN2 zPKM(0%iafxZRoG7kzd4qc?cg3kH&{5tv@Y1>ha9Da_P6&{4xac%1D+13F87t2y(H^ z_q_-A1bL2$wt=Dj_>U$pv!7S0<$9vXExutMWNs((gVf1niJ?3q}zf2e(2Ur+dNHhu+(8rfu$-XK0KaAYY68WSI*>y~Q zaU2?p)2oebOV;4ynCMc#=MAbACMSotcfo8UHObcz;%r4-?;T7w5ds)!f%3-ym8=I0 z&^{p5Ez8v34CZ)n2^uk-f5D*mH3qR`_Nv2obMAW1%p82&7k5AbIk(F`fM`hJci0Ce zI`{l{uyf!&60B{iPR+p)hd4s;KJguM)J!Tzv%>!pzTdWuaHRPX@f_Ov7ifS%S8E3;QyNB_}T`xh|GY1_*9=ZZjH+As)NxpXTb>2`>F^zAY;$KUes zuMPjHs(}wa!jTS`k$1W0Kb9dGif7)^P{g{Pdww*3iu_45br8f@T+{<(0OBRY`N{vY zlBwj?+Gzfhfi29(?$9sxxys7}owLa9p&sDy$sRN+%zN3(1dZtJo#T+maDz`>f+Fwi0DL5;xu z==si-boWL%YFpJSiC3NVjA;Z`Vqpw?ZMdlQgj`|KqP*N3aMl! ze^|0#Z;@U^VzM)8)D&9Jvv#tlw*7W$(}dU-)2zu(jj;olpOw(qa)&cl(SvGj;i&4x z{@vOGJX@)IscjWM3e2kD5?h$YqfTi?&#$x6Z=?1SJvXaRG>Vqr5WZxm@5F%-z z)99t)p=dKt!=z=eI}A!6%^02s9MS-pbLN%S?#^lI{Pl&op%qlN*TFUPYS{>74AYE; zL~I7G7MSO(V%JnY=1_AzB+K_#Rs&TdKQ3(^^PwawQLrj!-R5}wtz+LyKIH|NG5}>D zDlUa$6t24qDNg1dScdYW1@C!N$5$g+i(57v-*c}BnHHf_m&IeObZzNd5R65+&yOZU zfq77~tSP%?Y)Gj~w|dPxBoU>Fbj=wmq;aR@2h^unz4T`6$4;!=`Ih#NIN^+1Xk&=5}Kz)dzH8E%Z5ISx0#NC_+g3f@jbll|T zWLL0lq~XkTtZ5@VI+nRIYP~jT@X?xItH#{fRth&T?#x`~G4MQb9m>mU8;rqcHTz=?>nbE>|cw*=oF0R7X3BwP!{1 z-c!*8-Z~fRqaZU;2~^#ju4I*!{J3!~4S!tDv4e4!Cx|?*Kz)21W)O!x2TFB#;;Kw*~)$z)%sWZ$}&L zoDEx^j)!#Sk_CsyWOqZ;!h0lNZ=^MXMZ4h#1T8u1zhS z(APKqH@yQT1)3NrUHiB4volx>oxQ8iw_z@Clcb~+F$TG64?oq0#-PRuH>_Y0cV_a{ ze^jWrsgD;_`^`c!**>Y2)WHX`PUy>ygdXV|fJ2;W$60_9)ndvQpChMpJ^=}_(?|}Cq4OABRy7@51jZ3NC-(f*2mDpM%X)A=UF!up zkT1S_1^06P&VJ4w#A+5(p2zz+E3pXlywa?VI069r--#tcER)Cb^x^=N16 zsD3s*jh`dB6>q0{NC&p18dP9Us2P(4qR^?Wt}~iv*Yo-|7Dz?&HLo6z;PU-SKVvGA zn2&hiR=H$6{i9E@QWf8#n%6ntahhf8H}^T3WEcSb$>?iHL444Ur4Y+>dzXNk&9#d zl7+R8Pc;O=Oqxy_=4hzQi|e}jGYEqNQY&*P0`U+PYIQLUrro;TFkCI 
z_aM|LNL6T!174{*#woM_aIFNVPecef!AEM1xj~K^%z;jEi%#Z<&x)XDt5z6x*U)*p z==H;ZJC!N$(y#kU{+%iz=(rN1#j|mfu!QssjJQ|kNluo6eo+Q8)$j3dw=bG>ppnB* zvwGY&lZGb^X~6sO{oVzS#gBve`OlK0D}`apPl@^!6KF3V-JXDaNzz;K;cK=R?ay9$ zr}QEKxgD~b4WzCCxnboebYl1V0L` zWM926fKp`P&s`wTsdrhvJ1&uwaT(Wk(lT6Bi6R@mc}&n(lJUfTw8;iT-kJulis;|J zIbL3kQ+XS}+>9D)!~tJNP^zd0nU-m<)-Hcn_bLaxPb1n^)yL3YN4i0wx_Qo_4}ZR4 zfOCr~UsvR))$I?*1&>guie7X$h~VN9pg0N-5+$_}}Vxwmd$8={mZzz4Ch3B%~)5tLjDVdY~E)|C97PdQj4s2%4=?igsNd$yl0| zTI6;%W^kT3yE4}TbEFwyZ&z{a^h2-*t5h2l#w2sN!+n-8eo0>z z-wo4>(tNP1G*AtJ46u2TkEd%Z-IiUSvkw1YDU%ud`IzFor$9Viw|6M40t3rgkC>h} z2;|yz*B>=ju)r|6gzZpP0B)q99yJIADia__lp`M&+xeKKcU`nt>`C6!54PqD;@8tV^pl{Cv!7P(;M z`wP`zt>EEk_1Je20b{O8BNroC`#$ul;>SE^uu0N`Jo` z$D%K;AV}v}abE-lT8-Elq?|-ueE%GFCF>^nvJ!!->rmnp{6vfUky{W6U z{zDCw$MM{=c>UPMy<-8m-+n{1(t1?qC8ajlBh;Lh;G`f2K`mJUS@@iE{E2a|?~HQs z%y_Y<|6hCW8P(Lb^$#ne2q+w+sWb%@q)YE83P|sQbVNj?mrw&3l`2J)j(|uHU7B?qQOQnBc3rDPlFsc!?rkCDhHU%J+$4BAQ`Sao^&Zv%uIv9t=zI$_h_F` z@%JRM-L>fWaOe3YO*09EZ&&s3Q2?0}(p)ZJ>;uF~ueVAZ>SmTA-~;P(?y)O`JO(2rYMFp2D%j=e<$XTgP6s;v*;nnE&tn6_-P`lP zu` z0YdLBZsgN5+mv;bsw;Vefrv2cK5d2s*c%TOyFDjs*=DiYRklanxj8+n9-Ap4bwV-u zn>KGUtfbw0UX+PqcV-rbxiEAkY=*Yc%<{ z5SmudvA&*g$ui))Gv_P3uD0J<0?<}vBLCh{Ctu~g6b}2ler@~dMKz$>f+JTxphOB@ zh;v2QgzfYm{~ac+lNbg)6aVC%sb`j(Y$C}ih9!j>Mc7<1`N~3xs%f!QSv$+ao|sqo z18;+wfU>&3D+G|plm1R#=K$G!x!`2ox|&iN$EO9(E=G@b^kh=r6x5Hy#dh2Ce>GCk ziG(MP4l+qdH!UG(wmFsA^6K?b(`8|FW8VbyvvC!#$?4~2zI}sx@5I)K6?_PIYgM~k zq=%N1e;)@MVlojL-^;;~=#@>0zwD6F;g}IE99TRdmGdN{Jy~yLE&GNAUb~>C@2v1v zDQ$&Xw)OQj&k2y?$z7XIJo`zZfCh)L0;{A|=B*8MsFW7h5;|y+U~YHP6sZH4$UJxw z<;thjXjo*2xUo>Ik34cDk)UF+ zhnBhw(;K5?WAur47}svzNmlu;=ukZ$yj`%=xD}F7Xt5nJii><~w4c)pO!uPv2Yn`k zI40!RC!h28G!L8m?Dy!>r=@_W7tFzRV(=fv$dCxluNsa&XG81mB&17CT^@1{rkmFP zv}rS3`Q`Lmd|kQmywZ5~wb9nsyGB#w&R@*Oi*PGCpGF@K4s?6JB^$hzXJl45HrQb- zSSjbVm%SM<{iA8iIif%ld0`kWX1EnrHJMGD?VSagW&f#SgM^b|52yFk061Kof2qxM z>~QshY^SE!!((SBWV)rv+v9UWj+Qpv|f7YHG%QB zd+VPP>z^!eS>FV@pMeeHL$a9hLUCjG;%82QPz<9?Z0^r7pcXSiZN6WNz${u9R>Hr+Z%uQeHN1?W2!ZwW;leP 
z+TmJNSgNwAFfU^_CLPbV->_A4g-)JV+AM)ffeYOHJ6l1lgOn5xXHwEy9#~nFY%Iim zqD#SV1P~&XcF`DP%qd#b&2`=Ax1D7P?p(UNd`N##abZ7Xjl$3jL|v!}DzHnn95Pcb z>I>yk=9-T(tJa!>LS;&E-)WPMK+`cKeu7Bw>Llc*%gr%Y5J_*J%jxLK239vWP!z+C z#?{sd(iZuO{-$Tn3PPCe)vDDQ<1DO5&ukpvLwK6Ymfhg^**}ZdI{Bj*Unz_&ZmT9r z%p@m3nnpY>JlafP<;qq2EV%QYwxqQ5NQq?l)_JF@S&V+-kDgDJOI!&)dn6ows02EA zx?Z+rzgwJ^=!gA6F~9)&(mch!m}r|R8lBU&j;kKkIrREL>d)?T@DxgehMkULRytv4 z?EE{;e7C+9RQ8mfj7$U%FlAqTd=2Mx(p57!R<0Hnq>t}P&d8g85|?NRwB5S@^}x>j z%SRwt^ch$%rmDU~vy%|rpkIdGdMcBAp6OBFPFvQcm&i_`aLl*1R(Qlm3lp@x4ZE$? z4s=s;h> zI%jK|9e4}Uy0Xsn7y!Lsq$}UuN$J^;qGp5>fgFb5atHVU*MIpzF?*ScOew%O1~&W1NG2IhbjqXVT-n-1U6nf?bXt=m85HoD4?P_0ZF>XZ9u#Qq zoj%o<>af{>eEa6Ei+#&w%Y9rG8#%>hMFz5V1H}|_Q>6?;Gd(LQp&G3T3HUK<9-XlL zDo|jH-Qod|h4*}qziNQc56U;15&PY@4>XPgaX-}Y@#cVF)hxrc+74^=FQT8Bvlp-^ zaIDUDh2AWjyS`(7iLvEMksGe`5%nK_j9+*lxGy!${43AttZnhVF9x|{FcArQOa?Qs zVrOAg3;DMlt8ZY5B-DL|4$sv-p%}oD7>}g6Zn(`n9X@3l_7PANcYbn|V>DB+;ctknDLvJU$sm<%{v=Nyd-uK< zj$J>p4T6X8c3FhNw6B?CIz3gj#mgcVPr4R&M`Y@45*}1W3Mi-cmHo)+9MZM>&11Td z;rOt5(?NJHO%P-wbO;F4 zJvT1p!M7)c+`9^uOi`1JMKc$L%@qyoh^8YgN_+GnKr5lbpXRhSTmBuNuXTsTjxGSd zxU&!$x=z*ez_-Ck&X_;3A$?9-8y!b0qdrvkx@khIAp2shX#o*~K8<(LV6sk$-d1l;iI^B-EROlu`FWscW5pYVIS^D zq(B*rfA7$la6(O^z;FiZwC?(nwZ7ut)bbqo?HOW{FhAT!io5mk=WO){&gPGCT+V7c z!YxNoDE-`O@o(og1NF>H9n<6>I;_TBtTbZ>12t}vp- z9xCr~=W~QNZ8^nICYxfQc=#=_3JRLvyNBRg0fbW`f9!p)cpo|vL{ z7)vgMima99?@NgeX`WF)ldhFeYfc5F4b3T0%Hc`0;oe}VfPa5APYL6t=Yhcw=HSAv zr7GPq4%Iw*Z_XVK{tK5*Kt_oRtL+^Y6E=N{TH)A3{K(ep#!9fb`x9;>M&UqFH9fv`6E1Rj3Jcd;eemQeSlB6NbQ$xpaO zm(!P}RQGo(pzzPtBtSCX^uuP{G<)b>8ky8lPN+idz>?jFM3Q72o~nD?!NECK9J9TZ z=`z~mG(RJzcRrsg&hqF`lDmJxUC6FjB|$C-m+V(D05%9geG#buAMM}a5)7Jzps2d{0v1xr1mVly{hZZIB5XLkF~C+2)kmT2Qn|HPI!3bD#I}~`s3et zFG%$%Yhn%$PHs?d{^h4vM4i6yKv8csodvpMDCL!z4EP?L97<4O=P2#JDsV1k3YJ+l zt0ktwWIlsszP0MS4}H`dn3vZWUP@v5lC4#25b)4j*gT118QnGS+5RkeI4$jR4(PhA z<2r3?Ns7&taJc`h@iHc;U8YDa_TgdJ_yZ|qb4%AdK0AaUqz&Zc1NBA+z6PMYU+gA;JZl7V6{UE>gO?~`^q2%p 
zVk8UeM+6fd^LdztD;6Teb$DUbuC=|rwWIvGtAwVA9dWT7K*M&W>YKQ5DxbB{f~ay9AF!9T<6RjdfFD=pi+qw48?;UJKXlRbI2jJR`1fgt`K=UPtoFvca z(HR;RF1yb~Sd5Or{Ey0;fyr&5-EoF`8)=Ut$F_$vD@@B4=2V&Y3-^FYbVc7@q|8W= zn>0T}7Pluw_Nl5$5(nJJBGo@s9u^|o>8^UP`N(d*uK!qGEQNm*4HA5@20;&+uLsP z(Oqr4YV)}6sy*J1rVO@Ii+fAz@i^r!ItiKx@bqtEo@T!swztk@1w=2zAD&~y+! zyuo0IT#`vxj5z!Qunjr}^wx1_+=4q>aAv{uWH<&otVYLdO(}{lVMnObv{FT|&A{)T zg^>p%0Xkg}ilEuh_H2w+kHXBexSMyYqkIw@Rm*L(2tGcVi_x zQyA$uv)orUF7J+kwVdmxftqA=qDobdrXg&i&R4LTk$Y&kzWC-Le}E1`2aSRh_q61X zqRw>0bEA;83sbeu#2?^KW9ey^IHXCJ^;_MR0Sxr25D<|5#mdIi?t1a08Zn%?8rXp` zQfo&o>c+&6MZkK2UGt;C+(+Sy8&`qy8YPZW%71QJ5GAtcdY_AfkO6zC`X_f&(_Oe8 zccegTC#~eZ^ zy%D>E@|CI3Z)s0{;agboT95<>+8W!5?1(+nn=_IEE4bwJ2X4%%Mn@Q5V&Hz?h%2{E zRc+IuE4=jMuvIW568syLzhJ!pzo}Am0NS0sJ;s_ zdE;7Z@oYp)=CUik{&#J5X`=v_(e0ZEn#Lvbb)z!r7V`>Y3!ZOIN^v{4z|zxu@E?73%y}w%J*78V~Qz zbY5?(I5c3y5H=y|LdhyVOF8jBf`a_gM2d@=O>6$0i{ji7hdJGlOyXF5m{q6~;&pZ# zubxEe)E%MeWn~)x_$esy>d2RLK{UyevadOMm-l-N0$914?N$<(B<}B z1eLczo#ge=me+c1-W6m(ulz&Dbz=_EyzNUT-|CGV2A%q({dT3dtCN>~oYT- z1r9;K@Ty_8c{WDE7Y_MoG$2&df_S^*Re02yJ;^4R$2gj zVH-Q`Qia!OW6wnlJH}*I5I?R%Y^f4yf}>2|h}J z0b0RDX!|q+%uXp!o;%#O`9mA$^~8*E@Ws^f!X;+6?>)J+pcc@sfZZEa3>2xIt?h4C z68gx#m4(LH*J76u8TodpVk1SEi^tbvdYzy;wT96)&OU3u-v83``^H@*KjSC11q`CC z{zEMwA3uVZF8Z5spLf$I#rq~#p|~Pit7Z{3^*yeUN^6YJ<2`h($pGn^?cECN=fWqW z4h*;gotT-?D-;W`E6kzh&h)%i=VUUd@rrnQ;EucPN<^On{n@4}wRS$(Vv(2gH%Fbs z22JEN!#U?VYx)sHz z=<`q)_l5^iwi6+=qsRvclLS$^r7ediFW-?_^;PWXA|ozcV1HVtD6dwz_6b+16P7sG zccyAfZk!_h!TqDejAvwwX)aUVxQN72%|EI-ws?N?SOpMBL(lbsgLDH9aw+Y+racdp zKKqWOiHf-x;U#AR`njR<1UyumK`PXG(XX`9%RLt^x2qUqHlqJkwy4;icQoBER$?-6 z3!Y12^&O-+?2wp+YW^gt8S$b()!N5nno!^;cs&`t?B3*%1|2cSsU6=UDAC|i1U0acYDC1?aiLM- z%(bLLaq4B;Pe5OI{7rY?vO>JbJwHHx?jhiAdzZ^=-c5=NFJVtmWx9zLT9E#1*in<*hNt`UUE4yiP5(&oaxtJpYu;vNEEUeJThNG- zLdkQRE9biBBk~HQBEKhPp`x`$CWz`_FM@L`Z@{L+s1hJq2Vv#5od8I6Tcn>Izp~(NU zH0HytlO9ra_knuoB=rfQrW6sqVT|<>_G~;~;lmzvP7kZBh%yuAuqL)R7<<)>g`rt3 zxJin0&8WqXKf@F=V;LGlW8?i^U>d`-n~SW>3YaVT5%Eb@yQPhOZX-d_cjbmZM?Y$d 
zg^AYK2qzoaVOoI4+@yGK&6vBqn{l>nWpA`*)W8UeJ<(_^LwiU5-nrAtSI-)jXq8+m zC?6S&>}7pc9-By$G?J>2K9CW@BVzbW)=npBcQWYaZjO@WotISS733dj_I)TPCQrK( z@SycxQSg{@RotpClDkL3)fywXjrx%=+@oyz6GMqGJ(y_@4Uab78u+R?719G<~q?#1(NLa_QhB$)4Ct>CX;N8p**HR{zfrx zjJt14UCCu#YJ(wB3YhBi6DNuCXupz^uJJi{&?kC(>9pOL<%5a>ma9FQKJ;0Q4a#Rt zf~5%C9%ZB9Pd(iCCFjSsrjK*gA%Cty^3vV#MwUbUMHpIHVU zZ>u+Uu>*YtzR@dk3n&SN^1Y9EeHr%ps|a$ot&eqGo|`S1F~*oG>K%_`_}7ql%xkfm zkD{WVV8TAju(EvqNlAsDg}t|{La)Ah+K8v>6yPR^-+GY3vSLfc9^I9~lS;o_CHT>J zqTNb8(9h-7y?uI!*A2}zsSP5>ZIdA>d=19;59j)W2;cc#g-zzt$|`AccXp_|(jFZN zvNfe4-1A}j?On1p9x5~YSy9oV@GuotY@oX7`}kPw06#u&>b;Pp?N$fxX&IL7hT_g! z_BFC}3F5be`EpcMyqp)8sTR$$-`BpSFN+xK0AGp4M1GZ+R~ydik+03je8FIZt$vbe zVHz*)AqNpy!AE`H>jfNGhUtg`1BY%!#uY>ke-VyP5=ni3 z&xI@2WaHzSG!5-o{nP6_NS3f#BSY+Vbj*g5pU5;lo9V@sf?!I;S?S+Gjs`C-ZbWL` zRe#j|)r?Q+kZWB;)(Z177lE)$WL@Zd*y%Hp{%U?k_wdYGd>e{>GMOO54l`B0ZIFyc zs6&ViX2dV!*_HwUCNauOFtD0faI>y3>W z$|nsd9TLZVW0ExgHU6gBFW{V{>#?^#x$Q+HAnoUe2zZQ%m4A4>xKXk4%2o1(GHakVxh-`ZlUVA?Qha7SGxCY zAMzTA^~8w_yh``(=>FU2r>GVP(j?RdBv>W5>c0!|zY@?$1M$^aUvr2f%`c^F|8T~0 z9ewl8F_HKeh4^zUg92bx5MZA@4uk{3iRRy?_OBXsrT8RKUFybDBGef$87S%)>3&3;o#t-BCjzp*h*+_}||A(0_#O=@YOS)e)`oj|r4^&F}KZWqWO#wnh+K*%Q8Rnrj z@Db;}EdPIaZM>4XCgp+<1e8?*;{V^-bNhMYN1_|W?$kdB>25WkX03nc;=c+xWQ`oy z6}B&)TcWBd_0S;w-^IcFInmnAmvPz8eZ+$&{hEXljxpt8%x&LWHo8oGm;U3Jjn__6 z8?d+k=)w?GXOp|j|1Z2+C<2yz5lgIYXsVZ&+fF<w!Vzw-d-FN$>R;Tk)8QT^wU3gtg} zadZ){KS`?hJBu!=&R7^yK3XhfHen4cim2iN*Z(F9#>o zw25JX(e+=Jmd(kHM#4VNydG4)dvaN7t)Mmxup+@28%FnMmZLZRHt^vNZVKtkd$ERk zm1mcCNK)fh;(7)`Rn<}-S03o--d`MncoF+uC5vG%o|_#An#+C6*BRe9qPAW5#e&b? 
zZuHtO)8W#*QH+ixtCw-S-f=|POX^L}foZ`iAKIW9s++&LlaVGuo8a+m* zRVX~$XOKxnYE0#O^JP+}bwrM1wcN1<%2%9>oGC+kH!}?O5nCVed6460f|v)M_f4mn znWfSD#J~7*z*j)wtiG+fgin<=F9*hIT<~-dZWQljiFf+|J}#=a6(L1!&R)q=nm~$W zr&!qGNEag;dldCMx-*Uac&5LuxM^q{Cqeo*F<#h8crfsi03RpyY=f7L(#|k}uaPa) z*~NtxzrBC^&pCzNn}jbYR>7PQ_qQ?*k1fv|e-I5RL}8P!#9@-^$d0QUW$8Bt`j-I@N8Iee^y!TZ}#qvP*cBKG5h@($6< zR95%p9GUow!HqQ^laV!dcyWP!r6h3o?q6QR_7nKsArjUO;egJ;zH`VHj_HG6B!K=yNWqfK81A@mE-du@@U0&KlJ{m>l-;m+0clU6h zb7qq*3r+s_qA{dZV2)|{**`G%p9-Z(Jbr--|2)j){8XYr4@;Xe1FMtVD$bS)dFoB4 zBpdAYA$b1#>e~}VzV2dR?O1yy`H0YNPttL;ONl(Vf)U)&xQDNQ(|MN)6DlSmcN~0* z=uckywuWL|LY-lGz1-%=+dj9x)3vZw$+kp!SLW@;+wSY%T#u9XqVZ;wzw5oKh&PU| z;?_+ipN{NcH=P(;^#nLr!7pgDCs^N6Bz*jzpc4{KBPQ-mfCMYDcK8Mw*NZm2(?4aQ z(tEYkCO*9c;;Z?br*SHwwO-LOlnuX3HU5uFekR8)>;@^%~gSmk=bL6 zVqyMc>_3*{J2wQ}jY(qLy~fHXWaZ5G*0CV?sWFTOm$L; zr>)fo*Ls1B@A*x{M~|TOA14G=l9eqNp$zAD?dBtwzLez`mHH09_kzjN?0IeLQ}#*8 zbU2d#9Ibe2G}D*$DY?yh^rO9fzKo*0HKly?F4E)mOP&)}{+)#^*2`-$MxywbMPuWZAV_axC>z$OW;>$CZBPOlbxt=$o|6hwks zWP!Q_*S6m()yz#`Inu&Vd|a@ZLoPb@YmLD4J1TK%!{g%dh0jUJZG}KJ|G=a>ySScn zdXTf{XnVSCE(+zY*%HT}x!LcCCh1t>;ffB4%0^(3DW#$btsFq5Pxc9vi)<`zq!V)x2FR&eF@` zo*;tdlK*9Ay;m%sL_l^&>VP}T$Pkc1@2u7GkP09B8LSPbWW(ycw6_@{gjUQXd3A`& zKN+19P146b^;3%)t`KeI+hg&RBk#^yZOBB)!r*&K0thS~bUc9!6CyI9(A8HkfNr2$SI1f?N`Yy-4n4&Z<% zwR_53aRHdR)67wR{C>=_UGMLpg%d#QTw$~YGQ3N^HZ6HuxKq)y)j*O~LCUT}VLFZo8el9As~ZGJ;%$wcG~7KF6V zRl#!)xsPS+R9KE;qa8i$OP_VlR7y0EoDww)GQF_JTH-nkr0ww8Cc4?-K)3DN)VLZ- zW0vQ&>JgdU)jW|C`~*LmQ*pA5?foK!AXD)Qm{0z!<(_>fCUFS4gEY>BZN-S=qWRWh zta2;udbH=c+bWV3@d#OeE8_=94}SGG8Y~4mqg5l=PpU?KMy4hWAR^*G_BTbic76O9 z(@JZwEh{?w*G=HBz0{q`=@f|mO3k5FKICb!1rZ+8VrjHxyi4%pSZ69PZ=2.2.1 # Apache-2.0 -sphinx>=2.0.0,!=2.1.0 # BSD -stestr>=2.0.0 # Apache-2.0 -testtools>=0.9.34 -yasfb>=0.8.0 diff --git a/setup.cfg b/setup.cfg deleted file mode 100644 index ddc4f0a0..00000000 --- a/setup.cfg +++ /dev/null @@ -1,12 +0,0 @@ -[metadata] -name = tripleo-specs -summary = TripleO specs repository -description_file = - README.rst -author = OpenStack 
-author_email = openstack-discuss@lists.openstack.org -home_page = https://specs.openstack.org/openstack/tripleo-specs/ -classifier = - Intended Audience :: Developers - License :: OSI Approved :: Apache Software License - Operating System :: POSIX :: Linux diff --git a/setup.py b/setup.py deleted file mode 100644 index 097bada8..00000000 --- a/setup.py +++ /dev/null @@ -1,23 +0,0 @@ -#!/usr/bin/env python -# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT -import setuptools - -setuptools.setup( - setup_requires=['pbr'], - py_modules=[], - pbr=True) diff --git a/specs/juno/backwards-compat-policy.rst b/specs/juno/backwards-compat-policy.rst deleted file mode 100644 index d1cfc0cd..00000000 --- a/specs/juno/backwards-compat-policy.rst +++ /dev/null @@ -1,260 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -========================================== -Backwards compatibility and TripleO -========================================== - -https://blueprints.launchpad.net/tripleo/+spec/tripleo-juno-backwards-compat - -TripleO has run with good but not perfect backwards compatibility since -creation. It's time to formalise this in a documentable and testable fashion. 
- -TripleO will follow Semantic Versioning (aka semver_) for versioning all -releases. We will strive to avoid breaking backwards compatibility at all, and -if we have to it will be because of extenuating circumstances such as security -fixes with no other way to fix things. - -Problem Description -=================== - -TripleO has historically run with an unspoken backwards compatibility policy -but we now have too many people making changes - we need to build a single -clear policy or else our contributors will have to rework things when one -reviewer asks for backwards compat when they thought it was not needed (or vice -versa do the work to be backwards compatible when it isn't needed. - -Secondly, because we haven't marked any of our projects as 1.0.0 there is no -way for users or developers to tell when and where backwards compatibility is -needed / appropriate. - -Proposed Change -=============== - -Adopt the following high level heuristics for identifying backwards -incompatible changes: - -* Making changes that break user code that scripts or uses a public interface. - -* Becoming unable to install something we could previously. - -* Being unable to install something because someone else has altered things - - e.g. being unable to install F20 if it no longer exists on the internet - is not an incompatible change - if it were returned to the net, we'd be able - to install it again. If we remove the code to support this thing, then we're - making an incompatible change. The one exception here is unsupported - projects - e.g. unsupported releases of OpenStack, or Fedora, or Ubuntu. - Because unsupported releases are security issues, and we expect most of our - dependencies to do releases, and stop supporting things, we will not treat - cleaning up code only needed to support such an unsupported release as - backwards compatible. 
For instance, breaking the ability to deploy a previous - *still supported* OpenStack release where we had previously been able to - deploy it is a backwards incompatible change, but breaking the ability to - deploy an *unsupported* OpenStack release is not. - -Corollaries to these principles: - -* Breaking a public API (network or Python). The public API of a project is - any released API (e.g. not explicitly marked alpha/beta/rc) in a version that - is >= 1.0.0. For Python projects, a \_ prefix marks a namespace as non-public - e.g. in ``foo.\_bar.quux`` ``quux`` is not public because it's in a non-public - namespace. For our projects that accept environment variables, if the - variable is documented (in the README.md/user documentation) then the variable - is part of the public interface. Otherwise it is not. - -* Increasing the set of required parameters to Heat templates. This breaks - scripts that use TripleO to deploy. Note that adding new parameters which - need to be set when deploying *new* things is fine because the user is - doing more than just pulling in updated code. - -* Decreasing the set of accepted parameters to Heat templates. Likewise, this - breaks scripts using the Heat templates to do deploys. If the parameters are - no longer accepted because they are for no longer supported versions of - OpenStack then that is covered by the carve-out above. - -* Increasing the required metadata to use an element except when both Tuskar - and tripleo-heat-templates have been updated to use it. There is a - bi-directional dependency from t-i-e to t-h-t and back - when we change - signals in the templates we have to update t-i-e first, and when we change - parameters to elements we have to alter t-h-t first. We could choose to make - t-h-t and t-i-e completely independent, but don't believe that is a sensible - use of time - they are closely connected, even though loosely coupled. 
- Instead we're treating them a single unit: at any point in time t-h-t can - only guarantee to deploy images built from some minimum version of t-i-e, - and t-i-e can only guarantee to be deployed with some minimum version of - t-h-t. The public API here is t-h-t's parameters, and the link to t-i-e - is equivalent to the dependency on a helper library for a Python - library/program: requiring new minor versions of the helper library is not - generally considered to be an API break of the calling code. Upgrades will - still work with this constraint - machines will get a new image at the same - time as new metadata, with a rebuild in the middle. Downgrades / rollback - may require switching to an older template at the same time, but that was - already the case. - -* Decreasing the accepted metadata for an element if that would result in an - error or misbehaviour. - -Other sorts of changes may also be backwards incompatible, and if identified -will be treated as such - that is, this list is not comprehensive. - -We don't consider the internal structure of Heat templates to be an API, nor -any test code within the TripleO codebases (whether it may appear to be public -or not). - -TripleO's incubator is not released and has no backwards compatibility -guarantees - but a point in time incubator snapshot interacts with ongoing -releases of other components - and they will be following semver, which means -that a user wanting stability can get that as long as they don't change the -incubator. - -TripleO will promote all its component projects to 1.0 within one OpenStack -release cycle of them being created. Projects may not become dependencies of a -project with a 1.0 or greater version until they are at 1.0 themselves. This -restriction serves to prevent version locking (makes upgrades impossible) by -the depending version, or breakage (breaks users) if the pre 1.0 project breaks -compatibility. 
Adding new projects will involve creating test jobs that test -the desired interactions before the dependency is added, so that the API can -be validated before the new project has reached 1.0. - -Adopt the following rule on *when* we are willing to [deliberately] break -backwards compatibility: - -* When all known uses of the code are for no longer supported OpenStack - releases. - -* If the PTL signs off on the break. E.g. a high impact security fix for which - we cannot figure out a backwards compatible way to deliver it to our users - and distributors. - -We also need to: - -* Set a timeline for new codebases to become mature (one cycle). Existing - codebases will have the clock start when this specification is approved. - -* Set rules for allowing anyone to depend on new codebases (codebase must be - 1.0.0). - -* Document what backwards compatible means in the context of heat templates and - elements. - -* Add an explicit test job for deploying Icehouse from trunk, because that will - tell us about our ability to deploy currently supported OpenStack versions - which we could previously deploy - that failing would indicate the proposed - patch is backwards incompatible. - -* If needed either fix Icehouse, or take a consensus decision to exclude - Icehouse support from this policy. - -* Commit to preserving backwards compatibility. - -* When we need alternate codepaths to support backwards compatibility we will - mark them clearly to facilitate future cleanup:: - - # Backwards compatibility: <....> - if .. - # Trunk - ... - elif - # Icehouse - ... - else - # Havana - ... - -Alternatives ------------- - -* We could say that we don't do backwards compatibility and release like the - OpenStack API services do, but this makes working with us really difficult - and it also forces folk with stable support desires to work from separate - branches rather than being able to collaborate on a single codebase. 
- -* We could treat tripleo-heat-templates and tripleo-image-elements separately - to the individual components and run them under different rules - e.g. using - stable branches rather than semver. But there have been so few times that - backwards compatibility would be hard for us that this doesn't seem worth - doing. - -Security Impact ---------------- - -Keeping code around longer may have security considerations, but this is a -well known interaction. - -Other End User Impact ---------------------- - -End users will love us. - -Performance Impact ------------------- - -None anticipated. Images will be a marginally larger due to carrying backwards -compat code around. - -Other Deployer Impact ---------------------- - -Deployers will appreciate not having to rework things. Not that they have had -to, but still. - -Developer Impact ----------------- - -Developers will have clear expectations set about backwards compatibility which -will help them avoid being asked to rework things. They and reviewers will need -to look out for backward incompatible changes and special case handling of -them to deliver the compatibility we aspire to. - -Implementation -============== - -Assignee(s) ------------ - -Primary assignee: - lifeless - -Other contributors: - -Work Items ----------- - -* Draft this spec. - -* Get consensus around it. - -* Release all our non-incubator projects as 1.0.0. - -* Add Icehouse deploy test job. (Because we could install Icehouse at the start - of Juno, and if we get in fast we can keep being able to do so). - -Dependencies -============ - -None. An argument could be made for doing a quick cleanup of stuff, but the -reality is that it's not such a burden we've had to clean it up yet. - -Testing -======= - -To ensure we don't accidentally break backwards compatibility we should look -at the oslo cross-project matrix eventually - e.g. run os-refresh-config -against older releases of os-apply-config to ensure we're not breaking -compatibility. 
Our general policy of building releases of things and using -those goes a long way to giving us good confidence though - we can be fairly -sure of no single-step regressions (but will still have to watch out for -N-step regressions unless some mechanism is put in place). - -Documentation Impact -==================== - -The users manual and developer guides should reflect this. - -References -========== - -.. _semver: http://docs.openstack.org/developer/pbr/semver.html diff --git a/specs/juno/haproxy_configuration.rst b/specs/juno/haproxy_configuration.rst deleted file mode 100644 index a088e681..00000000 --- a/specs/juno/haproxy_configuration.rst +++ /dev/null @@ -1,229 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -================================================ -Haproxy ports and related services configuration -================================================ - -Blueprint: https://blueprints.launchpad.net/tripleo/+spec/tripleo-haproxy-configuration - -Current spec provides options for HA endpoints delivery via haproxy. - - -Problem Description -=================== - -Current tripleo deployment scheme binds services on 0.0.0.0:standard_port, -with stunnel configured to listen on ssl ports. - -This configuration has some drawbacks and wont work in ha, for several reasons: - -* haproxy cant bind on : - openstack services are - bound to 0.0.0.0: - -* services ports hardcoded in many places (any_service.conf, init-keystone), - so changing them and configuring from heat would be a lot of pain - -* the non-ssl endpoint is reachable from outside the local host, - which could potentially confuse users and expose them to an insecure connection - in the case where we want to run that service on SSL only. We want to offer SSL - by default but we can't really prevent it. 
- -Proposed Change -=============== - -We will bind haproxy, stunnel (ssl), openstack services on ports with -different ipaddress settings. - -HAProxy will be bound to VIP addresses only. - -STunnel where it is used will be bound to the controller ctlplane address. - -OpenStack services will bind to localhost for SSL only configurations, and to -the ctlplane address for non-SSL or mixed-mode configurations. They will bind -to the standard non-encrypted ports, but will never bind to 0.0.0.0 on any -port. - -We'll strive to make SSL-only the default. - -An example, using horizon in mixed mode (HTTPS and HTTP): - -vip_address = 192.0.2.21 -node_address = 192.0.2.24 - -1. haproxy - listen horizon_http - bind vip_address:80 - server node_1 node_address:80 - listen horizon_https - bind vip_address:443 - server node_1 node_address:443 - -2. stunnel - accept node_address:443 - connect node_address:80 - -3. horizon - bind node_address:80 - -A second example, using horizon in HTTPS only mode: - -vip_address = 192.0.2.21 -node_address = 192.0.2.24 - -1. haproxy - listen horizon_https - bind vip_address:443 - server node_1 node_address:443 - -2. stunnel - accept node_address:443 - connect 127.0.0.1:80 - -3. horizon - bind 127.0.0.1:80 - -Alternatives ------------- - -There are several alternatives which do not cover all the requirements for -security or extensibility - -Option 1: Assignment of different ports for haproxy, stunnel, openstack services on 0.0.0.0 - -* requires additional firewall configuration -* security issue with non-ssl services endpoints - -1. haproxy - bind :80 - - listen horizon - server node_1 node_address:8800 - -2. stunnel - accept :8800 - connect :8880 - -3. 
horizon - bind :8880 - -Option 2: Using only haproxy ssl termination is suboptimal: - -* 1.5 is still in devel phase -> potential stability issues -* we would have to get this into supported distros -* this also means that there is no SSL between haproxy and real service -* security issue with non-ssl services endpoints - -1. haproxy - bind vip_address:80 - - listen horizon - server node_1 node_address:80 - -2. horizon - bind node_address:80 - -Option 3: Add additional ssl termination before load-balancer - -* not useful in current configuration because load balancer (haproxy) - and openstack services installed on same nodes - -Security Impact ---------------- - -* Only ssl protected endpoints are publicly available if running SSL only. -* Minimal firewall configuration -* Not forwarding decrypted traffic over non-localhost connections -* compromise of a control node exposes all external traffic (future and possibly past) - to decryption and/or spoofing - -Other End User Impact ---------------------- - -Several services will listen on same port, but it will be quite easy -to understand if user (operator) will know some context. - - -Performance Impact ------------------- - -No differences between approaches. - -Other Deployer Impact ---------------------- -None - -Developer Impact ----------------- -None - -Implementation -============== - -We need to make the service configs - nova etc - know on a per service basis -where to bind. The current approach uses logic in the template to choose -between localhost and my_ip. If we move the selection into Heat this can -become a lot simpler (read a bind address, if set use it, if not don't). - -We considered extending the connect_ip concept to be on a per service basis. -Right now all services are exposed to both SSL and plain, so this would be -workable until we get a situation where only some services are plain - but we -expect that sooner rather than later. 
- -Assignee(s) ------------ - -Primary assignee: - dshulyak - - -Work Items ----------- - -tripleo-incubator: -* build overcloud-control image with haproxy element - -tripleo-image-elements: - -* openstack-ssl element refactoring - -* refactor services configs to listen on 127.0.0.1 / ctlplane address: - horizon apache configuration, glance, nova, cinder, swift, ceilometer, - neutron, heat, keystone, trove - -tripleo-heat-templates: -* add haproxy metadata to heat-templates - - -Dependencies -============ -None - - -Testing -======= -CI testing dependencies: - -* use vip endpoints in overcloud scripts - -* add haproxy element to overcloud-control image (maybe with stats enabled) before - adding haproxy related metadata to heat templates - - -Documentation Impact -==================== - -* update incubator manual - -* update elements README.md - - -References -========== - -http://haproxy.1wt.eu/download/1.4/doc/configuration.txt - -https://www.stunnel.org/howto.html diff --git a/specs/juno/network_configuration.rst b/specs/juno/network_configuration.rst deleted file mode 100644 index 11e9e599..00000000 --- a/specs/juno/network_configuration.rst +++ /dev/null @@ -1,272 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -========================================== -TripleO network configuration -========================================== - -https://blueprints.launchpad.net/tripleo/+spec/os-net-config - -We need a tool (or tools) to help configure host level networking -in TripleO. This includes things like: - - * Static IPs - - * Multiple OVS bridges - - * Bonding - - * VLANs - -Problem Description -=================== - -Today in TripleO we bootstrap nodes using DHCP so they can download -custom per node metadata from Heat. This metadata contains per instance -network information that allows us to create a customized host level network -configuration. 
- -Today this is accomplished via two scripts: - - * ensure-bridge: http://git.openstack.org/cgit/openstack/tripleo-image-elements/tree/elements/network-utils/bin/ensure-bridge - * init-neutron-ovs: http://git.openstack.org/cgit/openstack/tripleo-image-elements/tree/elements/neutron-openvswitch-agent/bin/init-neutron-ovs - -The problem with the existing scripts is that their feature set is extremely -prescriptive and limited. Today we only support bridging a single NIC -onto an OVS bridge, VLAN support is limited and more advanced configuration -(of even common IP address attributes like MTUs, etc) is not possible. - -Furthermore we also desire some level of control over how networking changes -are made and whether they are persistent. In this regard a provider layer -would be useful so that users can choose between using for example: - - * ifcfg/eni scripts: used where persistence is required and we want - to configure interfaces using the distro supported defaults - * iproute2: used to provide optimized/streamlined network configuration - which may or may not also include persistence - -Our capabilities are currently limited to the extent that we are unable -to fully provision our TripleO CI overclouds without making manual -changes and/or hacks to images themselves. As such we need to -expand our host level network capabilities. - -Proposed Change -=============== - -Create a new python project which encapsulates host level network configuration. - -This will likely consist of: - - * an internal python library to facilitate host level network configuration - - * a binary which processes a YAML (or JSON) format and makes the associated - python library calls to configure host level networking. - -By following this design the tool should work well with Heat driven -metadata and provide us the future option of moving some of the -library code into Oslo (oslo.network?) or perhaps Neutron itself. 
- -The tool will support a "provider" layer such that multiple implementations -can drive the host level network configuration (iproute2, ifcfg, eni). -This is important because as new network config formats are adopted -by distributions we may want to gradually start making use of them -(thinking ahead to systemd.network for example). - -The tool will also need to be extensible such that we can add new -configuration options over time. We may for example want to add -more advanced bondings options at a later point in time... and -this should be as easy as possible. - -The focus of the tool initially will be host level network configuration -for existing TripleO features (interfaces, bridges, vlans) in a much -more flexible manner. While we support these things today in a prescriptive -manner the new tool will immediately support multiple bridges, interfaces, -and vlans that can be created in an ad-hoc manner. Heat templates can be -created to drive common configurations and people can customize those -as needed for more advanced networking setups. - -The initial implementation will focus on persistent configuration formats -for ifcfg and eni, like we do today via ensure-bridge. This will help us -continue to make steps towards bringing bare metal machines back online -after a power outage (providing a static IP for the DHCP server for example). - -The primary focus of this tool should always be host level network -configuration and fine tuning that we can't easily do within Neutron itself. -Over time the scope and concept of the tool may shift as Neutron features are -added and/or subtracted. - - -Alternatives ------------- - -One alternative is to keep expanding ensure-bridge and init-neutron-ovs -which would require a significant number of new bash options and arguments to -configure all the new features (vlans, bonds, etc.). - -Many of the deployment projects within the OpenStack ecosystem are doing -similar sorts of networking today. 
Consider: - - * Chef/Crowbar: https://github.com/opencrowbar/core/blob/master/chef/cookbooks/network/recipes/default.rb - * Fuel: https://github.com/stackforge/fuel-library/tree/master/deployment/puppet/l23network - * VDSM (GPL): contains code to configure interfaces, both ifcfg and iproute2 abstractions (git clone http://gerrit.ovirt.org/p/vdsm.git, then look at vdsm/vdsm/network/configurators) - * Netconf: heavy handed for this perhaps but interesting (OpenDaylight, etc) - -Most of these options are undesirable because they would add a significant -number of dependencies to TripleO. - -Security Impact ---------------- - -The configuration data used by this tool is already admin-oriented in -nature and will continue to be provided by Heat. As such there should -be no user facing security concerns with regards to access to the -configuration data that aren't already present. - -This implementation will directly impact the low level network connectivity -in all layers of TripleO including the seed, undercloud, and overcloud -networks. Any of the host level networking that isn't already provided -by Neutron is likely affected. - -Other End User Impact ---------------------- - -This feature enables deployers to build out more advanced undercloud and -overcloud networks and as such should help improve the reliability and -performance of the fundamental host network capabilities in TripleO. - -End users should benefit from these efforts. - -Performance Impact ------------------- - -This feature will allow us to build better/more advanced networks and as -such should help improve performance. In particular the interface bonding -and VLAN support should help in this regard. 
- -Other Deployer Impact ---------------------- - -None - -Developer Impact ----------------- - -None - - -Implementation -============== - -Assignee(s) ------------ - -Primary assignee: - Dan Prince (dan-prince on Launchpad) - -Work Items ----------- - - * Create project on GitHub: os-net-config - - * Import project into openstack-infra, get unit tests gating, etc. - - * Build a python library to configure host level networking with - an initial focus on parity with what we already have including things - we absolutely need for our TripleO CI overcloud networks. - - The library will consist of an object model which will allow users to - create interfaces, bridges, and vlans, and bonds (optional). Each of - these types will act as a container for address objects (IPv4 and IPv6) - and routes (multiple routes may be defined). Additionally, each - object will include options to enable/disable DHCP and set the MTU. - - * Create provider layers for ifcfg/eni. The providers take an object - model and apply it ("make it so"). The ifcfg provider will write out - persistent config files in /etc/sysconfig/network-scripts/ifcfg- - and use ifup/ifdown to start and stop the interfaces when an change - has been made. The eni provider will write out configurations to - /etc/network/interfaces and likewise use ifup/ifdown to start and - stop interfaces when a change has been made. - - * Create a provider layer for iproute2. Optional, can be done at - a later time. This provider will most likely not use persistent - formats and will run various ip/vconfig/route commands to - configure host level networking for a given object model. - - * Create a binary that processes a YAML config file format and makes - the correct python library calls. The binary should be idempotent - in that running the binary once with a given configuration should - "make it so". Running it a second time with the same configuration - should do nothing (i.e. it is safe to run multiple times). 
An example - YAML configuration format is listed below which describes a single - OVS bridge with an attached interface, this would match what - ensure-bridge creates today: - -.. code-block:: yaml - - network_config: - - - type: ovs_bridge - name: br-ctlplane - use_dhcp: true - ovs_extra: - - br-set-external-id br-ctlplane bridge-id br-ctlplane - members: - - - type: interface - name: em1 - -.. - - The above format uses a nested approach to define an interface - attached to a bridge. - - * TripleO element to install os-net-config. Most likely using - pip (but we may use git initially until it is released). - - * Wire this up to TripleO...get it all working together using the - existing Heat metadata formats. This would include any documentation - changes to tripleo-incubator, deprecating old elements, etc. - - * TripleO heat template changes to use the new YAML/JSON formats. Our default - configuration would most likely do exactly what we do today (OVS bridge - with a single attached interface). We may want to create some other example - heat templates which can be used in other environments (multi-bridge - setups like we use for our CI overclouds for example). - - -Dependencies -============ - -None - -Testing -======= - -Existing TripleO CI will help ensure that as we implement this we maintain -parity with the current feature set. - -The ability to provision and make use of our Triple CI clouds without -custom modifications/hacks will also be a proving ground for much of -the work here. - -Additional manual testing may be required for some of the more advanced -modes of operation (bonding, VLANs, etc.) - -Documentation Impact -==================== - -The recommended heat metadata used for network configuration may -change as result of this feature. Older formats will be preserved for -backwards compatibility. 
- -References -========== - -Notes from the Atlanta summit session on this topic can be found -here (includes possible YAML config formats): - - * https://etherpad.openstack.org/p/tripleo-network-configuration diff --git a/specs/juno/oac-header.rst b/specs/juno/oac-header.rst deleted file mode 100644 index cd45fa3b..00000000 --- a/specs/juno/oac-header.rst +++ /dev/null @@ -1,162 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -===================================== -Control mechanism for os-apply-config -===================================== - -Problem Description -=================== - -We require a control mechanism in os-apply-config (oac). This could be used, -for example, to: - - * Not create an empty target - * Set permissions on the target - -Proposed Change -=============== - -The basic proposal is to parameterise oac with maps (aka dictionaries) -containing control data. These maps will be supplied as YAML in companion -control files. Each file will be named after the template it relates to, with a -".oac" suffix. For example, the file "abc/foo.sh" would be controlled by -"abc/foo.sh.oac". - -Only control files with matching templates files will be respected, IE the file -"foo" must exist for the control file "foo.oac" to have any effect. A dib-lint -check will be added to look for file control files without matching templates, -as this may indicate a template has been moved without its control file. - -Directories may also have control files. In this case, the control file must be -inside the directory and be named exactly "oac". A file either named "oac" or -with the control file suffix ".oac" will never be considered as templates. - -The YAML in the control file must evaluate to nothing or a mapping. The former -allows for the whole mapping having been commented out. The presence of -unrecognised keys in the mapping is an error. 
File and directory control keys -are distinct but may share names. If they do, they should also share similar -semantics. - -Example control file:: - - key1: true - key2: 0700 - # comment - key3: - - 1 - - 2 - -To make the design concrete, one file control key will be offered initially: -allow_empty. This expects a Boolean value and defaults to true. If it is true, -oac will behave as it does today. Otherwise, if after substitutions the -template body is empty, no file will be created at the target path and any -existing file there will be deleted. - -allow_empty will also be allowed as a directory control key. Again, it will -expect a Boolean value and default to true. Given a nested structure -"A/B/C/foo", where "foo" is an empty file with allow_empty=false: - - * C has allow_empty=false: A/B/ is created, C is not. - * B has allow_empty=false: A/B/C/ is created. - * B and C have allow_empty=false: Only A/ is created. - -It is expected that additional keys will be proposed soon after this spec is -approved. - -Alternatives ------------- - -A fenced header could be used rather than a separate control file. Although -this aids visibility of the control data, it is less consistent with control -files for directories and (should they be added later) symlinks. - -The directory control file name has been the subject of some debate. -Alternatives to control "foo/" include: - - * foo/.oac (not visible with unmodified "ls") - * foo/oac.control (longer) - * foo/control (generic) - * foo.oac (if foo/ is empty, can't be stored in git) - * foo/foo.oac (masks control file for foo/foo) - -Security Impact ---------------- - -None. The user is already in full control of the target environment. For -example, they could use the allow_empty key to delete a critical file. However -they could already simply provide a bash script to do the same. Further, the -resulting image will be running on their (or their customer's) hardware, so it -would be their own foot they'd be shooting. 
- -Other End User Impact ---------------------- - -None. - -Performance Impact ------------------- - -None. - -Other Deployer Impact ---------------------- - -None. - -Developer Impact ----------------- - -It will no longer be possible to create files named "oac" or with the suffix -".oac" using oac. This will not affect any elements currently within -diskimage-builder or tripleo-image-elements. - - -Implementation -============== - -Assignee(s) ------------ - -Primary assignee: - alexisl (aka lxsli, Alexis Lee) - -Other contributors: - None - -Work Items ----------- - - * Support file control files in oac - * Support the allow_empty file control key - * Add dib-lint check for detached control files - * Support directory control files in oac - * Support the allow_empty directory control key - * Update the oac README - -Dependencies -============ - -None. - -Testing -======= - -This change is easily tested using standard unit test techniques. - -Documentation Impact -==================== - -The oac README must be updated. - -References -========== - -There has already been some significant discussion of this feature: - https://blueprints.launchpad.net/tripleo/+spec/oac-header - -There is a bug open for which an oac control mechanism would be useful: - https://bugs.launchpad.net/os-apply-config/+bug/1258351 diff --git a/specs/juno/promote-heat-env.rst b/specs/juno/promote-heat-env.rst deleted file mode 100644 index c625fe12..00000000 --- a/specs/juno/promote-heat-env.rst +++ /dev/null @@ -1,258 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -================ -Promote HEAT_ENV -================ - -https://blueprints.launchpad.net/tripleo/+spec/tripleo-juno-promote-heat-env - -Promote values set in the Heat environment file to take precedence over -input environment variables. 
- -Problem Description -=================== - -Historically TripleO scripts have consulted the environment for many items of -configuration. This raises risks of scope leakage and the number of environment -variables required often forces users to manage their environment with scripts. -Consequently, there's a push to prefer data files like the Heat environment -file (HEAT_ENV) which may be set by passing -e to Heat. To allow this file to -provide an unambiguous source of truth, the environment must not be allowed to -override the values from this file. That is to say, precedence must be -transferred. - -A key distinction is whether the value of an environment variable is obtained -from the environment passed to it by its parent process (either directly or -through derivation). Those which are will be referred to as "input variables" -and are deprecated by this spec. Those which are not will be called "local -variables" and may be introduced freely. Variables containing values -synthesised from multiple sources must be handled on a case-by-case basis. 
- - -Proposed Change -=============== - -Since changes I5b7c8a27a9348d850d1a6e4ab79304cf13697828 and -I42a9d4b85edcc99d13f7525e964baf214cdb7cbf, ENV_JSON (the contents of the file -named by HEAT_ENV) is constructed in devtest_undercloud.sh like so:: - - ENV_JSON=$(jq '.parameters = { - "MysqlInnodbBufferPoolSize": 100 - } + .parameters + { - "AdminPassword": "'"${UNDERCLOUD_ADMIN_PASSWORD}"'", - "AdminToken": "'"${UNDERCLOUD_ADMIN_TOKEN}"'", - "CeilometerPassword": "'"${UNDERCLOUD_CEILOMETER_PASSWORD}"'", - "GlancePassword": "'"${UNDERCLOUD_GLANCE_PASSWORD}"'", - "HeatPassword": "'"${UNDERCLOUD_HEAT_PASSWORD}"'", - "NovaPassword": "'"${UNDERCLOUD_NOVA_PASSWORD}"'", - "NeutronPassword": "'"${UNDERCLOUD_NEUTRON_PASSWORD}"'", - "NeutronPublicInterface": "'"${NeutronPublicInterface}"'", - "undercloudImage": "'"${UNDERCLOUD_ID}"'", - "BaremetalArch": "'"${NODE_ARCH}"'", - "PowerSSHPrivateKey": "'"${POWER_KEY}"'", - "NtpServer": "'"${UNDERCLOUD_NTP_SERVER}"'" - }' <<< $ENV_JSON) - -This is broadly equivalent to "A + B + C", where values from B override those -from A and values from C override those from either. Currently section C -contains a mix of input variables and local variables. It is proposed that -current and future environment variables are allocated such that: - -* A only contains default values. -* B is the contents of the HEAT_ENV file (from either the user or a prior run). -* C only contains computed values (from local variables). - -The following are currently in section C but are not local vars:: - - NeutronPublicInterface (default 'eth0') - UNDERCLOUD_NTP_SERVER (default '') - -The input variables will be ignored and the defaults moved into section A:: - - ENV_JSON=$(jq '.parameters = { - "MysqlInnodbBufferPoolSize": 100, - "NeutronPublicInterface": "eth0", - "NtpServer": "" - } + .parameters + { - ... elided ... - }' <<< $ENV_JSON) - -devtest_overcloud.sh will be dealt with similarly. 
These are the variables -which need to be removed and their defaults added to section A:: - - OVERCLOUD_NAME (default '') - OVERCLOUD_HYPERVISOR_PHYSICAL_BRIDGE (default '') - OVERCLOUD_HYPERVISOR_PUBLIC_INTERFACE (default '') - OVERCLOUD_BRIDGE_MAPPINGS (default '') - OVERCLOUD_FLAT_NETWORKS (default '') - NeutronPublicInterface (default 'eth0') - OVERCLOUD_LIBVIRT_TYPE (default 'qemu') - OVERCLOUD_NTP_SERVER (default '') - -Only one out of all these input variables is used outside of these two scripts -and consequently the rest are safe to remove. - -The exception is OVERCLOUD_LIBVIRT_TYPE. This is saved by the script -'write-tripleorc'. As it will now be preserved in HEAT_ENV, it does not need to -also be preserved by write-tripleorc and can be removed from there. - ----- - -So that users know they need to start setting these values through HEAT_ENV -rather than input variables, it is further proposed that for an interim period -each script echo a message to STDERR if deprecated input variables are set. For -example:: - - for OLD_VAR in OVERCLOUD_NAME; do - if [ ! -z "${!OLD_VAR}" ]; then - echo "WARNING: ${OLD_VAR} is deprecated, please set this in the" \ - "HEAT_ENV file (${HEAT_ENV})" 1>&2 - fi - done - ----- - -To separate user input from generated values further, it is proposed that user -values be read from a new file - USER_HEAT_ENV. This will default to -{under,over}cloud-user-env.json. A new commandline parameter, --user-heat-env, -will be added to both scripts so that this can be changed. - -#. ENV_JSON is initialised with default values. -#. ENV_JSON is overlaid by HEAT_ENV. -#. ENV_JSON is overlaid by USER_HEAT_ENV. -#. ENV_JSON is overlaid by computed values. -#. ENV_JSON is saved to HEAT_ENV. - -See http://paste.openstack.org/show/83551/ for an example of how to accomplish -this. 
In short:: - - ENV_JSON=$(cat ${HEAT_ENV} ${USER_HEAT_ENV} | jq -s ' - .[0] + .[1] + {"parameters": - ({..defaults..} + .[0].parameters + {..computed..} + .[1].parameters)}') - cat > "${HEAT_ENV}" <<< ${ENV_JSON} - -Choosing to move user data into a new file, compared to moving the merged data, -makes USER_HEAT_ENV optional. If users wish, they can continue providing their -values in HEAT_ENV. The complementary solution requires users to clean -precomputed values out of HEAT_ENV, or they risk unintentionally preventing the -values from being recomputed. - -Loading computed values after user values sacrifices user control in favour of -correctness. Considering that any devtest user must be rather technical, if a -computation is incorrect they can fix or at least hack the computation -themselves. - -Alternatives ------------- - -Instead of removing the input variables entirely, an interim form could be -used:: - - ENV_JSON=$(jq '.parameters = { - "MysqlInnodbBufferPoolSize": 100, - "NeutronPublicInterface": "'"${NeutronPublicInterface}"'", - "NtpServer": "'"${UNDERCLOUD_NTP_SERVER}"'" - } + .parameters + { - ... - } - -However, the input variables would only have an effect if the keys they affect -are not present in HEAT_ENV. As HEAT_ENV is written each time devtest runs, the -keys will usually be present unless the file is deleted each time (rendering it -pointless). So this form is more likely to cause confusion than aid -transition. - ----- - -jq includes an 'alternative operator', ``//``, which is intended for providing -defaults:: - - A filter of the form a // b produces the same results as a, if a produces - results other than false and null. Otherwise, a // b produces the same - results as b. - -This has not been used in the proposal for two reasons: - -#. It only works on individual keys, not whole maps. -#. It doesn't work in jq 1.2, still included by Ubuntu 13.04 (Saucy). - -Security Impact ---------------- - -None. 
- -Other End User Impact ---------------------- - -An announcement will be made on the mailing list when this change merges. This -coupled with the warnings given if the deprecated variables are set should -provide sufficient notice. - -As HEAT_ENV is rewritten every time devtest executes, we can safely assume it -matches the last environment used. However users who use scripts to switch -their environment may be surprised. Overall the change should be a benefit to -these users, as they can use two separate HEAT_ENV files (passing --heat-env to -specify which to activate) instead of needing to maintain scripts to set up -their environment and risking settings leaking from one to the other. - -Performance Impact ------------------- - -None. - -Other Deployer Impact ---------------------- - -None. - -Developer Impact ----------------- - -None. - - -Implementation -============== - -Assignee(s) ------------ - -lxsli - -Work Items ----------- - -* Add USER_HEAT_ENV to both scripts. -* Move variables in both scripts. -* Add deprecated variables warning to both scripts. -* Remove OVERCLOUD_LIBVIRT_TYPE from write-tripleorc. - - -Dependencies -============ - -None. - - -Testing -======= - -The change will be tested in isolation from the rest of the script. - - -Documentation Impact -==================== - -* Update usage docs with env var deprecation warnings. -* Update usage docs to recommend HEAT_ENV. - - -References -========== - -#. http://stedolan.github.io/jq/manual/ - JQ manual -#. http://jqplay.herokuapp.com/ - JQ interactive demo diff --git a/specs/juno/ssl_pki.rst b/specs/juno/ssl_pki.rst deleted file mode 100644 index e4d0403b..00000000 --- a/specs/juno/ssl_pki.rst +++ /dev/null @@ -1,169 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. 
- - http://creativecommons.org/licenses/by/3.0/legalcode - -======= -SSL PKI -======= - -https://blueprints.launchpad.net/tripleo/+spec/tripleo-juno-ssl-pki - -Each of our clouds require multiple ssl certificates to operate. We need to -support generating these certificates in devtest in a manner which will -closely resemble the needs of an actual deployment. We also need to support -interfacing with the PKI (Public Key Infrastructure) of existing organizations. -This spec outlines the ways we will address these needs. - -Problem Description -=================== - -We have a handful of services which require SSL certificates: - - * Keystone - * Public APIs - * Galera replication - * RabbitMQ replication - -Developers need to have these certificates generated automatically for them, -while organizations will likely want to make use of their existing PKI. We -have not made clear at what level we will manage these certificates and/or -their CA(s) and at what level the user will be responsible for them. This is -further complicated by the Public API's likely having a different CA than the -internal-only facing services. - -Proposed Change -=============== - -Each of these services will accept their SSL certificate, key, and CA via -environment JSON (heat templates for over/undercloud, config.json for seed). - -At the most granular level, a user can specify these values by editing the -over/undercloud-env.json or config.json files. If a certificate and key is -specified for a service then we will not attempt to automatically generate one -for that service. If only a certificate or key is specified it is considered -an error. - -If no certificate and key is specified for a service, we will attempt to -generate a certificate and key, and sign the certificate with a self-signed -CA we generate. Both the undercloud and seed will share a self-signed CA in -this scenario, and each overcloud will have a separate self-signed CA. 
We will -also add this self-signed CA to the chain of trust for hosts which use services -of the cloud being created. - -The use of a custom CA for signing the automatically generated certificates -will be solved in a future iteration. - -Alternatives ------------- - -None presented thus far. - -Security Impact ---------------- - -This change has high security impact as it affects our PKI. We currently do not -have any SSL support, and implementing this should therefore improve our -security. We should ensure all key files we create in this change have file -permissions of 0600 and that the directories they reside in have permissions -of 0700. - -There are many security implications for SSL key generation (including entropy -availability) and we defer to the OpenStack Security Guide[1] for this. - -Other End User Impact ---------------------- - -Users can interact with this feature by editing the under/overcloud-env.json -files and the seed config.json file. Additionally, the current properties which -are used for specifying the keystone CA and certificate will be changed to -support a more general naming scheme. - -Performance Impact ------------------- - -We will be performing key generation which can require a reasonable amount of -resources, including entropy sources. - -Other Deployer Impact ---------------------- - -None - -Developer Impact ----------------- - -More SSL keys will be generated for developers. Debugging via monitoring -network traffic can also be more difficult once SSL is adopted. Production -environments will also require SSL unwrapping to debug network traffic, so this -will allow us to closer emulate production (developers can now spot missing SSL -wrapping). - -Implementation -============== - -The code behind generate-keystone-pki in os-cloud-config will be generalized -to support creation of a CA and certificates separately, and support creation -of multiple certificates using a single CA. 
A new script will be created -named 'generate-ssl-cert' which accepts a heat environment JSON file and a -service name. This will add ssl.certificate and ssl.certificate_key properties -under the servicename property (an example is below). If no ssl.ca_certificate -and ssl.ca_certificate_key properties are defined then this script will perform -generation of the self-signed certificate. - -Example heat environment output:: - - { - "ssl": { - "ca_certificate": "", - "ca_key": "" - }, - "horizon": { - "ssl": { - "ca_certificate": "", - "ca_certificate_key": "" - }, - ... - }, - ... - } - -Assignee(s) ------------ - -Primary assignee: - greghaynes - -Work Items ----------- - - * Generalize CA/certificate creation in os-cloud-config. - * Add detection logic for certificate key pairs in -env.json files to devtest - * Make devtest scripts call CA/cert creation scripts if no cert is found - for a service - -Dependencies -============ - -The services listed above are not all set up to use SSL certificates yet. This -is required before we can add detection logic for user specified certificates -for all services. - -Testing -======= - -Tests for new functionality will be made to os-cloud-config. The default -behavior for devtest is designed to closely mimic a production setup, allowing -us to best make use of our CI. - -Documentation Impact -==================== - -We will need to document the new interfaces described in 'Other End User -Impact'. - -References -========== - -1. Openstack Security Guide: http://docs.openstack.org/security-guide/content/ diff --git a/specs/juno/tripleo-juno-ci-improvements.rst b/specs/juno/tripleo-juno-ci-improvements.rst deleted file mode 100644 index 380566b9..00000000 --- a/specs/juno/tripleo-juno-ci-improvements.rst +++ /dev/null @@ -1,269 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. 
- - http://creativecommons.org/licenses/by/3.0/legalcode - -======================= -TripleO CI improvements -======================= - -https://blueprints.launchpad.net/tripleo/+spec/tripleo-juno-ci-improvements - -Tripleo CI is painful at the moment, we have problems with both reliability -and consistency of running job times, this spec is intended to address a -number of the problems we have been facing. - -Problem Description -=================== - -Developers should be able to depend on CI to produce reliable test results with -a minimum number of false negatives reported in a timely fashion, this -currently isn't the case. To date the reliability of tripleo ci has been -heavily affected by network glitches, availability of network resources and -reliability of the CI clouds. This spec is intended to deal with the problems -we have been seeing. - -**Problem :** Reliability of hp1 (hp1_reliability_) - Intermittent failures on jobs running on the hp1 cloud have been causing a - large number of job failures and sometimes taking this region down - altogether. Current thinking is that the root of most of these issues is - problems with a mellanox driver. - -**Problem :** Unreliable access to network resources (net_reliability_) - Gaining reliable access to various network resources has been inconsistent - causing a CI outage when any one network resource is unavailable. Also - inconsistent speeds downloading these resources can make it difficult to - gauge overall speed improvements made to tripleo. - -**Problem :** (system_health_) The health of the overall CI system isn't - immediately obvious, problems often persist for hours (or occasionally days) - before we react to them. - -**Problem :** (ci_run_times_) The tripleo devtest story takes time to run, - this uses up CI resources and developer's time, where possible we should - reduce the time required to run devtest. 
- -**Problem :** (inefficient_usage_) Hardware on which to run tripleo is a finite - resource, there is a spec in place to run devtest on an openstack - deployment[1], this is the best way forward in order to use the resources we - have in the most efficient way possible. We also have a number of options to - explore that would help minimise resource wastage. - -**Problem :** (system_feedback_) Our CI provides no feedback about trends. - A good CI system should be more than a system that reports pass or fail, we - should be getting feedback on metrics allowing us to observe degradations, - where possible we should make use of services already provided by infra. - This will allow us to proactively intervene as CI begins to degrade. - -**Problem :** (bug_frequency_) We currently have no indication of which CI - bugs are occurring most often. This frustrates efforts to make CI more - reliable. - -**Problem :** (test_coverage_) Currently CI only tests a subset of what it - should. - - -Proposed Change -=============== - -There are a number of changes required in order to address the problems we have -been seeing, each listed here (in order of priority). - -.. _hp1_reliability: - -**Solution :** - -* Temporarily scale back on CI by removing one of the overcloud jobs (so rh1 has - the capacity to run CI Solo). -* Remove hp1 from the configuration. -* Run burn-in tests on each hp1 host, removing (or repairing) failing hosts. - Burn-in tests should consist of running CI on a newly deployed cloud matching - the load expected to run on the region. Any failure rate should not exceed - that of currently deployed regions. -* Redeploy testing infrastructure on hp1 and test with tempest, this redeploy - should be done with our tripleo scripts so it can be repeated and we - are sure of parity between ci-overcloud deployments. -* Place hp1 back into CI and monitor situation. -* Add back any removed CI jobs. 
-* Ensure burn-in / tempest tests are followed on future regions being deployed. -* Attempts should be made to deal with problems that develop on already - deployed clouds, if it becomes obvious they can't be quickly dealt with after - 48 hours they should be temporarily removed from the CI infrastructure and will - need to pass the burn-in tests before being added back into production. - -.. _net_reliability: - -**Solution :** - -* Deploy a mirror of pypi.openstack.org on each Region. -* Deploy a mirror of the Fedora and Ubuntu package repositories on each region. -* Deploy squid in each region and cache http traffic through it, mirroring - where possible should be considered our preference but having squid in place - should cache any resources not mirrored. -* Mirror other resources (e.g. github.com, percona tarballs etc..). -* Any new requirements added to devtest should be cachable with caches in - place before the requirement is added. - -.. _system_health: - -**Solution :** - -* Monitor our CI clouds and testenvs with Icinga, monitoring should include - ping, starting (and connecting to) new instances, disk usage etc.... -* Monitor CI test results and trigger an alert if "X" number of jobs of the - same type fail in succession. An example of using logstash to monitor CI - results can be found here[5]. - -Once consistency is no longer a problem we will investigate speed improvements -we can make on the speed of CI jobs. - -.. _ci_run_times: - -**Solution :** - -* Investigate if unsafe disk caching strategies will speed up disk image - creation, if an improvement is found implement it in production CI by one of - - * run "unsafe" disk caching strategy on ci cloud VM's (would involve exposing - this libvirt option via the nova api). - * use "eatmydata" to noop disk sync system calls, not currently - packaged for F20 but we could try and restart that process[2]. - - -.. 
_inefficient_usage: - -**Solution :** - -* Abandon on failure : adding a feature to zuul (or turning it on if it already - exists) to abandon all jobs in a queue for a particular commit as soon as a - voting commit fails. This would minimize usage of resources running long - running jobs that we already know will have to be rechecked. - -* Adding the collectl element to compute nodes and testenv hosts will allow us - to find bottle necks and also identify places where it is safe to overcommit - (e.g. we may find that overcommitting CPU a lot on testenv hosts is viable). - -.. _system_feedback: - -**Solution :** - -* Using a combination of logstash and graphite - - * Output graphs of occurrences of false negative test results. - * Output graphs of CI run times over time in order to identify trends. - * Output graphs of CI job peak memory usage over time. - * Output graphs of CI image sizes over time. - -.. _bug_frequency: - -**Solution :** - -* In order to be able to track false negatives that are hurting us most we - should agree not to use "recheck no bug", instead recheck with the - relevant bug number. Adding signatures to Elastic recheck for known CI - issues should help uptake of this. - -.. _test_coverage: - -**Solution :** - -* Run tempest against the deployed overcloud. -* Test our upgrade story by upgrading to new images. Initially to avoid - having to build new images we can edit something on the overcloud qcow images - in place in order to get a set of images to upgrade to[3]. - - -Alternatives ------------- - -* As an alternative to deploying our own distro mirrors we could simply point - directly at a mirror known to be reliable. This is undesirable as a long - term solution as we still can't control outages. - -Security Impact ---------------- - -None - -Other End User Impact ---------------------- - -* No longer using recheck no bug places a burden on developers to - investigate why a job failed. 
- -* Adding coverage to our tests will increase the overall time to run a job. - -Performance Impact ------------------- - -Performance of CI should improve overall. - -Other Deployer Impact ---------------------- - -None - -Developer Impact ----------------- - -None - -Implementation -============== - -Assignee(s) ------------ - -Primary assignee: - derekh - -Other contributors: - looking for volunteers... - - -Work Items ----------- - -* hp1 upgrade to trusty. -* Potential pypi mirror. -* Fedora Mirrors. -* Ubuntu Mirrors. -* Mirroring other non distro resources. -* Per region caching proxy. -* Document CI. -* Running an unsafe disk caching strategy in the overcloud nodes. -* ZUUL abandon on failure. -* Include collectl on compute and testenv Hosts and analyse output. -* Mechanism to monitor CI run times. -* Mechanism to monitor nodepool connection failures to instances. -* Remove ability to recheck no bug or at the very least discourage its use. -* Monitoring cloud/testenv health. -* Expand ci to include tempest. -* Expand ci to include upgrades. - - -Dependencies -============ - -None - -Testing -======= - -CI failure rate and timings will be tracked to confirm improvements. - -Documentation Impact -==================== - -The tripleo-ci repository needs additional documentation in order to describe -the current layout and should then be updated as changes are made. 
- -References -========== - -* [1] spec to run devtest on openstack https://review.openstack.org/#/c/92642/ -* [2] eatmydata for Fedora https://bugzilla.redhat.com/show_bug.cgi?id=1007619 -* [3] CI upgrades https://review.openstack.org/#/c/87758/ -* [4] summit session https://etherpad.openstack.org/p/juno-summit-tripleo-ci -* [5] http://jogo.github.io/gate/tripleo.html diff --git a/specs/juno/tripleo-juno-configurable-mnt-state.rst b/specs/juno/tripleo-juno-configurable-mnt-state.rst deleted file mode 100644 index c3b97d4f..00000000 --- a/specs/juno/tripleo-juno-configurable-mnt-state.rst +++ /dev/null @@ -1,238 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -======================================================= -Configurable directory for persistent and stateful data -======================================================= - -https://blueprints.launchpad.net/tripleo/+spec/tripleo-juno-configurable-mnt-state - -Make the hardcoded /mnt/state path for stateful data be configurable. - -Problem Description -=================== - -1. A hard coded directory of /mnt/state for persistent data is incompatible -with Red Hat based distros available mechanism for a stateful data path. Red -Hat based distros, such as Fedora, RHEL, and CentOS, have a feature that uses -bind mounts for mounting paths onto a stateful data partition and does not -require manually reconfiguring software to use /mnt/state. - -2. Distros that use SELinux have pre-existing policy that allows access to -specific paths. Reconfiguring these paths to be under /mnt/state, results -in SELinux denials for existing services, requiring additional policy to be -written and maintained. - -3. Some operators and administrators find the reconfiguring of many services to -not use well known default values for filesystem paths to be disruptive and -inconsistent. 
They do not expect these changes when using a distro that they've -come to learn and anticipate certain configurations. These types of changes -also require documentation changes to existing documents and processes. - - -Proposed Change -=============== -Deployers will be able to choose a configurable path instead of the hardcoded -value of /mnt/state for the stateful path. - -A new element, stateful-path will be added that defines the value for the -stateful path. The default will be /mnt/state. - -There are 3 areas that need to respect the configurable path: - -os-apply-config template generation - The stateful-path element will set the stateful path value by installing a - JSON file to a well known location for os-collect-config to use as a local - data source. This will require a new local data source collector to be added - to os-collect-config (See `Dependencies`_). - - The JSON file's contents will be based on $STATEFUL_PATH, e.g.: - - {'stateful-path': '/mnt/state'} - - File templates (files under os-apply-config in an element) will then be - updated to replace the hard coded /mnt/state with {{stateful-path}}. - - Currently, there is a mix of root locations of the os-apply-config templates. - Most are written under /, although some are written under /mnt/state. The - /mnt/state is hard coded in the directory tree under os-apply-config in these - elements, so this will be removed to have the templates just written under /. - Symlinks could instead be used in these elements to setup the correct paths. - Support can also be added to os-apply-config's control file mechanism to - indicate these files should be written under the stateful path. An example - patch that does this is at: https://review.openstack.org/#/c/113651/ - -os-refresh-config scripts run at boot time - In order to make the stateful path configurable, all of the hard coded - references to /mnt/state in os-refresh-config scripts will be replaced with an - environment variable, $STATEFUL_PATH. 
- - The stateful-path element will provide an environment.d script for - os-refresh-config that reads the value from os-apply-config: - - export STATEFUL_PATH=$(os-apply-config --key stateful-path --type raw) - -Hook scripts run at image build time - The stateful-path element will provide an environment.d script for use at - image build time: - - export STATEFUL_PATH=${STATEFUL_PATH:-"/mnt/state"} - -The use-ephemeral element will depend on the stateful-path element, effectively -making the default stateful path remain /mnt/state. - -The stateful path can be reconfigured by defining $STATEFUL_PATH either A) in -the environment before an image build; or B) in an element with an -environment.d script which runs earlier than the stateful-path environment.d -script. - - -Alternatives ------------- -None come to mind, the point of this spec is to enable an alternative to what's -already existing. There may be additional alternatives out there other folks -may wish to add support for. - -Security Impact ---------------- -None - -Other End User Impact ---------------------- -End users using elements that change the stateful path location from /mnt/state -to something else will see this change reflected in configuration files and in -the directories used for persistent and stateful data. They will have to know -how the stateful path is configured and accessed. - -Different TripleO installs would appear different if used with elements that -configured the stateful path differently. - -This also adds some complexity when reading TripleO code, because instead of -there being an explicit path, there would instead be a reference to a -configurable value. - -Performance Impact ------------------- -There will be additional logic in os-refresh-config to determine and set the -stateful path, and an additional local collector that os-collect-config would -use. However, these are negligible in terms of negatively impacting -performance. 
- - -Other Deployer Impact ---------------------- -Deployers will be able to choose different elements that may reconfigure the -stateful path or change the value for $STATEFUL_PATH. The default will remain -unchanged however. - -Deployers would have to know what the stateful path is, and if it's different -across their environment, this could be confusing. However, this seems unlikely -as deployers are likely to be standardizing on one set of common elements, -distro, etc. - -In the future, if TripleO CI and CD clouds that are based on Red Hat distros -make use of this feature to enable Red Hat read only root support, then these -clouds would be configured differently from clouds that are configured to use -/mnt/state. As a team, the tripleo-cd-admins will have to know which -configuration has been used. - -Developer Impact ----------------- -1. Developers need to use the $STATEFUL_PATH and {{stateful-path}} -substitutions when they intend to refer to the stateful path. - -2. Code that needs to know the stateful path will need access to the variable -defining the path, it won't be able to assume the path is /mnt/state. A call to -os-apply-config to query the key defining the path could be done to get -the value, as long as os-collect-config has already run at least once. - - -Implementation -============== - -Assignee(s) ------------ - -Primary assignee: - james-slagle - -Work Items ----------- - -tripleo-incubator -^^^^^^^^^^^^^^^^^ -* Update troubleshooting docs to mention that /mnt/state is a configurable - path, and could be different in local environments. 
- -tripleo-image-elements -^^^^^^^^^^^^^^^^^^^^^^ -* Add a new stateful-path element that configures stateful-path and $STATEFUL_PATH - to /mnt/state -* Update os-apply-config templates to replace /mnt/state with {{stateful-path}} -* Update os-refresh-config scripts to replace /mnt/state with $STATEFUL_PATH -* Update all elements that have os-apply-config template files under /mnt/state - to just be under /. - - * update os-apply-config element to call os-apply-config with a --root - $STATEFUL_PATH option - * update elements that have paths to os-apply-config generated files (such - as /etc/nova/nova.conf) to refer to those paths as - $STATEFUL_PATH/path/to/file. - -* make use-ephemeral element depend on stateful-path element - -Dependencies -============ -1. os-collect-config will need a new feature to read from a local data source - directory that elements can install JSON files into, such as a source.d. There - will be a new spec filed on this feature. - https://review.openstack.org/#/c/100965/ - -2. os-apply-config will need an option in its control file to support - generating templates under the configurable stateful path. There is a patch - here: https://review.openstack.org/#/c/113651/ - - -Testing -======= - -There is currently no testing that all stateful and persistent data is actually -written to a stateful partition. - -We should add tempest tests that directly exercise the preserve_ephemeral -option, and have tests that check that all stateful data has been preserved -across a "nova rebuild". Tempest seems like a reasonable place to add these -tests since preserve_ephemeral is a Nova OpenStack feature. Plus, once TripleO -CI is running tempest against the deployed OverCloud, we will be testing this -feature. - -We should also test in TripleO CI that state is preserved across a rebuild by -adding stateful data before a rebuild and verifying it is still present after a -rebuild. 
- -Documentation Impact -==================== - -We will document the new stateful-path element. - -TripleO documentation will need to mention the potential difference in -configuration files and the location of persistent data if a value other than -/mnt/state is used. - - -References -========== - -os-collect-config local datasource collector spec: - -* https://review.openstack.org/100965 - -Red Hat style stateful partition support this will enable: - -* https://git.fedorahosted.org/cgit/initscripts.git/tree/systemd/fedora-readonly -* https://git.fedorahosted.org/cgit/initscripts.git/tree/sysconfig/readonly-root -* https://git.fedorahosted.org/cgit/initscripts.git/tree/statetab -* https://git.fedorahosted.org/cgit/initscripts.git/tree/rwtab diff --git a/specs/juno/tripleo-juno-deploy-cloud-hypervisor-type.rst b/specs/juno/tripleo-juno-deploy-cloud-hypervisor-type.rst deleted file mode 100644 index 7ac58648..00000000 --- a/specs/juno/tripleo-juno-deploy-cloud-hypervisor-type.rst +++ /dev/null @@ -1,258 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -==================================== -TripleO Deploy Cloud Hypervisor Type -==================================== - -# TODO: file the actual blueprint... -https://blueprints.launchpad.net/tripleo/+spec/tripleo-deploy-cloud-hypervisor-type - -The goal of this spec is to detail how the TripleO deploy cloud type could be -varied from just baremetal to baremetal plus other hypervisors to deploy -Overcloud services. - -Linux kernel containers make this approach attractive due to the lightweight -nature that services and process can be virtualized and isolated, so it seems -likely that libvirt+lxc and Docker would be likely targets. However we should -aim to make this approach as agnostic as possible for those deployers who may -wish to use any Nova driver, such as libvirt+kvm. 
- -Problem Description -=================== - -The overcloud control plane is generally lightly loaded and allocation of -entire baremetal machines to it is wasteful. Also, when the Overcloud services -are running entirely on baremetal they take longer to upgrade and rollback. - -Proposed Change -=============== - -We should support any Nova virtualization type as a target for Overcloud -services, as opposed to using baremetal nodes to deploy overcloud images. -Containers are particularly attractive because they are lightweight, easy to -upgrade/rollback and offer similar isolation and security as full VM's. For the -purpose of this spec, the alternate Nova virtualization target for the -Overcloud will be referred to as alt-hypervisor. alt-hypervisor could be -substituted with libvirt+lxc, Docker, libvirt+kvm, etc. - -At a minimum, we should support running each Overcloud service in isolation in -its own alt-hypervisor instance in order to be as flexible as possible to deployer -needs. We should also support combining services. - -In order to make other alt-hypervisors available as deployment targets for the -Overcloud, we need additional Nova Compute nodes/services configured to use -alt-hypervisors registered with the undercloud Nova. - -Additionally, the undercloud must still be running a Nova compute with the -ironic driver in order to allow for scaling itself out to add additional -undercloud compute nodes. - -To accomplish this, we can run 2 Nova compute processes on each undercloud -node. One configured with Nova+Ironic and one configured with -Nova+alt-hypervisor. For the straight baremetal deployment, where an alternate -hypervisor is not desired, the additional Nova compute process would not be -included. This would be accomplished via the standard inclusion/exclusion of -elements during a diskimage-builder tripleo image build. 
- -It will also be possible to build and deploy just an alt-hypervisor compute -node that is registered with the Undercloud as an additional compute node. - -To minimize the changes needed to the elements, we will aim to run a full init -stack in each alt-hypervisor instance, such as systemd. This will allow all the -services that we need to also be running in the instance (cloud-init, -os-collect-config, etc). It will also make troubleshooting similar to the -baremetal process in that you'd be able to ssh to individual instances, read -logs, restart services, turn on debug mode, etc. - -To handle Neutron network configuration for the Overcloud, the Overcloud -neutron L2 agent will have to be on a provider network that is shared between -the hypervisors. VLAN provider networks will have to be modeled in Neutron and -connected to alt-hypervisor instances. - -Overcloud compute nodes themselves would be deployed to baremetal nodes. These -images would be made up of: -* libvirt+kvm (assuming this is the hypervisor choice for the Overcloud) -* nova-compute + libvirt+kvm driver (registered to overcloud control). -* neutron-l2-agent (registered to overcloud control) -An image with those contents is deployed to a baremetal node via nova+ironic -from the undercloud. - -Alternatives ------------- - -Deployment from the seed -^^^^^^^^^^^^^^^^^^^^^^^^ -An alternative to having the undercloud deploy additional alt-hypervisor -compute nodes would be to register additional baremetal nodes with the seed vm, -and then describe an undercloud stack in a template that is the undercloud -controller and its set of alt-hypervisor compute nodes. When the undercloud -is deployed via the seed, all of the nodes are set up initially. - -The drawback with that approach is that the seed is meant to be short-lived in -the long term. So, it then becomes difficult to scale out the undercloud if -needed. 
We could offer a hybrid of the 2 models: launch all nodes initially -from the seed, but still have the functionality in the undercloud to deploy -more alt-hypervisor compute nodes if needed. - -The init process -^^^^^^^^^^^^^^^^ -If running systemd in a container turns out to be problematic, it should be -possible to run a single process in the container that starts just the -OpenStack service that we care about. However that process would also need to -do things like read Heat metadata. It's possible this process could be -os-collect-config. This change would require more changes to the elements -themselves however since they are so dependent on an init process currently in -how they enable/restart services etc. It may be possible to replace os-svc-* -with other tools that don't use systemd or upstart when you're building images -for containers. - -Security Impact ---------------- -* We should aim for equivalent security when deploying to alt-hypervisor - instances as we do when deploying to baremetal. To the best of our ability, it - should not be possible to compromise the instance if an individual service is - compromised. - -* Since Overcloud services and Undercloud services would be co-located on the - same baremetal machine, compromising the hypervisor and gaining access to the - host is a risk to both the Undercloud and Overcloud. We should mitigate this - risk to the best of our ability via things like SELinux, and removing all - unecessary software/processes from the alt-hypervisor instances. - -* Certain hypervisors are inherently more secure than others. libvirt+kvm uses - virtualization and is much more secure then container based hypervisors such as - libvirt+lxc and Docker which use namespacing. - -Other End User Impact ---------------------- -None. The impact of this change is limited to Deployers. End users should have -no visibility into the actual infrastructure of the Overcloud. 
- -Performance Impact ------------------- -Ideally, deploying an overcloud to containers should result in a faster -deployment than deploying to baremetal. Upgrading and downgrading the Overcloud -should also be faster. - -More images will have to be built via diskimage-builder however, which will -take more time. - -Other Deployer Impact ---------------------- -The main impact to deployers will be the ability to use alt-hypervisors -instances, such as containers if they wish. They also must understand how to -use nova-baremetal/ironic on the undercloud to scale out the undercloud and add -additional alt-hypervisor compute nodes if needed. - -Additional space in the configured glance backend would also likely be needed -to store additional images. - -Developer Impact ----------------- -* Developers working on TripleO will have the option of deploying to - alt-hypervisor instances. This should make testing and developing on some - aspects of TripleO easier due to the need for less vm's. - -* More images will have to be built due to the greater potential variety with - alt-hypervisor instances housing Overcloud services. - -Implementation -============== - -Assignee(s) ------------ - -Primary assignee: - james-slagle - -Work Items ----------- - -tripleo-incubator -^^^^^^^^^^^^^^^^^ -* document how to use an alternate hypervisor for the overcloud deployment - ** eventually, this could possibly be the default -* document how to troubleshoot this type of deployment -* need a user option or json property to describe if the devtest - environment being set up should use an alternate hypervisor for the overcloud - deployment or not. Consider using HEAT_ENV where appropriate. -* load-image should be updated to add an additional optional argument that sets - the hypervisor_type property on the loaded images in glance. The argument is - optional and wouldn't need to be specified for some images, such as regular - dib images that can run under KVM. 
-* Document commands to setup-neutron for modeling provider VLAN networks. - -tripleo-image-elements -^^^^^^^^^^^^^^^^^^^^^^ -* add new element for nova docker driver -* add new element for docker registry (currently required by nova docker - driver) -* more hypervisor specific configuration files for the different nova compute - driver elements - ** /etc/nova/compute/nova-kvm.conf - ** /etc/nova/compute/nova-baremetal.conf - ** /etc/nova/compute/nova-ironic.conf - ** /etc/nova/compute/nova-docker.conf -* Separate configuration options per compute process for: - ** host (undercloud-kvm, undercloud-baremetal, etc). - ** state_path (/var/lib/nova-kvm, /var/lib/nova-baremetal, etc). -* Maintain backwards compatibility in the elements by consulting both old and - new heat metadata key namespaces. - -tripleo-heat-templates -^^^^^^^^^^^^^^^^^^^^^^ -* Split out heat metadata into separate namespaces for each compute process - configuration. -* For the vlan case, update templates for any network modeling for - alt-hypervisor instances so that those instances have correct interfaces - attached to the vlan network. - -diskimage-builder -^^^^^^^^^^^^^^^^^ -* add ability where needed to build new image types for alt-hypervisor - ** Docker - ** libvirt+lxc -* Document how to build images for the new types - -Dependencies -============ -For Docker support, this effort depends on continued development on the nova -Docker driver. We would need to drive any missing features or bug fixes that -were needed in that project. - -For other drivers that may not be as well supported as libvirt+kvm, we will -also have to drive missing features there as well if we want to support them, -such as libvirt+lxc, openvz, etc. - -This effort also depends on the provider resource templates spec (unwritten) -that will be done for the template backend for Tuskar. 
That work should be done -in such a way that the provider resource templates are reusable for this effort -as well in that you will be able to create templates to match the images that -you intend to create for your Overcloud deployment. - -Testing -======= -We would need a separate set of CI jobs that were configured to deploy an -Overcloud to each alternate hypervisor that TripleO intended to support well. - -For Docker support specifically, CI jobs could be considered non-voting since -they'd rely on a stackforge project which isn't officially part of OpenStack. -We could potentially make this job voting if TripleO CI was enabled on the -stackforge/nova-docker repo so that changes there are less likely to break -TripleO deployments. - -Documentation Impact -==================== -We should update the TripleO specific docs in tripleo-incubator to document how -to use an alternate hypervisor for an Overcloud deployment. - -References -========== -Juno Design Summit etherpad: https://etherpad.openstack.org/p/juno-summit-tripleo-and-docker -nova-docker driver: https://git.openstack.org/cgit/stackforge/nova-docker -Docker: https://www.docker.io/ -Docker github: https://github.com/dotcloud/docker diff --git a/specs/juno/tripleo-juno-dracut-ramdisks.rst b/specs/juno/tripleo-juno-dracut-ramdisks.rst deleted file mode 100644 index 7c321d55..00000000 --- a/specs/juno/tripleo-juno-dracut-ramdisks.rst +++ /dev/null @@ -1,176 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -====================== -Dracut Deploy Ramdisks -====================== - -Include the URL of your launchpad blueprint: - -https://blueprints.launchpad.net/tripleo/+spec/tripleo-juno-dracut-ramdisks - -Our current deploy ramdisks include functionality that is duplicated from -existing tools such as Dracut, and do not include some features that those -tools do. 
Reimplementing our deploy ramdisks to use Dracut would shrink -our maintenance burden for that code and allow us to take advantage of those -additional features. - -Problem Description -=================== - -Currently our deploy ramdisks are implemented as a bash script that runs -as init during the deploy process. This means that we are responsible for -correctly configuring things such as udev and networking which would normally -be handled by distribution tools. While this isn't an immediate problem -because the implementation has already been done, it is an unnecessary -duplication and additional maintenance debt for the future as we need to add -or change such low-level functionality. - -In addition, because our ramdisk is a one-off, users will not be able to make -use of any ramdisk troubleshooting methods that they might currently know. -This is an unnecessary burden when there are tools to build ramdisks that are -standardized and well-understood by the people using our software. - -Proposed Change -=============== - -The issues discussed above can be dealt with by using a standard tool such as -Dracut to build our deploy ramdisks. This will actually result in a reduction -in code that we have to maintain and should be compatible with all of our -current ramdisks because we can continue to use the same method of building -the init script - it will just run as a user script instead of process 0, -allowing Dracut to do low-level configuration for us. - -Initially this will be implemented alongside the existing ramdisk element to -provide a fallback option if there are any use cases not covered by the -initial version of the Dracut ramdisk. - -Alternatives ------------- - -For consistency with the rest of Red Hat/Fedora's ramdisks I would prefer to -implement this using Dracut, but if there is a desire to also make use of -another method of building ramdisks, that could probably be implemented -alongside Dracut. 
The current purely script-based implementation could even -be kept in parallel with a Dracut version. However, I believe Dracut is -available on all of our supported platforms so I don't see an immediate need -for alternatives. - -Additionally, there is the option to replace our dynamically built init -script with Dracut modules for each deploy element. This is probably -unnecessary as it is perfectly fine to use the current method with Dracut, -and using modules would tightly couple our deploy ramdisks to Dracut, making -it difficult to use any alternatives in the future. - -Security Impact ---------------- - -The same security considerations that apply to the current deploy ramdisk -would continue to apply to Dracut-built ones. - -Other End User Impact ---------------------- - -This change would enable end users to make use of any Dracut knowledge they -might already have, including the ability to dynamically enable tracing -of the commands used to do the deployment (essentially set -x in bash). - -Performance Impact ------------------- - -Because Dracut supports more hardware and software configurations, it is -possible there will be some additional overhead during the boot process. -However, I would expect this to be negligible in comparison to the time it -takes to copy the image to the target system, so I see it as a reasonable -tradeoff. - -Other Deployer Impact ---------------------- - -As noted before, Dracut supports a wide range of hardware configurations, -so deployment methods that currently wouldn't work with our script-based -ramdisk would become available. For example, Dracut supports using network -disks as the root partition, so running a diskless node with separate -storage should be possible. - -Developer Impact ----------------- - -There would be some small changes to how developers would add a new dependency -to the ramdisk images. 
Instead of executables and their required libraries -being copied to the ramdisk manually, the executable can simply be added to -the list of things Dracut will include in the ramdisk. - -Developers would also gain the dynamic tracing ability mentioned above in -the end user impact. - - -Implementation -============== - -Assignee(s) ------------ - -Primary assignee: - bnemec - -Work Items ----------- - -* Convert the ramdisk element to use Dracut (see WIP change in References). - -* Verify that DHCP booting of ramdisks still works. - -* Verify that nova-baremetal ramdisks can be built successfully with Dracut. - -* Verify that Ironic ramdisks can be built successfully with Dracut. - -* Verify that Dracut can build Ironic-IPA ramdisks. - -* Verify the Dracut debug shell provides equivalent functionality to the - existing one. - -* Provide ability for other elements to install additional files to the - ramdisk. - -* Provide ability for other elements to include additional drivers. - -* Find a way to address potential 32-bit binaries being downloaded and run in - the ramdisk for firmware deployments. - -Dependencies -============ - -This would add a dependency on Dracut for building ramdisks. - -Testing -======= - -Since building deploy ramdisks is already part of CI, this should be covered -automatically. If it is implemented in parallel with another method, then -the CI jobs would need to be configured to exercise the different methods -available. - -Documentation Impact -==================== - -We would want to document the additional features available in Dracut. -Otherwise this should function in essentially the same way as the current -ramdisks, so any existing documentation will still be valid. - -Some minor developer documentation changes may be needed to address the -different ways Dracut handles adding extra kernel modules and files. 
- -References -========== - -* Dracut: https://dracut.wiki.kernel.org/index.php/Main_Page - -* PoC of building ramdisks with Dracut: - https://review.openstack.org/#/c/105275/ - -* openstack-dev discussion: - http://lists.openstack.org/pipermail/openstack-dev/2014-July/039356.html diff --git a/specs/juno/tripleo-juno-occ-localdatasource.rst b/specs/juno/tripleo-juno-occ-localdatasource.rst deleted file mode 100644 index efbeed35..00000000 --- a/specs/juno/tripleo-juno-occ-localdatasource.rst +++ /dev/null @@ -1,168 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -=================================== -os-collect-config local data source -=================================== - - -https://blueprints.launchpad.net/tripleo-juno-occ-local-datasource - -os-collect-config needs a local data source collector for configuration data. -This will allow individual elements to drop files into a well-known location to -set the initial configuration data of an instance. - -There is already a heat_local collector, but that uses a single hard coded path -of /var/lib/heat-cfntools/cfn-init-data. - -Problem Description -=================== - -* Individual elements can not currently influence the configuration available - to os-apply-config for an instance without overwriting each other. -* Elements that rely on configuration values that must be set the same at both - image build time and instance run time currently have no way of propagating the - value used at build time to a run time value. -* Elements have no way to specify default values for configuration they may - need at runtime (outside of configuration file templates). - - -Proposed Change -=============== - -A new collector class will be added to os-collect-config that collects -configuration data from JSON files in a configurable list of directories with a -well known default of /var/lib/os-collect-config/local-data. 
- -The collector will return a list of pairs of JSON files and their content, -sorted by the JSON filename in traditional C collation. For example, if -/var/lib/os-collect-config/local-data contains bar.json and foo.json - - [ ('bar.json', bar_content), - ('foo.json', foo_content) ] - -This new collector will be configured first in DEFAULT_COLLECTORS in -os-collect-config. This means all later configured collectors will override any -shared configuration keys from the local datasource collector. - -Elements making use of this feature can install a json file into the -/var/lib/os-collect-config/local-data directory. The os-collect-config element -will be responsible for creating the /var/lib/os-collect-config/local-data -directory at build time and will create it with 0755 permissions. - -Alternatives ------------- - -OS_CONFIG_FILES -^^^^^^^^^^^^^^^ -There is already a mechanism in os-apply-config to specify arbitrary files to -look at for configuration data via setting the OS_CONFIG_FILES environment -variable. However, this is not ideal because each call to os-apply-config would -have to be prefaced with setting OS_CONFIG_FILES, or it would need to be set -globally in the environment (via an environment.d script for instance). As an -element developer, this is not clear. Having a robust and clear documented -location to drop in configuration data will be simpler. - -heat_local collector -^^^^^^^^^^^^^^^^^^^^ -There is already a collector that reads from local data, but it must be -configured to read explicit file paths. This does not scale well if several -elements want to each provide local configuration data, in that you'd have to -reconfigure os-collect-config itself. We could modify the heat_local collector -to read from directories instead, while maintaining backwards compatibility as -well, instead of writing a whole new collector. 
However, given that collectors -are pretty simple implementations, I'm proposing just writing a new one, so -that they remain generally single purpose with clear goals. - -Security Impact ---------------- - -* Harmful elements could drop bad configuration data into the well known - location. This is mitigated somewhat that as a deployer, you should know and - validate what elements you're using that may inject local configuration. - -* We should verify that the local data source files are not world writable and - are in a directory that is root owned. Checks to dib-lint could be added to - verify this at image build time. Checks could be added to os-collect-config - for instance run time. - -Other End User Impact ---------------------- - -None - -Performance Impact ------------------- - -An additional collector will be running as part of os-collect-config, but its -execution time should be minimal. - -Other Deployer Impact ---------------------- - -* There will be an additional configuration option in os-collect-config to - configure the list of directories to look at for configuration data. This - will have a reasonable default and will not usually need to be changed. -* Deployers will have to consider what local data source configuration may be - influencing their current applied configuration. - -Developer Impact ----------------- - -We will need to make clear in documentation when to use this feature versus -what to expose in a template or specify via passthrough configuration. -Configuration needed at image build time where you need access to those values -at instance run time as well are good candidates for using this feature. 
- - -Implementation -============== - -Assignee(s) ------------ - -Primary assignee: - james-slagle - -Work Items ----------- - -* write new collector for os-collect-config -* unit tests for new collector -* document new collector -* add checks to dib-lint to verify JSON files installed to the local data - source directory are not world writable -* add checks to os-collect-config to verify JSON files read by the local data - collector are not world writable and that their directory is root owned. - -Dependencies -============ - -* The configurable /mnt/state spec at: - https://blueprints.launchpad.net/tripleo/+spec/tripleo-juno-configurable-mnt-state - depends on this spec. - -Testing -======= - -Unit tests will be written for the new collector. The new collector will also -eventually be tested in CI because there will be an existing element that will -configure the persistent data directory to /mnt/state that will make use of -this implementation. - - -Documentation Impact -==================== - -The ability of elements to drop configuration data into a well known location -should be documented in tripleo-image-elements itself so folks can be made -better aware of the functionality. - -References -========== - -* https://blueprints.launchpad.net/tripleo/+spec/tripleo-juno-configurable-mnt-state -* https://review.openstack.org/#/c/94876 diff --git a/specs/juno/tripleo-juno-tuskar-rest-api.rst b/specs/juno/tripleo-juno-tuskar-rest-api.rst deleted file mode 100644 index 046d23f9..00000000 --- a/specs/juno/tripleo-juno-tuskar-rest-api.rst +++ /dev/null @@ -1,611 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. 
- - http://creativecommons.org/licenses/by/3.0/legalcode - -================================== -Tuskar Plan REST API Specification -================================== - -Blueprint: -https://blueprints.launchpad.net/tuskar/+spec/tripleo-juno-tuskar-plan-rest-api - -In Juno, the Tuskar API is moving towards a model of being a large scale -application planning service. Its initial usage will be to deploy OpenStack -on OpenStack by leveraging TripleO Heat Templates and fitting into the -greater TripleO workflow. - -As compared to Icehouse, Tuskar will no longer make calls to Heat for creating -and updating a stack. Instead, it will serve to define and manipulate the Heat -templates for describing a cloud. Tuskar will be the source for the cloud -planning while Heat is the source for the state of the live cloud. - -Tuskar employs the following concepts: - -* *Deployment Plan* - The description of an application (for example, - the overcloud) being planned by Tuskar. The deployment plan keeps track of - what roles will be present in the deployment and their configuration values. - In TripleO terms, each overcloud will have its own deployment plan that - describes what services will run and the configuration of those services - for that particular overcloud. For brevity, this is simply referred to as - the "plan" elsewhere in this spec. -* *Role* - A unit of functionality that can be added to a plan. A role - is the definition of what will run on a single server in the deployed Heat - stack. For example, an "all-in-one" role may contain all of the services - necessary to run an overcloud, while a "compute" role may provide only the - nova-compute service. - -Put another way, Tuskar is responsible for assembling -the user-selected roles and their configuration into a Heat environment and -making the built Heat templates and files available to the caller (the -Tuskar UI in TripleO but, more generally, any consumer of the REST API) to send -to Heat. 
- -Tuskar will ship with the TripleO Heat Templates installed to serve as its -roles (dependent on the conversions taking place this release [4]_). -For now it is assumed those templates are installed as part of the TripleO's -installation of Tuskar. A different spec will cover the API calls necessary -for users to upload and manipulate their own custom roles. - -This specification describes the REST API clients will interact with in -Tuskar, including the URLs, HTTP methods, request, and response data, for the -following workflow: - -* Create an empty plan in Tuskar. -* View the list of available roles. -* Add roles to the plan. -* Request, from Tuskar, the description of all of the configuration values - necessary for the entire plan. -* Save user-entered configuration values with the plan in Tuskar. -* Request, from Tuskar, the Heat templates for the plan, which includes - all of the files necessary to deploy the configured application in Heat. - -The list roles call is essential to this workflow and is therefore described -in this specification. Otherwise, this specification does not cover the API -calls around creating, updating, or deleting roles. It is assumed that the -installation process for Tuskar in TripleO will take the necessary steps to -install the TripleO Heat Templates into Tuskar. A specification will be filed -in the future to cover the role-related API calls. - - -Problem Description -=================== - -The REST API in Tuskar seeks to fulfill the following needs: - -* Flexible selection of an overcloud's functionality and deployment strategy. -* Repository for discovering what roles can be added to a cloud. -* Help the user to avoid having to manually manipulate Heat templates to - create the desired cloud setup. -* Storage of a cloud's configuration without making the changes immediately - live (future needs in this area may include offering a more structured - review and promotion lifecycle for changes). 
- - -Proposed Change -=============== - -**Overall Concepts** - -* These API calls will be added under the ``/v2/`` path, however the v1 API - will not be maintained (the model is being changed to not contact Heat and - the existing database is being removed [3]_). -* All calls have the potential to raise a 500 if something goes horribly wrong - in the server, but for brevity this is omitted from the list of possible - response codes in each call. -* All calls have the potential to raise a 401 in the event of a failed user - authentication and have been similarly omitted from each call's - documentation. - ----- - -.. _retrieve-single-plan: - -**Retrieve a Single Plan** - -URL: ``/plans//`` - -Method: ``GET`` - -Description: Returns the details of a specific plan, including its -list of assigned roles and configuration information. - -Notes: - -* The configuration values are read from Tuskar's stored files rather than - Heat itself. Heat is the source for the live stack, while Tuskar is the - source for the plan. - -Request Data: None - -Response Codes: - -* 200 - if the plan is found -* 404 - if there is no plan with the given UUID - -Response Data: - -JSON document containing the following: - -* Tuskar UUID for the given plan. -* Name of the plan that was created. -* Description of the plan that was created. -* The timestamp of the last time a change was made. -* List of the roles (identified by name and version) assigned to the plan. - For this sprint, there will be no pre-fetching of any more role information - beyond name and version, but can be added in the future while maintaining - backward compatibility. -* List of parameters that can be configured for the plan, including the - parameter name, label, description, hidden flag, and current value if - set. - -Response Example: - -.. 
code-block:: json - - { - "uuid" : "dd4ef003-c855-40ba-b5a6-3fe4176a069e", - "name" : "dev-cloud", - "description" : "Development testing cloud", - "last_modified" : "2014-05-28T21:11:09Z", - "roles" : [ - { - "uuid" : "55713e6a-79f5-42e1-aa32-f871b3a0cb64", - "name" : "compute", - "version" : "1", - "links" : { - "href" : "http://server/v2/roles/55713e6a-79f5-42e1-aa32-f871b3a0cb64/", - "rel" : "bookmark" - } - }, - { - "uuid" : "2ca53130-b9a4-4fa5-86b8-0177e8507803", - "name" : "controller", - "version" : "1", - "links" : { - "href" : "http://server/v2/roles/2ca53130-b9a4-4fa5-86b8-0177e8507803/", - "rel" : "bookmark" - } - } - ], - "parameters" : [ - {"name" : "database_host", - "label" : "Database Host", - "description" : "Hostname of the database server", - "hidden" : "false", - "value" : "10.11.12.13" - } - ], - "links" : [ - { - "href" : "http://server/v2/plans/dd4ef003-c855-40ba-b5a6-3fe4176a069e/", - "rel" : "self" - } - ] - } - ----- - -.. _retrieve-plan-template: - -**Retrieve a Plan's Template Files** - -URL: ``/plans//templates/`` - -Method: ``GET`` - -Description: Returns the set of files to send to Heat to create or update -the planned application. - -Notes: - -* The Tuskar service will build up the entire environment into a single - file suitable for sending to Heat. The contents of this file are returned - from this call. - -Request Data: None - -Response Codes: - -* 200 - if the plan's templates are found -* 404 - if no plan exists with the given ID - -Response Data: - ----- - -.. _list-plans: - -**List Plans** - -URL: ``/plans/`` - -Method: ``GET`` - -Description: Returns a list of all plans stored in Tuskar. In the future when -multi-tenancy is added, this will be scoped to a particular tenant. - -Notes: - -* The detailed information about a plan, including its roles and configuration - values, are not returned in this call. A follow up call is needed on the - specific plan. 
It may be necessary in the future to add a flag to pre-fetch - this information during this call. - -Request Data: None (future enhancement will require the tenant ID and -potentially support a pre-fetch flag for more detailed data) - -Response Codes: - -* 200 - if the list can be retrieved, even if the list is empty - -Response Data: - -JSON document containing a list of limited information about each plan. -An empty list is returned when no plans are present. - -Response Example: - -.. code-block:: json - - [ - { - "uuid" : "3e61b4b2-259b-4b91-8344-49d7d6d292b6", - "name" : "dev-cloud", - "description" : "Development testing cloud", - "links" : { - "href" : "http://server/v2/plans/3e61b4b2-259b-4b91-8344-49d7d6d292b6/", - "rel" : "bookmark" - } - }, - { - "uuid" : "135c7391-6c64-4f66-8fba-aa634a86a941", - "name" : "qe-cloud", - "description" : "QE testing cloud", - "links" : { - "href" : "http://server/v2/plans/135c7391-6c64-4f66-8fba-aa634a86a941/", - "rel" : "bookmark" - } - } - ] - - ----- - -.. _create-new-plan: - -**Create a New Plan** - -URL: ``/plans/`` - -Method: ``POST`` - -Description: Creates an entry in Tuskar's storage for the plan. The details -are outside of the scope of this spec, but the idea is that all of the -necessary Heat environment infrastructure files and directories will be -created and stored in Tuskar's storage solution [3]_. - -Notes: - -* Unlike in Icehouse, Tuskar will not make any calls into Heat during this - call. This call is to create a new (empty) plan in Tuskar that - can be manipulated, configured, saved, and retrieved in a format suitable - for sending to Heat. -* This is a synchronous call that completes when Tuskar has created the - necessary files for the newly created plan. -* As of this time, this call does not support a larger batch operation that - will add roles or set configuration values in a single call. 
From a REST - perspective, this is acceptable, but from a usability standpoint we may want - to add this support in the future. - -Request Data: - -JSON document containing the following: - -* Name - Name of the plan being created. Must be unique across all plans - in the same tenant. -* Description - Description of the plan to create. - -Request Example: - -.. code-block:: json - - { - "name" : "dev-cloud", - "description" : "Development testing cloud" - } - -Response Codes: - -* 201 - if the create is successful -* 409 - if there is an existing plan with the given name (for a particular - tenant when multi-tenancy is taken into account) - -Response Data: - -JSON document describing the created plan. -The details are the same as for the GET operation on an individual plan -(see :ref:`Retrieve a Single Plan `). - - ----- - -.. _delete-plan: - -**Delete an Existing Plan** - -URL: ``/plans//`` - -Method: ``DELETE`` - -Description: Deletes the plan's Heat templates and configuration values from -Tuskar's storage. - -Request Data: None - -Response Codes: - -* 200 - if deleting the plan entries from Tuskar's storage was successful -* 404 - if there is no plan with the given UUID - -Response Data: None - - ----- - -.. _add-plan-role: - -**Adding a Role to a Plan** - -URL: ``/plans//roles/`` - -Method: ``POST`` - -Description: Adds the specified role to the given plan. - -Notes: - -* This will cause the parameter consolidation to occur and entries added to - the plan's configuration parameters for the new role. -* This call will update the ``last_modified`` timestamp to indicate a change - has been made that will require an update to Heat to be made live. - -Request Data: - -JSON document containing the uuid of the role to add. - -Request Example: - -.. 
code-block:: json - - { - "uuid" : "role_uuid" - } - -Response Codes: - -* 201 - if the addition is successful -* 404 - if there is no plan with the given UUID -* 409 - if the plan already has the specified role - -Response Data: - -The same document describing the plan as from -:ref:`Retrieve a Single Plan `. The newly added -configuration parameters will be present in the result. - - ----- - -.. _remove-cloud-plan: - -**Removing a Role from a Plan** - -URL: ``/plans//roles//`` - -Method: ``DELETE`` - -Description: Removes a role identified by role_uuid from the given plan. - -Notes: - -* This will cause the parameter consolidation to occur and entries to be - removed from the plan's configuration parameters. -* This call will update the ``last_modified`` timestamp to indicate a change - has been made that will require an update to Heat to be made live. - -Request Data: None - -Response Codes: - -* 200 - if the removal is successful -* 404 - if there is no plan with the given UUID or it does not have the - specified role and version combination - -Response Data: - -The same document describing the cloud as from -:ref:`Retrieve a Single Plan `. The configuration -parameters will be updated to reflect the removed role. - - ----- - -.. _changing-plan-configuration: - -**Changing a Plan's Configuration Values** - -URL: ``/plans//`` - -Method: ``PATCH`` - -Description: Sets the values for one or more configuration parameters. - -Notes: - -* This call will update the ``last_modified`` timestamp to indicate a change - has been made that will require an update to Heat to be made live. - -Request Data: JSON document containing the parameter keys and values to set -for the plan. - -Request Example: - -.. 
code-block:: json - - [ - { - "name" : "database_host", - "value" : "10.11.12.13" - }, - { - "name" : "database_password", - "value" : "secret" - } - ] - -Response Codes: - -* 200 - if the update was successful -* 400 - if one or more of the new values fails validation -* 404 - if there is no plan with the given UUID - -Response Data: - -The same document describing the plan as from -:ref:`Retrieve a Single Plan `. - - ----- - -.. _list-roles: - -**Retrieving Possible Roles** - -URL: ``/roles/`` - -Method: ``GET`` - -Description: Returns a list of all roles available in Tuskar. - -Notes: - -* There will be a separate entry for each version of a particular role. - -Request Data: None - -Response Codes: - -* 200 - containing the available roles - -Response Data: A list of roles, where each role contains: - -* Name -* Version -* Description - -Response Example: - -.. code-block:: json - - [ - { - "uuid" : "3d46e510-6a63-4ed1-abd0-9306a451f8b4", - "name" : "compute", - "version" : "1", - "description" : "Nova Compute" - }, - { - "uuid" : "71d6c754-c89c-4293-9d7b-c4dcc57229f0", - "name" : "compute", - "version" : "2", - "description" : "Nova Compute" - }, - { - "uuid" : "651c26f6-63e2-4e76-9b60-614b51249677", - "name" : "controller", - "version" : "1", - "description" : "Controller Services" - } - ] - - -Alternatives ------------- - -There are currently no alternate schemas proposed for the REST APIs. - -Security Impact ---------------- - -These changes should have no additional security impact. - -Other End User Impact ---------------------- - -None - -Performance Impact ------------------- - -The potential performance issues revolve around Tuskar's solution for storing -the cloud files [3]_. - -Other Deployer Impact ---------------------- - -None - -Developer Impact ----------------- - -After being merged, there will be a period where the Tuskar CLI is out of date -with the new calls. The Tuskar UI will also need to be updated for the changes -in flow. 
- - -Implementation -============== - -Assignee(s) ------------ - -Primary assignee: - jdob - -Work Items ----------- - -* Implement plan CRUD APIs -* Implement role retrieval API -* Write REST API documentation - - -Dependencies -============ - -These API changes are dependent on the rest of the Tuskar backend being -implemented, including the changes to storage and the template consolidation. - -Additionally, the assembly of roles (provider resources) into a Heat -environment is contingent on the conversion of the TripleO Heat templates [4]_. - - -Testing -======= - -Tempest testing should be added as part of the API creation. - - -Documentation Impact -==================== - -The REST API documentation will need to be updated accordingly. - - -References -========== - -.. [3] https://review.openstack.org/#/c/97553/ -.. [4] https://review.openstack.org/#/c/97939/ diff --git a/specs/juno/tripleo-juno-tuskar-template-storage.rst b/specs/juno/tripleo-juno-tuskar-template-storage.rst deleted file mode 100644 index d7086873..00000000 --- a/specs/juno/tripleo-juno-tuskar-template-storage.rst +++ /dev/null @@ -1,552 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -============================================ -TripleO Template and Deployment Plan Storage -============================================ - -This design specification describes a storage solution for a deployment plan. -Deployment plans consist of a set of roles, which in turn define a master Heat -template that can be used by Heat to create a stack representing the deployment -plan; and an environment file that defines the parameters needed by the master -template. - -This specification is principally intended to be used by Tuskar. - -https://blueprints.launchpad.net/tuskar/+spec/tripleo-juno-tuskar-template-storage - -.. 
_tripleo_juno_tuskar_template_storage_problem: - -Problem Description -=================== - -.. note:: The terminology used in this specification is defined in the `Tuskar - REST API`_ specification. - -.. _Tuskar REST API: https://blueprints.launchpad.net/tuskar/+spec/tripleo-juno-tuskar-plan-rest-api - -In order to accomplish the goal of this specification, we need to first define -storage domain models for roles, deployment plans, and associated concepts. -These associated concepts include Heat templates and environment files. The -models must account for requirements such as versioning and the appropriate -relationships between objects. - -We also need to create a storage mechanism for these models. The storage -mechanism should be distinct from the domain model, allowing the latter to be -stable while the former retains enough flexibility to use a variety of backends -as need and availability dictates. Storage requirements for particular models -include items such as versioning and secure storage. - - -Proposed Change -=============== - -**Change Summary** - -The following proposed change is split into three sections: - -- Storage Domain Models: Defines the domain models for templates, environment - files, roles, and deployment plans. -- Storage API Interface: Defines Python APIs that relate the models to - the underlying storage drivers; is responsible for translating stored content - into a model object and vice versa. Each model requires its own storage - interface. -- Storage Drivers: Defines the API that storage backends need to implement in - order to be usable by the Python API Interface. Plans for initial and future - driver support are discussed here. - -It should be noted that each storage interface will be specified by the user as -part of the Tuskar setup. Thus, the domain model can assume that the appropriate -storage interfaces - a template store, an environment store, etc - are defined -globally and accessible for use. 
- - -**Storage Domain Models** - -The storage API requires the following domain models: - -- Template -- Environment File -- Role -- Deployment Plan - -The first two map directly to Heat concepts; the latter two are Tuskar concepts. - -Note that each model will also contain a save method. The save method will call -create on the store if the uuid isn't set, and will call update on the store -if the instance has a uuid. - - -**Template Model** - -The template model represents a Heat template. - -.. code-block:: python - - class Template: - uuid = UUID string - name = string - version = integer - description = string - content = string - created_at = datetime - - # This is derived from the content from within the template store. - parameters = dict of parameter names with their types and defaults - - -**Environment File Model** - -The environment file defines the parameters and resource registry for a Heat -stack. - -.. code-block:: python - - class EnvironmentFile: - uuid = UUID string - content = string - created_at = datetime - updated_at = datetime - - # These are derived from the content from within the environment file store. - resource_registry = list of provider resource template names - parameters = dict of parameter names and their values - - def add_provider_resource(self, template): - # Adds the specified template object to the environment file as a - # provider resource. This updates the parameters and resource registry - # in the content. The provider resource type will be derived from the - # template file name. - - def remove_provider_resource(self, template): - # Removes the provider resource that matches the template from the - # environment file. This updates the parameters and resource registry - # in the content. - - def set_parameters(self, params_dict): - # The key/value pairs in params_dict correspond to parameter names/ - # desired values. This method updates the parameters section in the - # content to the values specified in params_dict. 
- - -**Role Model** - -A role is a scalable unit of a cloud. A deployment plan specifies one or more -roles. Each role must specify a primary role template. It must also specify -the dependencies of that template. - -.. code-block:: python - - class Role: - uuid = UUID string - name = string - version = integer - description = string - role_template_uuid = Template UUID string - dependent_template_uuids = list of Template UUID strings - created_at = datetime - - def retrieve_role_template(self): - # Retrieves the Template with uuid matching role_template_uuid - - def retrieve_dependent_templates(self): - # Retrieves the list of Templates with uuids matching - # dependent_template_uuids - - -**Deployment Plan Model** - -The deployment plan defines the application to be deployed. It does so by -specifying a list of roles. Those roles are used to construct an environment -file that contains the parameters that are needed by the roles' templates and -the resource registry that register each role's primary template as a provider -resource. A master template is also constructed so that the plan can be -deployed as a single Heat stack. - -.. code-block:: python - - class DeploymentPlan: - uuid = UUID string - name = string - description = string - role_uuids = list of Role UUID strings - master_template_uuid = Template UUID string - environment_file_uuid = EnvironmentFile UUID string - created_at = datetime - updated_at = datetime - - def retrieve_roles(self): - # Retrieves the list of Roles with uuids matching role_uuids - - def retrieve_master_template(self): - # Retrieves the Template with uuid matching master_template_uuid - - def retrieve_environment_file(self): - # Retrieves the EnvironmentFile with uuid matching environment_file_uuid - - def add_role(self, role): - # Adds a Role to the plan. This operation will modify the master - # template and environment file through template munging operations - # specified in a separate spec. 
- - def remove_role(self, role): - # Removes a Role from the plan. This operation will modify the master - # template and environment file through template munging operations - # specified in a separate spec. - - def get_dependent_templates(self): - # Returns a list of dependent templates. This consists of the - # associated role templates. - - -**Storage API Interface** - -Each of the models defined above has their own Python storage interface. These -are manager classes that query and perform CRUD operations against the storage -drivers and return instances of the models for use (with the exception of delete -which returns ``None``). The storage interfaces bind the models to the driver -being used; this allows us to store each model in a different location. - -Note that each store also contains a serialize method and a deserialize method. -The serialize method takes the relevant object and returns a dictionary -containing all value attributes; the deserialize method does the reverse. - -The drivers are discussed in -:ref:`the next section`. - - -**Template API** - -.. code-block:: python - - class TemplateStore: - - def create(self, name, content, description=None): - # Creates a Template. If no template exists with a matching name, - # the template version is set to 0; otherwise it is set to the - # greatest existing version plus one. - - def retrieve(self, uuid): - # Retrieves the Template with the specified uuid. Queries a Heat - # template parser for template parameters and dependent template names. - - def retrieve_by_name(self, name, version=None): - # Retrieves the Template with the specified name and version. If no - # version is specified, retrieves the latest version of the Template. - - def delete(self, uuid): - # Deletes the Template with the specified uuid. - - def list(self, only_latest=False): - # Returns a list of all Templates. If only_latest is True, filters - # the list to the latest version of each Template name. 
- - -**Environment File API** - -The environment file requires secure storage to protect parameter values. - -.. code-block:: python - - class EnvironmentFileStore: - - def create(self): - # Creates an empty EnvironmentFile. - - def retrieve(self, uuid): - # Retrieves the EnvironmentFile with the specified uuid. - - def update(self, model): - # Updates an EnvironmentFile. - - def delete(self, uuid): - # Deletes the EnvironmentFile with the specified uuid. - - def list(self): - # Returns a list of all EnvironmentFiles. - - -**Role API** - -.. code-block:: python - - class RoleStore: - - def create(self, name, role_template, description=None): - version=None, template_uuid=None): - # Creates a Role. If no role exists with a matching name, the - # template version is set to 0; otherwise it is set to the greatest - # existing version plus one. - # - # Dependent templates are derived from the role_template. The - # create method will take all dependent template names from - # role_template, retrieve the latest version of each from the - # TemplateStore, and use those as the dependent template list. - # - # If a dependent template is missing from the TemplateStore, then - # an exception is raised. - - def retrieve(self, uuid): - # Retrieves the Role with the specified uuid. - - def retrieve_by_name(self, name, version=None): - # Retrieves the Role with the specified name and version. If no - # version is specified, retrieves the latest version of the Role. - - def update(self, model): - # Updates a Role. - - def delete(self, uuid): - # Deletes the Role with the specified uuid. - - def list(self, only_latest=False): - # Returns a list of all Roles. If only_latest is True, filters - # the list to the latest version of each Role. - - -**Deployment Plan API** - -.. code-block:: python - - class DeploymentPlanStore: - - def create(self, name, description=None): - # Creates a DeploymentPlan. 
Also creates an associated empty master - # Template and EnvironmentFile; these will be modified as Roles are - - def retrieve(self, uuid): - # Retrieves the DeploymentPlan with the specified uuid. - - def update(self, model): - # Updates a DeploymentPlan. - - def delete(self, uuid): - # Deletes the DeploymentPlan with the specified uuid. - - def list(self): - # Retrieves a list of all DeploymentPlans. - - -.. _tripleo_juno_tuskar_template_storage_drivers: - -**Storage Drivers** - -Storage drivers operate by storing object dictionaries. For storage solutions -such as Glance these dictionaries are stored as flat files. For a storage -solution such as a database, the dictionary is translated into a table row. It -is the responsibility of the driver to understand how it is storing the object -dictionaries. - -Each storage driver must provide the following methods. - -.. code-block:: python - - class Driver: - - def create(self, filename, object_dict): - # Stores the specified content under filename and returns the resulting - # uuid. - - def retrieve(self, uuid): - # Returns the object_dict matching the uuid. - - def update(self, uuid, object_dict): - # Updates the object_dict specified by the uuid. - - def delete(self, uuid): - # Deletes the content specified by the uuid. - - def list(self): - # Return a list of all content. - - -For Juno, we will aim to use a combination of a relational database and Heat. -Heat will be used for the secure storage of sensitive environment parameters. -Database tables will be used for everything else. The usage of Heat for secure -stores relies on `PATCH support`_ to be added the Heat API. This bug is -targeted for completion by Juno-2. - -.. _PATCH support: https://bugs.launchpad.net/heat/+bug/1224828 - -This is merely a short-term solution, as it is understood that there is some -reluctance in introducing an unneeded database dependency. 
In the long-term we -would like to replace the database with Glance once it is updated from an image -store to a more general artifact repository. However, this feature is currently -in development and cannot be relied on for use in the Juno cycle. The -architecture described in this specification should allow reasonable ease in -switching from one to the other. - - -.. _tripleo_juno_tuskar_template_storage_alternatives: - -Alternatives ------------- - -**Modeling Relationships within Heat Templates** - -The specification proposes modeling relationships such as a plan's associated -roles or a role's dependent templates as direct attributes of the object. -However, this information would appear to be available as part of a plan's -environment file or by traversing the role template's dependency graph. Why -not simply derive the relationships in that way? - -A role is a Tuskar abstraction. Within Heat, it corresponds to a template used -as a provider resource; however, a role has added requirements, such as the -versioning of itself and its dependent templates, or the ability to list out -available roles for selection within a plan. These are not requirements that -Heat intends to fulfill, and fulfilling them entirely within Heat feels like an -abuse of mechanics. - -From a practical point of view, modeling relationships within Heat templates -requires the in-place modification of Heat templates by Tuskar to deal with -versioning. For example, if version 1 of the compute role specifies -{{compute.yaml: 1}, {compute-config.yaml: 1}}, and version 2 of the role -specifies {{compute.yaml: 1}, {compute-config.yaml: 2}}, the only way to -allow both versions of the role to be used is to allow programmatic -modification of compute.yaml to point at the correct version of -compute-config.yaml. 
- - -**Swift as a Storage Backend** - -Swift was considered as an option to replace the relational database but was -ultimately discounted for two key reasons: - -- The versioning system in Swift doesn't provide a static reference to the - current version of an object. Rather it has the version "latest" and this is - dynamic and changes when a new version is added, therefore there is no way to - stick a deployment to a version. -- We need to create a relationship between the provider resources within a Role - and swift doesn't support relationships between stored objects. - -Having said that, after seeking guidance from the Swift team, it has been -suggested that a naming convention or work with different containers may -provide us with enough control to mimic a versioning system that meets our -requirements. These suggestions have made Swift more favourable as an option. - - -**File System as a Storage Backend** - -The filesystem was briefly considered and may be included to provide a simpler -developer setup. However, to create a production ready system with versioning, -and relationships this would require re-implementing much of what other -databases and services provide for us. Therefore, this option is reserved only -for a development option which will be missing key features. - - -**Secure Driver Alternatives** - -Barbican, the OpenStack secure storage service, provides us with an alternative -if PATCH support isn't added to Heat in time. - -Currently the only alternative other than Barbican is to implement our own -cryptography with one of the other options listed above. This isn't a -favourable choice as it adds a technical complexity and risk that should be -beyond the scope of this proposal. - -The other option with regards to sensitive data is to not store any. This would -require the REST API caller to provide the sensitive information each time a -Heat create (and potentially update) is called. 
- - -Security Impact ---------------- - -Some of the configuration values, such as service passwords, will be sensitive. -For this reason, Heat or Barbican will be used to store all configuration -values. - -While access will be controlled by the Tuskar API large files could be provided -in the place of provider resource files or configuration files. These should be -verified against a reasonable limit. - - -Other End User Impact ---------------------- - -The template storage will be primarily used by the Tuskar API, but as it may be -used directly in the future it will need to be documented. - - -Performance Impact ------------------- - -Storing the templates in Glance and Barbican will lead to API calls over the -local network rather than direct database access. These are likely to have -higher overhead. However, the read and writing used in Tuskar is expected to be -infrequent and will only trigger simple reads and writes when manipulating a -deployment plan. - - -Other Deployer Impact ---------------------- - -None - - -Developer Impact ----------------- - -TripleO will have access to sensitive and insensitive storage through the -storage API. - - -Implementation -============== - - -Assignee(s) ------------ - -Primary assignee: - d0ugal - -Other contributors: - tzumainn - - -Work Items ----------- - -- Implement storage API -- Create Glance and Barbican based storage driver -- Create database storage driver - - -Dependencies -============ - -- Glance -- Barbican - - -Testing -======= - -- The API logic will be verified with a suite of unit tests that mock the - external services. -- Tempest will be used for integration testing. - - -Documentation Impact -==================== - -The code should be documented with docstrings and comments. If it is used -outside of Tuskar further user documentation should be developed. 
- - -References -========== - -- https://blueprints.launchpad.net/glance/+spec/artifact-repository-api -- https://blueprints.launchpad.net/glance/+spec/metadata-artifact-repository -- https://bugs.launchpad.net/heat/+bug/1224828 -- https://docs.google.com/document/d/1tOTsIytVWtXGUaT2Ia4V5PWq4CiTfZPDn6rpRm5In7U -- https://etherpad.openstack.org/p/juno-hot-artifacts-repository-finalize-design -- https://etherpad.openstack.org/p/juno-summit-tripleo-tuskar-planning -- https://wiki.openstack.org/wiki/Barbican -- https://wiki.openstack.org/wiki/TripleO/TuskarJunoPlanning -- https://wiki.openstack.org/wiki/TripleO/TuskarJunoPlanning/TemplateBackend diff --git a/specs/juno/tripleo-on-openstack.rst b/specs/juno/tripleo-on-openstack.rst deleted file mode 100644 index b715efd7..00000000 --- a/specs/juno/tripleo-on-openstack.rst +++ /dev/null @@ -1,246 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -========================================== -QuintupleO - TripleO on OpenStack -========================================== - -https://blueprints.launchpad.net/tripleo/+spec/tripleo-on-openstack - -This is intended as a new way to do a TripleO deployment in a virtualized -environment. Rather than provisioning the target virtual machines directly -via virsh, we would be able to use the standard OpenStack apis to create and -manage the instances. This should make virtual TripleO environments more -scalable and easier to manage. - -Ultimately the goal would be to make it possible to do virtual TripleO -deployments on any OpenStack cloud, except where necessary features have -explicitly been disabled. We would like to have the needed features -available on the public clouds used for OpenStack CI, so existing providers -are invited to review this specification. 
- -Problem Description -=================== - -TripleO development and testing requires a lot of hardware resources, and -this is only going to increase as things like HA are enabled by default. -In addition, we are going to want to be able to test larger deployments than -will fit on a single physical machine. While it would be possible to set -this up manually, OpenStack already provides services capable of managing -a large number of physical hosts and virtual machines, so it doesn't make -sense to reinvent the wheel. - -Proposed Change -=============== - -* Write a virtual power driver for OpenStack instances. I already have a - rough version for nova-baremetal, but it needs a fair amount of cleaning up - before it could be merged into the main codebase. We will also need to - work with the Ironic team to enable this functionality there. - -* Determine whether changes are needed in Neutron to allow us to run our own - DHCP server, and if so work with the Neutron team to make those changes. - This will probably require allowing an instance to be booted without any - ip assigned. If not, booting an instance without an IP would be a good - future enhancement to avoid wasting IP quota. - -* Likewise, determine how to use virtual ips with keepalived/corosync+pacemaker - in Neutron, and if changes to Neutron are needed work with their team to - enable that functionality. - -* Enable PXE booting in Nova. There is already a bug open to track this - feature request, but it seems to have been abandoned. See the link in the - References section of this document. Ideally this should be enabled on a - per-instance basis so it doesn't require a specialized compute node, which - would not allow us to run on a standard public cloud. - -* For performance and feature parity with the current virtual devtest - environment, we will want to allow the use of unsafe caching for the - virtual baremetal instances. 
- -* Once all of the OpenStack services support this use case we will want to - convert our CI environment to a standard OpenStack KVM cloud, as well as - deprecate the existing method of running TripleO virtually and enable - devtest to install and configure a local OpenStack installation (possibly - using devstack) on which to run. - -* Depending on the state of our container support at that time, we may want - to run the devtest OpenStack using containers to avoid taking over the host - system the way devstack normally does. This may call for its own spec when - we reach that point. - -Alternatives ------------- - -* There's no real alternative to writing a virtual power driver. We have to - be able to manage OpenStack instances as baremetal nodes for this to work. - -* Creating a flat Neutron network connected to a local bridge can address the - issues with Neutron not allowing DHCP traffic, but that only works if you - have access to create the local bridge and configure the new network. This - may not be true in many (all?) public cloud providers. - -* I have not done any work with virtual IP addresses in Neutron yet, so it's - unclear to me whether any alternatives exist for that. - -* As noted earlier, using an iPXE image can allow PXE booting of Nova - instances. However, because that image is overwritten during the deploy, - it is not possible to PXE boot the instance afterward. Making the TripleO - images bootable on their own might be an option, but it would diverge from - how a real baremetal environment would work and thus is probably not - desirable. - -Deploy overcloud without PXE boot -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Since a number of the complications around doing TripleO development on an -OpenStack cloud relate to PXE booting the instances, one option that could -be useful in some situations is the ability to deploy images directly. 
Since -we're using Heat for deployments, it should be possible to build the TripleO -images with the ``vm`` element and deploy them as regular instances instead of -fake baremetal ones. - -This has the drawback of not exercising as much of the TripleO baremetal -functionality as a full virtual PXE boot process, but it should be easier to -implement, and for some development work not related to the deploy process -would be sufficient for verifying that a feature works as intended. It might -serve as a good intermediate step while we work to enable full PXE boot -functionality in OpenStack clouds. - -It would also prevent exercising HA functionality because we would likely not -be able to use virtual IP addresses if we can't use DHCP/PXE to manage our -own networking environment. - -Security Impact ---------------- - -* The virtual power driver is going to need access to OpenStack - credentials so it can control the instances. - -* The Neutron changes to allow private networks to behave as flat networks - may have security impacts, though I'm not exactly sure what they would be. - The same applies to virtual IP support. - -* PXE booting instances could in theory allow an attacker to override the - DHCP server and boot arbitrary images, but in order to do that they would - already need to have access to the private network being used, so I don't - consider this a significant new threat. - -Other End User Impact ---------------------- - -End users doing proof of concepts using a virtual deployment environment -would need to be switched to this new method, but that should be largely -taken care of by the necessary changes to devtest since that's what would -be used for such a deployment. - -Performance Impact ------------------- - -In my testing, my OpenStack virtual power driver was significantly slower -than the existing virsh-based one, but I believe with a better implementation -that could be easily solved. 
- -When running TripleO on a public cloud, a developer would be subject to the -usual limitations of shared hardware - a given resource may be oversubscribed -and cause performance issues for the processing or disk-heavy operations done -by a TripleO deployment. - -Other Deployer Impact ---------------------- - -This is not intended to be visible to regular deployers, but it should -make our CI environment more flexible by allowing more dynamic allocation -of resources. - -Developer Impact ----------------- - -If this becomes the primary method of doing TripleO development, devtest would -need to be altered to either point at an existing OpenStack environment or -to configure a local one itself. This will have an impact on how developers -debug problems with their environment, but since they would be debugging -OpenStack in that case it should be beneficial in the long run. - -Implementation -============== - -Assignee(s) ------------ - -Primary assignee: - bnemec - -Other contributors: - jang - -Work Items ----------- -* Implement an Ironic OpenStack virtual power driver. - -* Implement a nova-baremetal OpenStack virtual power driver, probably out - of tree based on the feedback we're getting from Nova and Ironic. - -* Enable PXE booting of Nova instances. - -* Enable unsafe caching to be enabled on Nova instances. - -* Allow DHCP/PXE traffic on private networks in Neutron. - -* If not already covered by the previous point, allow booting of instances - without IP addresses. - -* Migrate CI to use an OpenStack cloud for its virtual baremetal instances. - -* Migrate devtest to install and configure an OpenStack cloud instead of - managing instances and networking manually. - -* To simplify the VM provisioning process, we should make it possible to - provision but not boot a Nova VM. - - -Dependencies -============ - -The Ironic, Neutron, and Nova changes in the Work Items section will all have -to be done before TripleO can fully adopt this feature. 
- - -Testing -======= - -* All changes in the other projects will be unit and functional tested as - would any other new feature. - -* We cannot test this functionality by running devstack to provision an - OpenStack cloud in a gate VM, such as would be done for Tempest, because - the performance of the nested qemu virtual machines would make the process - prohibitively slow. We will need to have a baremetal OpenStack deployment - that can be targeted by the tests. A similar problem exists today with - virsh instances, however, and it can probably be solved in a similar - fashion with dedicated CI environments. - -* We will need to have Tempest tests gating on all the projects we use to - exercise the functionality we depend on. This should be largely covered - by the functional tests for the first point, but it's possible we will find - TripleO-specific scenarios that need to be added as well. - - -Documentation Impact -==================== - -devtest will need to be updated to reflect the new setup steps needed to run -it against an OpenStack-based environment. - - -References -========== - -This is largely based on the discussion Devtest on OpenStack in -https://etherpad.openstack.org/p/devtest-env-reqs - -Nova bug requesting PXE booting support: -https://bugs.launchpad.net/nova/+bug/1183885 diff --git a/specs/juno/unit-testing.rst b/specs/juno/unit-testing.rst deleted file mode 100644 index f3022de2..00000000 --- a/specs/juno/unit-testing.rst +++ /dev/null @@ -1,187 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. 
- - http://creativecommons.org/licenses/by/3.0/legalcode - -========================================== -Unit Testing TripleO Projects -========================================== - -https://blueprints.launchpad.net/tripleo/unit-testing - -We should enable more unit testing in TripleO projects to allow better test -coverage of code paths not included in CI, make it easier for reviewers -to verify that a code change does what it is supposed to, and avoid wasting -reviewer and developer time resolving style issues. - -Problem Description -=================== - -Right now there is very little unit testing of the code in most of the TripleO -projects. This has a few negative effects: - -- We have no test coverage of any code that isn't included in our CI runs. - -- For the code that is included in CI runs, we don't actually know how much - of that code is being tested. There may be many code branches that are not - used during a CI run. - -- We have no way to test code changes in isolation, which makes it slower to - iterate on them. - -- Changes not covered by CI are either not tested at all or must be manually - tested by reviewers, which is tedious and error-prone. - -- Major refactorings frequently break less commonly used interfaces to tools - because those interfaces are not tested. - -Additionally, because there are few/no hacking-style checks in the TripleO -projects, many patches get -1'd for style issues that could be caught by -an automated tool. This causes unnecessary delay in merging changes. - -Proposed Change -=============== - -I would like to build out a unit testing framework that simplifies the -process of unit testing in TripleO. Once that is done, we should start -requiring unit tests for new and changed features like the other OpenStack -projects do. At that point we can also begin adding test coverage for -existing code. 
- -The current plan is to make use of Python unit testing libraries to be as -consistent as possible with the rest of OpenStack and make use of the test -infrastructure that already exists. This will reduce the amount of new code -required and make it easier for developers to begin writing unit tests. - -For style checking, the dib-lint tool has already been created to catch -common errors in image elements. More rules should be added to it as we -find problems that can be automatically found. It should also be applied -to the tripleo-image-elements project. - -The bashate project also provides some general style checks that would be -useful in TripleO, so we should begin making use of it as well. We should -also contribute additional checks when possible and provide feedback on any -checks we disagree with. - -Any unit tests added should be able to run in parallel. This both speeds up -testing and helps find race bugs. - -Alternatives ------------- - -Shell unit testing -^^^^^^^^^^^^^^^^^^ -Because of the quantity of bash code used in TripleO, we may want to -investigate using a shell unit test framework in addition to Python. I -think this can be revisited once we are further along in the process and -have a better understanding of how difficult it will be to unit test our -scripts with Python. I still think we should start with Python for the -reasons above and only add other options if we find something that Python -unit tests can't satisfy. - -One possible benefit of a shell-specific unit testing framework is that it -could provide test coverage stats so we know exactly what code is and isn't -being tested. - -If we determine that a shell unit test framework is needed, we should try -to choose a widely-used one with well-understood workflows to ease adoption. - -Sandboxing -^^^^^^^^^^ -I have done some initial experimentation with using fakeroot/fakechroot to -sandbox scripts that expect to have access to the root filesystem. 
I was -able to run a script that writes to root-owned files as a regular user, making -it think it was writing to the real files, but I haven't gotten this working -with tox for running unit tests that way. - -Another option would be to use real chroots. This would provide isolation -and is probably more common than fakeroots. The drawback would be that -chrooting requires root access on the host machine, so running the unit tests -would as well. - -Security Impact ---------------- - -Many scripts in elements assume they will be running as root. We obviously -don't want to do that in unit tests, so we need a way to sandbox those scripts -to allow them to run but not affect the test system's root filesystem. - -Other End User Impact ---------------------- - -None - -Performance Impact ------------------- - -Adding more tests will increase the amount of time Jenkins gate jobs take. -This should have minimal real impact though, because unit tests should run -in significantly less time than the integration tests. - -Other Deployer Impact ---------------------- - -None - -Developer Impact ----------------- - -Developers will need to implement unit tests for their code changes, which -will require learning the unit testing tools we adopt. - - -Implementation -============== - -Assignee(s) ------------ - -Primary assignee: - bnemec - -goneri has begun some work to enable dib-lint in tripleo-image-elements - -Work Items ----------- - -* Provide and document a good Python framework for testing the behavior of - bash scripts. Use existing functionality in upstream projects where - possible, and contribute new features when necessary. - -* Gate tripleo-image-elements on dib-lint, which will require fixing any - lint failures currently in tripleo-image-elements. - -* Enable bashate in the projects with a lot of bash scripts. - -* Add unit-testing to tripleo-incubator to enable verification of things - like ``devtest.sh --build-only``. 
- -* Add a template validation test job to tripleo-heat-templates. - -Dependencies -============ - -* bashate will be a new test dependency. - -Testing -======= - -These changes should leverage the existing test infrastructure as much as -possible, so the only thing needed to enable the new tests would be changes -to the infra config for the affected projects. - -Documentation Impact -==================== - -None of this work should be user-visible, but we may need developer -documentation to help with writing unit tests. - - -References -========== - -bashate: http://git.openstack.org/cgit/openstack-dev/bashate/ - -There are some notes related to this spec at the bottom of the Summit -etherpad: https://etherpad.openstack.org/p/juno-summit-tripleo-ci diff --git a/specs/juno/virtual-public-ips.rst b/specs/juno/virtual-public-ips.rst deleted file mode 100644 index 84021175..00000000 --- a/specs/juno/virtual-public-ips.rst +++ /dev/null @@ -1,159 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -================================ -Virtual IPs for public addresses -================================ - -Include the URL of your launchpad blueprint: - -https://blueprints.launchpad.net/tripleo/+specs/tripleo-juno-virtual-public-ips - -The current public IP feature is intended to specify the endpoint that a cloud -can be reached at. This is typically something where HA is highly desirable. - -Making the public IP be a virtual IP instead of locally bound to a single -machine should increase the availability of the clustered service, once we -increase the control plane scale to more than one machine. - -Problem Description -=================== - -Today, we run all OpenStack services with listening ports on one virtual IP. 
- -This means that we're exposing RabbitMQ, MySQL and possibly other cluster-only -services to the world, when really what we want is public services exposed to -the world and cluster only servers not exposed to the world. Deployers are -(rightfully) not exposing our all-services VIP to the world, which leads to -them having to choose between a) no support for externally visible endpoints, -b) all services attackable or c) manually tracking the involved ports and -playing a catch-up game as we evolve things. - -Proposed Change -=============== - -Create a second virtual IP from a user supplied network. Bind additional copies -of API endpoints that should be publically accessible to that virtual IP. We -need to keep presenting them internally as well (still via haproxy and the -control virtual IP) so that servers without any public connectivity such as -hypervisors can still use the APIs (though they may need to override the IP to -use in their hosts files - we have facilities for that already). - -The second virtual IP could in principle be on a dedicated ethernet card, or -on a VLAN on a shared card. For now, lets require the admin to specify the -interface on which keepalived should be provisioning the shared IP - be that -``br-ctlplane``, ``vlan25`` or ``eth2``. Because the network topology may be -independent, the keepalive quorum checks need to take place on the specified -interface even though this costs external IP addresses. - -The user must be able to specify the same undercloud network as they do today -so that small installs are not made impossible - requiring two distinct -networks is likely hard for small organisations. Using the same network would -not imply using the same IP address - a dedicated IP address will still be -useful to permit better testing confidence and also allows for simple exterior -firewalling of the cluster. - -Alternatives ------------- - -We could not do HA for the public endpoints - not really an option. 
- -We could not do public endpoints and instead document how to provide border -gateway firewalling and NAT through to the endpoints. This just shifts the -problem onto infrastructure we are not deploying, making it harder to deploy. - -Security Impact ---------------- - -Our security story improves by making this change, as we can potentially -start firewalling the intra-cluster virtual IP to only allow known nodes to -connect. Short of that, our security story has improved since we started -binding to specific ips only, as that made opening a new IP address not -actually expose core services (other than ssh) on it. - -Other End User Impact ---------------------- - -End users will need to be able to find out about the new virtual IP. That -should be straight forward via our existing mechanisms. - -Performance Impact ------------------- - -None anticipated. - -Other Deployer Impact ---------------------- - -Deployers will require an additional IP address either on their undercloud -ctlplane network (small installs) or on their public network (larger/production -installs). - -Developer Impact ----------------- - -None expected. - -Implementation -============== - -Assignee(s) ------------ - -Primary assignee: - lifeless (hahahaha) - -Other contributors: - None. - -Work Items ----------- - -* Generalise keepalived.conf to support multiple VRRP interfaces. - -* Add support for binding multiple IPs to the haproxy configuration. - -* Add logic to incubator and/or heat templates to request a second virtual IP. - -* Change heat templates to bind public services to the public virtual IP. - -* Possibly tweak setup-endpoints to cooperate, though the prior support - should be sufficient. - -These are out of scope for this, but necessary to use it - I intend to put -them in the discussion in Dan's network overhaul spec. 
- -* Add optional support to our heat templates to boot the machines with two - nics, not just one - so that we have an IP address for the public interface - when it's a physical interface. We may find there are ordering / enumeration - issues in Nova/Ironic/Neutron to solve here. - -* Add optional support to our heat templates for statically allocating a port - from neutron and passing it into the control plane for when we're using - VLANs. - -Dependencies -============ - -None. - -Testing -======= - -This will be on by default, so our default CI path will exercise it. - -Additionally we'll be using it in the upcoming VLAN test job which will -give us confidence it works when the networks are partitioned. - -Documentation Impact -==================== - -Add to the manual is the main thing. - -References -========== - -None diff --git a/specs/kilo/cinder_ha.rst b/specs/kilo/cinder_ha.rst deleted file mode 100644 index 71d33611..00000000 --- a/specs/kilo/cinder_ha.rst +++ /dev/null @@ -1,183 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -========= -Cinder HA -========= - -https://blueprints.launchpad.net/tripleo/+spec/tripleo-kilo-cinder-ha - -Ensure Cinder volumes remain available if one or multiple nodes running -Cinder services or hosting volumes go down. - - -Problem Description -=================== - -TripleO currently deploys Cinder without a shared storage, balancing requests -amongst the nodes. Should one of the nodes running `cinder-volume` fail, -requests for volumes hosted by that node will fail as well. In addition to that, -without a shared storage, should a disk of any of the `cinder-volume` nodes -fail, volumes hosted by that node would be lost forever. 
- - -Proposed Change -=============== - -Overview --------- - -We aim at introducing support for the configuration of Cinder's Ceph backend -driver and for the deployment of a Ceph storage for use with Cinder. - -Such a scenario will install `ceph-osd` on an arbitrary number of Ceph storage -nodes and `cinder-api`, `cinder-scheduler`, `cinder-volume` and `ceph-mon` on -the controller nodes, allowing users to scale out the Ceph storage nodes -independently from the controller nodes. - -To ensure HA of the volumes, these will be then hosted on the Ceph storage and -to achieve HA for the `cinder-volume` service, all Cinder nodes will use a -shared string as their `host` config setting so that will be able to operate -on the entire (and shared) set of volumes. - -Support for configuration of more drivers could be added later. - -Alternatives ------------- - -An alternative approach could be to deploy the `cinder-volume` services in an -active/standby configuration. This would allow us to support scenarios where the -storage is not shared amongst the Cinder nodes, one of which is for example -LVM over a shared Fiber Channel LUNs. Such a scenario would suffer from -downsides though, it won't permit to scale out and balance traffic over the -storage nodes as easily and may be prone to issues related to the iSCSI session -management on failover. - -A different scenario, based instead on the usage of LVM and DRBD combined, could -be imagined too. Yet this would suffer from downsides as well. The deployment -program would be put in charge of managing the replicas and probably required to -have some understanding of the replicas status as well. These are easily covered -by Ceph itself which takes care of more related problems indeed, like data -rebalancing, or replicas recreation. - -Security Impact ---------------- - -By introducing support for the deployment of the Ceph's tools, we will have to -secure the Ceph services. 
- -We will allow access to the data hosted by Ceph only to authorized hosts via -usage of `cephx` for authentication, distributing the `cephx` keyrings on the -relevant nodes. Controller nodes will be provisioned with the `ceph.mon` -keyring, with the `client.admin` keyring and the `client.cinder` keyring, -Compute nodes will be provisioned with the `client.cinder` secret in libvirt and -lastly the Ceph storage nodes will be provisioned with the `client.admin` -keyring. - -It is to be said that monitors should not be reachable from the public -network, despite being hosted on the Controllers. Also Cinder won't need -to get access to the monitors' keyring nor the `client.admin` keyring but -those will be hosted on same host as Controllers also run the Ceph monitor -service; Cinder config will not provide any knowledge about those though. - -Other End User Impact ---------------------- - -Cinder volumes as well as Cinder services will remain available despite failure -of one (or more depending on scaling setting) of the Controller nodes or Ceph -storage nodes. - -Performance Impact ------------------- - -The `cinder-api` services will remain balanced and the Controller nodes unloaded -of the LVM-file overhead and the iSCSI traffic so this topology should, as an -additional benefit, improve performances. - -Other Deployer Impact ---------------------- - -* Automated setup of Cinder HA will require the deployment of Ceph. - -* To take advantage of a pre-existing Ceph installation instead of deploying it - via TripleO, deployers will have to provide the input data needed to configure - Cinder's backend driver appropriately - -* It will be possible to scale the number of Ceph storage nodes at any time, as - well as the number of Controllers (running `cinder-volume`) but changing the - backend driver won't be supported as there are no plans to support volumes - migration. 
- -* Not all Cinder drivers support the scenario where multiple instances of the - `cinder-volume` service use a shared `host` string, notably the default LVM - driver does not. We will use this setting only when appropriate config params - are found in the Heat template, as it happens today with the param called - `include_nfs_backend`. - -* Ceph storage nodes, running the `ceph-osd` service, use the network to - maintain replicas' consistency and as such may transfer some large amount of - data over the network. Ceph allows for the OSD service to differentiate - between a public network and a cluster network for this purpose. This spec - is not going to introduce support for usage of a dedicated cluster network - but we want to have a follow-up spec to implement support for that later. - -Developer Impact ----------------- - -Cinder will continue to be configured with the LVM backend driver by default. - -Developers interested in testing Cinder with the Ceph shared storage will have -to use an appropriate scaling setting for the Ceph storage nodes. - - -Implementation -============== - -Assignee(s) ------------ - -Primary assignee: - gfidente - -Other contributors: - jprovazn - -Work Items ----------- - -* add support for deployment of Cinder's Ceph backend driver - -* add support for deployment of the Ceph services - -* add support for external configuration of Cinder's Ceph backend driver - - -Dependencies -============ - -None. - - -Testing -======= - -Will be testable in CI when support for the deployment of the shared Ceph -storage nodes becomes available in TripleO itself. - - -Documentation Impact -==================== - -We will need to provide documentation on how users can deploy Cinder together -with the Ceph storage nodes and also on how users can use instead some -pre-existing Ceph deployment. 
- - -References -========== - -juno mid-cycle meetup -kilo design session, https://etherpad.openstack.org/p/tripleo-kilo-l3-and-cinder-ha diff --git a/specs/kilo/remove-mergepy.rst b/specs/kilo/remove-mergepy.rst deleted file mode 100644 index 9313fe5a..00000000 --- a/specs/kilo/remove-mergepy.rst +++ /dev/null @@ -1,486 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -=========================================== -Remove merge.py from TripleO Heat Templates -=========================================== - -https://blueprints.launchpad.net/tripleo/+spec/tripleo-juno-remove-mergepy - -``merge.py`` is where we've historically accumulated the technical debt for our -Heat templates [0]_ with the intention of migrating away from it when Heat meets -our templating needs. - -Its main functionality includes combining smaller template snippets into a -single template describing the full TripleO deployment, merging certain -resources together to reduce duplication while keeping the snippets themselves -functional as standalone templates and a support for manual scaling of Heat -resources. - -This spec describes the changes necessary to move towards templates -that do not depend on ``merge.py``. We will use native Heat features -where we can and document the rest, possibly driving new additions to -the Heat template format. - -It is largely based on the April 2014 discussion in openstack-dev [1]_. - - -Problem Description -=================== - -Because of the mostly undocumented nature of ``merge.py`` our templates are -difficult to understand or modify by newcomers (even those already familiar with -Heat). - -It has always been considered a short-term measure and Heat can now provide most -of what we need in our templates. 
- - -Proposed Change -=============== - -We will start with making small correctness-preserving changes to our -templates and ``merge.py`` that move us onto using more Heat native -features. Where we cannot make the change for some reason, we will -file a bug with Heat and work with them to unblock the process. - -Once we get to a point where we have to do large changes to the -structure of our templates, we will split them off to new files and -enable them in our CI as parallel implementations. - -Once we are confident that the new templates fulfill the same -requirements as the original ones, we will deprecate the old ones, -deprecate ``merge.py`` and switch to the new ones as the default. - -The list of action items necessary for the full transition is -below. - -**1. Remove the custom resource types** - -TripleO Heat templates and ``merge.py`` carry two custom types that (after the -move to software config [8]_, [9]_) are no longer used for anything: - -* OpenStack::ImageBuilder::Elements -* OpenStack::Role - -We will drop them from the templates and deprecate in the merge tool. - - -**2. Remove combining whitelisted resource types** - -If we have two ``AWS::AutoScaling::LaunchConfiguration`` resources with the same -name, ``merge.py`` will combine their ``Properties`` and ``Metadata``. Our -templates are no longer using this after the software-config update. - - -**3. Port TripleO Heat templates to HOT** - -With most of the non-Heat syntax out of the way, porting our CFN/YAML templates -to pure HOT format [2]_ should be straightforward. - -We will have to update ``merge.py`` as well. We should be able to support both -the old format and HOT. - -We should be able to differentiate between the two by looking for the -``heat_template_version`` top-level section which is mandatory in the HOT -syntax. 
- -Most of the changes to ``merge.py`` should be around spelling (``Parameters`` -> -``parameters``, ``Resources`` -> ``resources``) and different names for -intrinsic functions, etc. (``Fn::GetAtt`` -> ``get_attr``). - -This task will require syntactic changes to all of our templates and -unfortunately, it isn't something different people can update bit by bit. We -should be able to update the undercloud and overcloud portions separately, but -we can't e.g. just update a part of the overcloud. We are still putting -templates together with ``merge.py`` at this point and we would end up with a -template that has both CFN and HOT bits. - - -**4. Move to Provider resources** - -Heat allows passing-in multiple templates when deploying a stack. These -templates can map to custom resource types. Each template would represent a role -(compute server, controller, block storage, etc.) and its ``parameters`` and -``outputs`` would map to the custom resource's ``properties`` and -``attributes``. - -These roles will be referenced from a master template (``overcloud.yaml``, -``undercloud.yaml``) and eventually wrapped in a scaling resource -(``OS::Heat::ResourceGroup`` [5]_) or whatever scaling mechanism we adopt. - -.. note:: Provider resources represent fully functional standalone templates. - Any provider resource template can be passed to Heat and turned into a - stack or treated as a custom resource in a larger deployment. 
- -Here's a hypothetical outline of ``compute.yaml``:: - - parameters: - flavor: - type: string - image: - type: string - amqp_host: - type: string - nova_compute_driver: - type: string - - resources: - compute_instance: - type: OS::Nova::Server - properties: - flavor: {get_param: flavor} - image: {get_param: image} - - compute_deployment: - type: OS::Heat::StructuredDeployment - properties: - server: {ref: compute_instance} - config: {ref: compute_config} - input_values: - amqp_host: {get_param: amqp_host} - nova_compute_driver: {get_param: nova_compute_driver} - - compute_config: - type: OS::Heat::StructuredConfig - properties: - group: os-apply-config - config: - amqp: - host: {get_input: amqp_host} - nova: - compute_driver: {get_input: nova_compute_driver} - ... - -We will use a similar structure for all the other roles (``controller.yaml``, -``block-storage.yaml``, ``swift-storage.yaml``, etc.). That is, each role will -contain the ``OS::Nova::Server``, the associated deployments and any other -resources required (random string generators, security groups, ports, floating -IPs, etc.). - -We can map the roles to custom types using Heat environments [4]_. - -``role_map.yaml``: :: - - resource_registry: - OS::TripleO::Compute: compute.yaml - OS::TripleO::Controller: controller.yaml - OS::TripleO::BlockStorage: block-storage.yaml - OS::TripleO::SwiftStorage: swift-storage.yaml - - -Lastly, we'll have a master template that puts it all together. - -``overcloud.yaml``:: - - parameters: - compute_flavor: - type: string - compute_image: - type: string - compute_amqp_host: - type: string - compute_driver: - type: string - ... 
- - resources: - compute0: - # defined in controller.yaml, type mapping in role_map.yaml - type: OS::TripleO::Compute - parameters: - flavor: {get_param: compute_flavor} - image: {get_param: compute_image} - amqp_host: {get_param: compute_amqp_host} - nova_compute_driver: {get_param: compute_driver} - - controller0: - # defined in controller.yaml, type mapping in role_map.yaml - type: OS::TripleO::Controller - parameters: - flavor: {get_param: controller_flavor} - image: {get_param: controller_image} - ... - - outputs: - keystone_url: - description: URL for the Overcloud Keystone service - # `keystone_url` is an output defined in the `controller.yaml` template. - # We're referencing it here to expose it to the Heat user. - value: { get_attr: [controller_0, keystone_url] } - -and similarly for ``undercloud.yaml``. - -.. note:: The individual roles (``compute.yaml``, ``controller.yaml``) are - structured in such a way that they can be launched as standalone - stacks (i.e. in order to test the compute instance, one can type - ``heat stack-create -f compute.yaml -P ...``). Indeed, Heat treats - provider resources as nested stacks internally. - - -**5. Remove FileInclude from ``merge.py``** - -The goal of ``FileInclude`` was to keep individual Roles (to borrow a -loaded term from TripleO UI) viable as templates that can be launched -standalone. The canonical example is ``nova-compute-instance.yaml`` [3]_. - -With the migration to provider resources, ``FileInclude`` is not necessary. - - -**6. Move the templates to Heat-native scaling** - -Scaling of resources is currently handled by ``merge.py``. The ``--scale`` -command line argument takes a resource name and duplicates it as needed (it's -a bit more complicated than that, but that's beside the point). 
- -Heat has a native scaling ``OS::Heat::ResourceGroup`` [5]_ resource that does -essentially the same thing:: - - scaled_compute: - type: OS::Heat::ResourceGroup - properties: - count: 42 - resource_def: - type: OS::TripleO::Compute - parameters: - flavor: baremetal - image: compute-image-rhel7 - ... - -This will create 42 instances of compute hosts. - - -**7. Replace Merge::Map with scaling groups' inner attributes** - -We are using the custom ``Merge::Map`` helper function for getting values out of -scaled-out servers: - -* `Building a comma-separated list of RabbitMQ nodes`__ - -__ https://github.com/openstack/tripleo-heat-templates/blob/a7f2a2c928e9c78a18defb68feb40da8c7eb95d6/overcloud-source.yaml#L642 - -* `Getting the name of the first controller node`__ - -__ https://github.com/openstack/tripleo-heat-templates/blob/a7f2a2c928e9c78a18defb68feb40da8c7eb95d6/overcloud-source.yaml#L405 - -* `List of IP addresses of all controllers`__ - -__ https://github.com/openstack/tripleo-heat-templates/blob/a7f2a2c928e9c78a18defb68feb40da8c7eb95d6/overcloud-source.yaml#L405 - -* `Building the /etc/hosts file`__ - -__ https://github.com/openstack/tripleo-heat-templates/blob/a7f2a2c928e9c78a18defb68feb40da8c7eb95d6/overcloud-source.yaml#L585 - - -The ``ResourceGroup`` resource supports selecting an attribute of an inner -resource as well as getting the same attribute from all resources and returning -them as a list. 
- -Example of getting an IP address of the controller node: :: - - {get_attr: [controller_group, resource.0.networks, ctlplane, 0]} - -(`controller_group` is the `ResourceGroup` of our controller nodes, `ctlplane` -is the name of our control plane network) - -Example of getting the list of names of all of the controller nodes: :: - - {get_attr: [controller_group, name]} - -The more complex uses of ``Merge::Map`` involve formatting the returned data in -some way, for example building a list of ``{ip: ..., name: ...}`` dictionaries -for haproxy or generating the ``/etc/hosts`` file. - -Since our ResourceGroups will not be using Nova servers directly, but rather the -custom role types using provider resources and environments, we can put this -data formatting into the role's ``outputs`` section and then use the same -mechanism as above. - -Example of building out the haproxy node entries:: - - # overcloud.yaml: - resources: - controller_group: - type: OS::Heat::ResourceGroup - properties: - count: {get_param: controller_scale} - resource_def: - type: OS::TripleO::Controller - properties: - ... - - controllerConfig: - type: OS::Heat::StructuredConfig - properties: - ... - haproxy: - nodes: {get_attr: [controller_group, haproxy_node_entry]} - - - - # controller.yaml: - resources: - ... - controller: - type: OS::Nova::Server - properties: - ... - - outputs: - haproxy_node_entry: - description: A {ip: ..., name: ...} dictionary for configuring the - haproxy node - value: - ip: {get_attr: [controller, networks, ctlplane, 0]} - name: {get_attr: [controller, name]} - - - -Alternatives ------------- - -This proposal is very t-h-t and Heat specific. One alternative is to do nothing -and keep using and evolving ``merge.py``. That was never the intent, and most -members of the core team do not consider this a viable long-term option. - - -Security Impact ---------------- - -This proposal does not affect the overall functionality of TripleO in any way. 
-It just changes the way TripleO Heat templates are stored and written.
-
-If anything, this will move us towards more standard and thus more easily
-auditable templates.
-
-
-Other End User Impact
----------------------
-
-There should be no impact for the users of vanilla TripleO.
-
-More advanced users may want to customise the existing Heat templates or write
-their own. That will be made easier when we rely on standard Heat features only.
-
-
-Performance Impact
-------------------
-
-This moves some of the template-assembling burden from ``merge.py`` to Heat. It
-will likely also end up producing more resources and nested stacks on the
-background.
-
-As far as we're aware, no one has tested these features at the scale we are
-inevitably going to hit.
-
-Before we land changes that can affect this (provider config and scaling) we
-need to have scale tests in Tempest running TripleO to make sure Heat can cope.
-
-These tests can be modeled after the `large_ops`_ scenario: a Heat template that
-creates and destroys a stack of 50 Nova server resources with associated
-software configs.
-
-We should have two tests to assess the before and after performance:
-
-1. A single HOT template with 50 copies of the same server resource and software
-   config/deployment.
-2. A template with a single server and its software config/deploys, an
-   environment file with a custom type mapping and an overall template that
-   wraps the new type in a ResourceGroup with the count of 50.
-
-.. _large_ops: https://github.com/openstack/tempest/blob/master/tempest/scenario/test_large_ops.py
-
-
-Other Deployer Impact
----------------------
-
-Deployers can keep using ``merge.py`` and the existing Heat templates as before
--- existing scripts ought not break.
-
-With the new templates, Heat will be called directly and will need the resource
-registry (in a Heat environment file). This will mean a change in the deployment
-process.
- - - -Developer Impact ----------------- - -This should not affect non-Heat and non-TripleO OpenStack developers. - -There will likely be a slight learning curve for the TripleO developers who want -to write and understand our Heat templates. Chances are, we will also encounter -bugs or unforeseen complications while swapping ``merge.py`` for Heat features. - -The impact on Heat developers would involve processing the bugs and feature -requests we uncover. This will hopefully not be an avalanche. - - -Implementation -============== - -Assignee(s) ------------ - -Primary assignee: - Tomas Sedovic - - -Work Items ----------- - -1. Remove the custom resource types -2. Remove combining whitelisted resource types -3. Port TripleO Heat templates to HOT -4. Move to Provider resources -5. Remove FileInclude from ``merge.py`` -6. Move the templates to Heat-native scaling -7. Replace Merge::Map with scaling groups' inner attributes - - -Dependencies -============ - -* The Juno release of Heat -* Being able to kill specific nodes in Heat (for scaling down or because they're - misbehaving) - - Relevant Heat blueprint: `autoscaling-parameters`_ - -.. _autoscaling-parameters: https://blueprints.launchpad.net/heat/+spec/autoscaling-parameters - - -Testing -======= - -All of these changes will be made to the tripleo-heat-templates repository and -should be testable by our CI just as any other t-h-t change. - -In addition, we will need to add Tempest scenarios for scale to ensure Heat can -handle the load. - - -Documentation Impact -==================== - -We will need to update the `devtest`_, `Deploying TripleO`_ and `Using TripleO`_ -documentation and create a guide for writing TripleO templates. - -.. _devtest: http://docs.openstack.org/developer/tripleo-incubator/devtest.html -.. _Deploying TripleO: http://docs.openstack.org/developer/tripleo-incubator/deploying.html -.. 
_Using TripleO: http://docs.openstack.org/developer/tripleo-incubator/userguide.html - - -References -========== - -.. [0] https://github.com/openstack/tripleo-heat-templates -.. [1] http://lists.openstack.org/pipermail/openstack-dev/2014-April/031915.html -.. [2] http://docs.openstack.org/developer/heat/template_guide/hot_guide.html -.. [3] https://github.com/openstack/tripleo-heat-templates/blob/master/nova-compute-instance.yaml -.. [4] http://docs.openstack.org/developer/heat/template_guide/environment.html -.. [5] http://docs.openstack.org/developer/heat/template_guide/openstack.html#OS::Heat::ResourceGroup -.. [8] https://review.openstack.org/#/c/81666/ -.. [9] https://review.openstack.org/#/c/93319/ diff --git a/specs/kilo/tripleo-enable-dvr.rst b/specs/kilo/tripleo-enable-dvr.rst deleted file mode 100644 index 7d5a92c8..00000000 --- a/specs/kilo/tripleo-enable-dvr.rst +++ /dev/null @@ -1,169 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -========================================== -Enable Neutron DVR on overcloud in TripleO -========================================== - -https://blueprints.launchpad.net/tripleo/+spec/support-neutron-dvr - -Neutron distributed virtual routing should be able to be configured in TripleO. - - -Problem Description -=================== - -To be able to enable distributed virtual routing in Neutron there needs to be -several changes to the current TripleO overcloud deployment. The overcloud -compute node(s) are constructed with the ``neutron-openvswitch-agent`` image -element, which provides the ``neutron-openvswitch-agent`` on the compute node. -In order to support distributed virtual routing, the compute node(s) must also -have the ``neutron-metadata-agent`` and ``neutron-l3-agent`` installed. The -installation of the ``neutron-l3-agent`` and ``neutron-dhcp-agent`` will need -also to be decoupled. 
- -Additionally, for distributed virtual routing to be enabled, the -``neutron.conf``, ``l3_agent.ini`` and ``ml2_conf.ini`` all need to have -additional settings. - -Proposed Change -=============== - -Overview --------- - -In the tripleo-image-elements, move the current ``neutron-network-node`` element -to an element named ``neutron-router``, which will be responsible for doing the -installation and configuration work required to install the ``neutron-l3-agent`` -and the ``neutron-metadata-agent``. This ``neutron-router`` element will list -the ``neutron-openvswitch-agent`` in its element-deps. The ``neutron-network --node`` element will then become simply a 'wrapper' whose sole purpose is to list -the dependencies required for a network node (neutron, ``neutron-dhcp-agent``, -``neutron-router``, os-refresh-config). - -Additionally, in the tripleo-image-elements/neutron element, the -``neutron.conf``, ``l3_agent.ini`` and ``plugins/ml2/ml2_conf.ini`` will be -modified to add the configuration variables required in each to support -distributed virtual routing (the required configuration variables are listed at -https://wiki.openstack.org/wiki/Neutron/DVR/HowTo#Configuration). - -In the tripleo-heat-templates, the ``nova-compute-config.yaml`` -``nova-compute-instance.yaml`` and ``overcloud-source.yaml`` files will be -modified to provide the correct settings for the new distributed virtual routing -variables. The enablement of distributed virtual routing will be determined by -a 'NeutronDVR' variable which will be 'False' by default (distributed virtual -routing not enabled) for backward compatibility, but can be set to 'True' if -distributed virtual routing is desired. 
- -Lastly, the tripleo-incubator script ``devtest_overcloud.sh`` will be modified -to: a) build the overcloud-compute disk-image with ``neutron-router`` rather -than with ``neutron-openvswitch-agent``, and b) configure the appropriate -parameter values to be passed in to the heat stack create for the overcloud so -that distributed routing is either enabled or disabled. - -Alternatives ------------- - -We could choose to make no change to the ``neutron-router`` image-element and -it can be included as well in the list of elements arguments to the disk image -build for compute nodes. This has the undesired effect of also -including/configuring and starting the ``neutron-dhcp-agent`` on each compute -node. Alternatively, it is possible to keep the ``neutron-network-node`` -element as it is and create a ``neutron-router`` element which is a copy of -most of the element contents of the ``neutron-network-node`` element but without -the dependency on the ``neutron-dhcp-agent`` element. This approach would -introduce a significant amount of code duplication. - -Security Impact ---------------- - -Although TripleO installation does not use FWaaS, enablement of DVR currently -is known to break FWaaS. -See https://blueprints.launchpad.net/neutron/+spec/neutron-dvr-fwaas - -Other End User Impact ---------------------- - -The user will have the ability to set an environment variable during install -which will determine whether distributed virtual routing is enabled or not. - -Performance Impact ------------------- - -None identified - -Other Deployer Impact ---------------------- - -The option to enable or disable distributed virtual routing at install time will -be added. By default distributed virtual routing will be disabled. 
- -Developer Impact ----------------- - -None identified - -Implementation -============== - -Assignee(s) ------------ - -Primary assignee: - Erik Colnick (erikcolnick on Launchpad) -Other contributors: - None - -Work Items ----------- - - * Create ``neutron-router`` element in tripleo-image-elements and move related - contents from ``neutron-network-node`` element. Remove the - ``neutron-dhcp-agent`` dependency from the element-deps of the - ``neutron-router`` element. - - * Add the ``neutron-router`` element as a dependency in the - ``neutron-network-node`` ``element-deps`` file. The ``element-deps`` - file becomes the only content in the ``neutron-network-node`` element. - - * Add the configuration values indicated in - https://wiki.openstack.org/wiki/Neutron/DVR/HowTo#Configuration to the - ``neutron.conf``, ``l3_agent.ini`` and ``ml2_conf.ini`` files in the - ``neutron`` image element. - - * Add the necessary reference variables to the ``nova-compute-config.yaml`` and - ``nova-compute-instance.yaml`` tripleo-heat-templates files in order to be - able to set the new variables in the config files (from above item). Add - definitions and default values in ``overcloud-source.yaml``. - - * Modify tripleo-incubator ``devtest_overcloud.sh`` script to set the - appropriate environment variables which will drive the configuration of - neutron on the overcloud to either enable distributed virtual routers or - disable distributed virtual routers (with disable as the default). - -Dependencies -============ - -None - -Testing -======= - -Existing TripleO CI will help ensure that as this is implemented, the current -feature set is not impacted and that the default behavior of disabled -distributed virtual routers is maintained. - -Additional CI tests which test the installation with distributed virtual -routers should be added as this implementation is completed. - -Documentation Impact -==================== - -Documentation of the new configuration option will be needed. 
- -References -========== - diff --git a/specs/kilo/tripleo-review-standards.rst b/specs/kilo/tripleo-review-standards.rst deleted file mode 100644 index 23e9b864..00000000 --- a/specs/kilo/tripleo-review-standards.rst +++ /dev/null @@ -1,144 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -======================== -TripleO Review Standards -======================== - -No launchpad blueprint because this isn't a spec to be implemented in code. - -Like many OpenStack projects, TripleO generally has more changes incoming to -the projects than it has core reviewers to review and approve those changes. -Because of this, optimizing reviewer bandwidth is important. This spec will -propose some changes to our review process discussed at the Paris OpenStack -Summit and intended to make the best possible use of core reviewer time. - -There are essentially two major areas that a reviewer looks at when reviewing -a given change: design and implementation. The design part of the review -covers things like whether the change fits with the overall direction of the -project and whether new code is organized in a reasonable fashion. The -implementation part of a review will get into smaller details, such as -whether language functionality is being used properly and whether the general -sections of the code identified in the design part of the review do what is -intended. - -Generally design is considered first, and then the reviewer will drill down to -the implementation details of the chosen design. - -Problem Description -=================== -Many times an overall design for a given change will be agreed upon early in -the change's lifecycle. The implementation for the design may then be -tweaked multiple times (due to rebases, or specific issues pointed out by -reviewers) without any changes to the overall design. 
Many times these -implementation details are small changes that shouldn't require much -review effort, but because of our current standard of 2 +2's on the current -patch set before a change can be approved, reviewers often must unnecessarily -revisit a change even when it is clear that everyone involved in the review -is in favor of it. - -Proposed Change -=============== - -Overview --------- - -When appropriate, allow a core reviewer to approve a change even if the -latest patch set does not have 2 +2's. Specifically, this should be used -under the following circumstances: - -* A change that has had multiple +2's on past patch sets, indicating an - agreement from the other reviewers that the overall design of the change - is good. -* Any further alterations to the change since the patch set(s) with +2's should - be implementation details only - trivial rebases, minor syntax changes, or - comment/documentation changes. Any more significant changes invalidate this - option. - -As always, core reviewers should use their judgment. When in doubt, waiting -for 2 +2's to approve a change is always acceptable, but this new policy is -intended to make it socially acceptable to single approve a change under the -circumstances described above. - -When approving a change in this manner, it is preferable to leave a comment -explaining why the change is being approved without 2 +2's. - -Alternatives ------------- - -Allowing a single +2 on "trivial" changes was also discussed, but there were -concerns from a number of people present that such a policy might cause more -trouble than it was worth, particularly since "trivial" changes by nature do -not require much review and therefore don't take up much reviewer time. - -Security Impact ---------------- - -Should be minimal to none. If a change between patch sets is significant -enough to have a security impact then this policy does not apply. 
- -Other End User Impact ---------------------- - -None - -Performance Impact ------------------- - -None - -Other Deployer Impact ---------------------- - -None - -Developer Impact ----------------- - -Core reviewers will spend less time revisiting patches they have already -voted in favor of, and contributors should find it easier to get their -patches merged because they won't have to wait as long after rebases and -minor changes. - - -Implementation -============== - -Assignee(s) ------------ - -Primary assignee: - bnemec - -Other contributors: - All cores should review and implement this spec in their reviewing - -Work Items ----------- - -Publish the agreed-upon guidelines somewhere more permanent than a spec. - - -Dependencies -============ - -None - -Testing -======= - -None - -Documentation Impact -==================== - -A new document will need to be created for core reviewers to reference. - - -References -========== - -https://etherpad.openstack.org/p/kilo-tripleo-summit-reviews diff --git a/specs/liberty/release-branch.rst b/specs/liberty/release-branch.rst deleted file mode 100644 index 59e3a051..00000000 --- a/specs/liberty/release-branch.rst +++ /dev/null @@ -1,219 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -========================================== -Release Branch proposal for TripleO -========================================== - -To date, the majority of folks consuming TripleO have been doing so via the -master branches of the various repos required to allow TripleO to deploy -an OpenStack cloud. This proposes an alternative "release branch" methodology -which should enable those consuming stable OpenStack releases to deploy -more easily using TripleO. 
- - -Problem Description -=================== - -Historically strong guarantees about deploying the current stable OpenStack -release have not been made, and it's not something we've been testing in -upstream CI. This is fine from a developer perspective, but it's a major -impediment to those wishing to deploy production clouds based on the stable -OpenStack releases/branches. - -Proposed Change -=============== - -I propose we consider supporting additional "release" branches, for selected -TripleO repos where release-specific changes are required. - -The model will be based on the stable branch model[1] used by many/most -OpenStack projects, but with one difference, "feature" backports will be -permitted provided they are 100% compatible with the currently released -OpenStack services. - -Overview --------- - -The justification for allowing features is that many/most TripleO features are -actually enabling access to features of OpenStack services which will exist in -the stable branches of the services being deployed. Thus, the target audience -of this branch will likely want to consume such "features" to better access -features and configurations which are appropriate to the OpenStack release they -are consuming. - -The other aspect of justification is that projects are adding features -constantly, thus it's unlikely TripleO will be capable of aligning with every -possible new feature for, say Liberty, on day 1 of the release being made. The -recognition that we'll be playing "catch up", and adopting a suitable branch -policy should mean there is scope to continue that alignment after the services -themselves have been released, which will be of benefit to our users. - -Changes landing on the master branch can be considered as valid candidates for -backport, unless: - -* The patch requires new features of an OpenStack service (that do not exist - on the stable branches) to operate. 
E.g if a tripleo-heat-templates change - needs new-for-liberty Heat features it would *not* be allowed for release/kilo. - -* The patch enables Overcloud features of an OpenStack service that do not - exist on the stable branches of the supported Overcloud version (e.g for - release/kilo we only support kilo overcloud features). - -* User visible interfaces are modified, renamed or removed - removal of - deprecated interfaces may be allowed on the master branch (after a suitable - deprecation period), but these changes would *not* be valid for backport as - they could impact existing users without warning. Adding new interfaces - such as provider resources or parameters would be permitted provided the - default behavior does not impact existing users of the release branch. - -* The patch introduces new dependencies or changes the current requirements.txt. - -To make it easier to identify not-valid-for-backport changes, it's proposed -that a review process be adopted whereby a developer proposing a patch to -master would tag a commit if it doesn't meet the criteria above, or there is -some other reason why the patch would be unsuitable for backport. - -e.g: - - No-Backport: This patch requires new for Mitaka Heat features - - -Alternatives ------------- - -The main alternative to this is to leave upstream TripleO as something which -primarily targets developer/trunk-chasing users, and leave maintaining a -stable branch of the various components to downstream consumers of TripleO, -rdo-manager for example. - -The disadvantage of this approach is it's an impediment to adoption and -participation in the upstream project, so I feel it'd be better to do this work -upstream, and improve the experience for those wishing to deploy via TripleO -using only the upstream tools and releases. 
-
-
-Security Impact
----------------
-
-We'd need to ensure security-related patches landing in master got
-appropriately applied to the release branches (same as stable branches for all
-other projects).
-
-Other End User Impact
----------------------
-
-This should make it much easier for end users to stand up a TripleO deployed
-cloud using the stable released versions of OpenStack services.
-
-Other Deployer Impact
----------------------
-
-This may reduce duplication of effort when multiple downstream consumers of
-TripleO exist.
-
-Developer Impact
-----------------
-
-The proposal of valid backports will ideally be made by the developer
-proposing a patch to the master branch but, to avoid creating an undue barrier
-to entry for new contributors, this will not be mandatory; it will be
-recommended and encouraged via code review comments.
-
-Standard stable-maint processes[1] will be observed when proposing backports.
-
-We need to consider if we want a separate stable-maint core (as is common on
-most other projects), or if all tripleo-core members can approve backports.
-Initially it is anticipated to allow all tripleo-core, potentially with the
-addition of others with a specific interest in branch maintenance (e.g.
-downstream package maintainers).
-
-Implementation
-==============
-
-Initially the following repos will gain release branches:
-
-* openstack/tripleo-common
-* openstack/tripleo-docs
-* openstack/tripleo-heat-templates
-* openstack/tripleo-puppet-elements
-* openstack/python-tripleoclient
-* openstack/instack-undercloud
-
-These will all have a new branch created, ideally near the time of the upcoming
-liberty release, and to avoid undue modification to existing infra tooling,
-e.g. zuul, they will use the standard stable branch naming, e.g.:
-
-* stable/liberty
-
-If any additional repos require stable branches, we can add those later when
-required.
- -It is expected that any repos which don't have a stable/release branch must -maintain compatibility such that they don't break deploying the stable released -OpenStack version (if this proves impractical in any case, we'll create -branches when required). - -Also, when the release branches have been created, we will explicitly *not* -require the master branch for those repos to observe backwards compatibility, -with respect to consuming new OpenStack features. For example, new-for-mitaka -Heat features may be consumed on the master branch of tripleo-heat-templates -after we have a stable/liberty branch for that repo. - -Assignee(s) ------------ - -Primary assignee: - shardy - -Other contributors: - TBC - -Work Items ----------- - -1. Identify the repos which require release branches -2. Create the branches -3. Communicate need to backport to developers, consider options for automating -4. CI jobs to ensure the release branch stays working -5. Documentation to show how users may consume the release branch - -Testing -======= - -We'll need CI jobs configured to use the TripleO release branches, deploying -the stable branches of other OpenStack projects. Hopefully we can make use of -e.g RDO packages for most of the project stable branch content, then build -delorean packages for the tripleo release branch content. - -Ideally in future we'd also test upgrade from one release branch to another -(e.g current release from the previous, and/or from the release branch to -master). - -As a starting point derekh has suggested we create a single centos job, which -only tests HA, and that we'll avoid having a tripleo-ci release branch, -ideally using the under development[2] tripleo.sh developer script to abstract -any differences between deployment steps for branches. - -Documentation Impact -==================== - -We'll need to update the docs to show: - -1. How to deploy an undercloud node from the release branches using stable -OpenStack service versions -2. 
How to build images containing content from the release branches -3. How to deploy an overcloud using only the release branch versions - -References -========== - -We started discussing this idea in this thread: - -http://lists.openstack.org/pipermail/openstack-dev/2015-August/072217.html - -[1] https://wiki.openstack.org/wiki/StableBranch -[2] https://review.openstack.org/#/c/225096/ diff --git a/specs/mitaka/external-load-balancer.rst b/specs/mitaka/external-load-balancer.rst deleted file mode 100644 index 5b86dbb4..00000000 --- a/specs/mitaka/external-load-balancer.rst +++ /dev/null @@ -1,169 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -====================== -External Load Balancer -====================== - -https://blueprints.launchpad.net/tripleo/+spec/tripleo-mitaka-external-load-balancer - -Make it possible to use (optionally) an external load balancer as frontend for -the Overcloud. - - -Problem Description -=================== - -To use an external load balancer the Overcloud templates and manifests will be -updated to accomplish the following three: - -* accept a list of virtual IPs as parameter to be used instead of the virtual - IPs which are normally created as Neutron ports and hosted by the controllers - -* make the deployment and configuration of HAProxy on the controllers optional - -* allow for the assignment of a predefined list of IPs to the controller nodes - so that these can be used for the external load balancer configuration - - -Proposed Change -=============== - -Overview --------- - -The VipMap structure, governed by the ``OS::TripleO::Network::Ports::NetIpMap`` -resource type, will be switched to ``OS::TripleO::Network::Ports::NetVipMap``, -a more specific resource type so that it can pointed to a custom YAML allowing -for the VIPs to be provided by the user at deployment time. 
Any reference to the -VIPs in the templates will be updated to gather the VIP details from such a -structure. The existing VIP resources will also be switched from the non -specialized type ``OS::TripleO::Controller::Ports::InternalApiPort`` into a -more specific type ``OS::TripleO::Network::Ports::InternalApiVipPort`` so that -it will be possible to noop the VIPs or add support for more parameters as -required and independently from the controller ports resource. - -The deployment and configuration of HAProxy on the controller nodes will become -optional and driven by a new template parameter visible only to the controllers. - -It will be possible to provide via template parameters a predefined list of IPs -to be assigned to the controller nodes, on each network, so that these can be -configured as target IPs in the external load balancer, before the deployment -of the Overcloud is initiated. A new port YAML will be provided for the purpose; -when using an external load balancer this will be used for resources like -``OS::TripleO::Controller::Ports::InternalApiPort``. - -As a requirement for the deployment process to succeed, the external load -balancer must be configured in advance with the appropriate balancing rules and -target IPs. This is because the deployment process itself uses a number of -infrastructure services (database/messaging) as well as core OpenStack services -(Keystone) during the configuration steps. A validation script will be provided -so that connectivity to the VIPs can be tested in advance and hopefully avoid -false negatives during the deployment. - -Alternatives ------------- - -None. - -Security Impact ---------------- - -By filtering the incoming connections for the controller nodes, an external load -blancer might help the Overcloud survive network flood attacks or issues due -to purposely malformed API requests. 
- -Other End User Impact ---------------------- - -The deployer wishing to deploy with an external load balancer will have to -provide at deployment time a few more parameters, amongst which: - -* the VIPs configured on the balancer to be used by the Overcloud services - -* the IPs to be configured on the controllers, for each network - -Performance Impact ------------------- - -Given there won't be any instance of HAProxy running on the controllers, when -using an external load balancer these might benefit from a lower stress on the -TCP stack. - -Other Deployer Impact ---------------------- - -None expected unless deploying with an external load balancer. A sample -environment file will be provided to provide some guidance over the parameters -to be passed when deploying with an external load balancer. - -Developer Impact ----------------- - -In those scenarios where the deployer was using only a subset of the isolated -networks, the customization templates will need to be updated so that the new -VIPs resource type is nooped. This can be achieved with something like: - -.. code:: - - resource_registry: - OS::TripleO::Network::Ports::InternalApiVipPort: /path/to/network/ports/noop.yaml - - -Implementation -============== - -Assignee(s) ------------ - -Primary assignee: - gfidente - -Other contributors: - dprince - -Work Items ----------- - -* accept user provided collection of VIPs as parameter - -* make the deployment of the managed HAProxy optional - -* allow for the assignment of a predefined list of IPs to the controller nodes - -* add a validation script to test connectivity against the external VIPs - - -Dependencies -============ - -None. - - -Testing -======= - -The feature seems untestable in CI at the moment but it will be possible to test -at least the assignment of a predefined list of IPs to the controller nodes by -providing only the predefined list of IPs as parameter. 
- - -Documentation Impact -==================== - -In addition to documenting the specific template parameters needed when -deploying with an external load balancer, it will also be necessary to provide -some guidance for the configuration of the load balancer configuration so that -it will behave as expected in the event of a failure. Unfortunately the -configuration settings are strictly dependent on the balancer in use; we should -publish a copy of a managed HAProxy instance config to use as reference so that -a deployer could configure his external appliance similarily. - - -References -========== - -None. diff --git a/specs/mitaka/puppet-modules-deployment-via-swift.rst b/specs/mitaka/puppet-modules-deployment-via-swift.rst deleted file mode 100644 index ea7bd7fe..00000000 --- a/specs/mitaka/puppet-modules-deployment-via-swift.rst +++ /dev/null @@ -1,202 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -================================== -Puppet Module Deployment via Swift -================================== - -Include the URL of your launchpad blueprint: - -https://blueprints.launchpad.net/tripleo/+spec/puppet-modules-deployment-via-swift - -The ability to deploy a local directory of puppet modules to an overcloud -using the OpenStack swift object service. - -Problem Description -=================== - -When deploying puppet modules to the overcloud there are currently three - options: - - * pre-install the puppet modules into a "golden" image. You can pre-install - modules via git sources or by using a distro package. - - * use a "firstboot" script to rsync the modules from the undercloud (or - some other rsync server that is available). - - * post-install the puppet modules via a package upgrade onto a running - Overcloud server by using a (RPM, Deb, etc.) 
- -None of the above mechanisms provides an easy workflow when making -minor (ad-hoc) changes to the puppet modules and only distro packages can be -used to provide updated puppet modules to an already deployed overcloud. -While we do have a way to rsync over updated modules on "firstboot" via -rsync this isn't a useful mechanism for operator who may wish to -use heat stack-update to deploy puppet changes without having to build -a new RPM/Deb package for each revision. - -Proposed Change -=============== - -Overview --------- - -Create an optional (opt-in) workflow that if enabled will allow an operator -to create and deploy a local artifact (tarball, distro package, etc.) of -puppet modules to a new or existing overcloud via heat stack-create and -stack-update. The mechanism would use the OpenStack object store service -(rather than rsync) which we already have available on the undercloud. -The new workflow would work like this: - - * A puppet modules artifact (tarball, distro package, etc.) would be uploaded - into a swift container. - - * The container would be configured so that a Swift Temp URL can be generated - - * A Swift Temp URL would be generated for the puppet modules URL that is - stored in swift - - * A heat environment would be generated which sets a DeployArtifactURLs - parameter to this swift URL. (the parameter could be a list so that - multiple URLs could also be downloaded.) - - * The TripleO Heat Templates would be modified so that they include a new - 'script' step which if it detects a custom DeployArtifactURLs parameter - would automatically download the artifact from the provided URL, and - deploy it locally on each overcloud role during the deployment workflow. - By "deploy locally" we mean a tarball would be extracted, and RPM would - get installed, etc. 
The actual deployment mechanism will be pluggable - such that both tarballs and distro packages will be supported and future - additions might be added as well so long as they also fit into the generic - DeployArtifactURLs abstraction. - - * The Operator could then use the generated heat environment to deploy - a new set of puppet modules via heat stack-create or heat stack-update. - - * TripleO client could be modified so that it automically loads - generated heat environments in a convienent location. This (optional) - extra step would make enabling the above workflow transparent and - only require the operator to run a 'upload-puppet-modules' tool to - upload and configure new puppet modules for deployment via Swift. - -Alternatives ------------- - -There are many alternatives we could use to obtain a similar workflow that -allows the operator to more deploy puppet modules from a local directory: - - * Setting up a puppet master would allow a similar workflow. The downside - of this approach is that it would require a bit of overhead, and it - is puppet specific (the deployment mechanism would need to be re-worked - if we ever had other types of on-disk files to update). - - * Rsync. We already support rsync for firstboot scripts. The downside of - rsync is it requires extra setup, and doesn't have an API like - OpenStack swift does allowing for local or remote management and updates - to the puppet modules. - -Security Impact ---------------- - -The new deployment would use a Swift Temp URL over HTTP/HTTPS. The duration -of the Swift Temp URL's can be controlled when they are signed via -swift-temp-url if extra security is desired. By using a Swift Temp URL we -avoid the need to pass the administrators credentials onto each overcloud -node for swiftclient and instead can simply use curl (or wget) to download -the updated puppet modules. 
Given we already deploy images over http/https -using an undercloud the use of Swift in this manner should pose minimal extra -security risks. - -Other End User Impact ---------------------- - -The ability to deploy puppet modules via Swift will be opt-in so the -impact on end users would be minimal. The heat templates will contain -a new script deployment that may take a few extra seconds to deploy on -each node (even if the feature is not enabled). We could avoid the extra -deployment time perhaps by noop'ing out the heat resource for the new -swift puppet module deployment. - -Performance Impact ------------------- - -Developers and Operators would likely be able to deploy puppet module changes -more quickly (without having to create a distro package). The actual deployment -of puppet modules via swift (downloading and extracting the tarball) would -likely be just as fast as a tarball. - -Other Deployer Impact ---------------------- - -None. - - -Developer Impact ----------------- - -Being able to more easily deploy updated puppet modules to an overcloud would -likely speed up the development update and testing cycle of puppet modules. - - -Implementation -============== - -Assignee(s) ------------ - -Primary assignee: - dan-prince - -Work Items ----------- - - * Create an upload-puppet-modules script in tripleo-common. Initially this - may be a bash script which we ultimately refine into a Python version if - it proves useful. - - * Modify tripleo-heat-templates so that it supports a DeployArtifactURLs - parameter (if the parameter is set) attempt to deploy the list of - files from this parameter. The actual contents of the file might be - a tarball or a distribution package (RPM). - - * Modify tripleoclient so that the workflow around using upload-puppet-modules - can be "transparent". 
Simply running upload-puppet-modules would not only - upload the puppet modules it would also generate a Heat environment that - would then automatically configure heat stack-update/create commands - to use the new URL via a custom heat environment. - - * Update our CI scripts in tripleo-ci and/or tripleo-common so that we - make use of the new Puppet modules deployment mechanism. - - * Update tripleo-docs to make note of the new feature. - -Dependencies -============ - -None. - -Testing -======= - -We would likely want to switch to use this feature in our CI because -it allows us to avoid git cloning the same puppet modules for both -the undercloud and overcloud nodes. Simply calling the extra -upload-puppet-modules script on the undercloud as part of our -deployment workflow would enable the feature and allow it to be tested. - -Documentation Impact -==================== - -We would need to document the additional (optional) workflow associated -with deploying puppet modules via Swift. - - -References -========== - - * https://review.openstack.org/#/c/245314/ (Add support for DeployArtifactURLs) - * https://review.openstack.org/#/c/245310/ (Add scripts/upload-swift-artifacts) - * https://review.openstack.org/#/c/245172/ (tripleoclient --environment) diff --git a/specs/mitaka/refactor-puppet-manifests.rst b/specs/mitaka/refactor-puppet-manifests.rst deleted file mode 100644 index c43d42db..00000000 --- a/specs/mitaka/refactor-puppet-manifests.rst +++ /dev/null @@ -1,129 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. 
- - http://creativecommons.org/licenses/by/3.0/legalcode - -========================================== -Refactor top level puppet manifests -========================================== - -Launchpad blueprint: - -https://blueprints.launchpad.net/tripleo/+spec/refactor-puppet-manifests - -The current overcloud controller puppet manifests duplicate a large amount -of code between the pacemaker (HA) and non-ha version. We can reduce the -effort required to add new features by refactoring this code, and since -there is already a puppet-tripleo module this is the logical destination. - -Problem Description -=================== - -Large amounts of puppet/manifests/overcloud\_controller.pp are shared with -puppet/manifests/overcloud\_controller\_pacemaker.pp. When adding a feature -or fixing a mistake in the former, it is frequently also an issue in the -latter. It is a violation of the common programming principle of DRY, which -while not an inviolable rule, is usually considered good practice. - -In addition, moving this code into separate classes in another module will -make it simpler to enable/disable components, as it will be a matter of -merely controlling which classes (profiles) are included. - -Finally, it allows easier experimentation with modifying the 'ha strategy'. -Currently this is done using 'step', but could in theory be done using a -service registry. By refactoring into ha+non-ha classes this would be quite -simple to swap in/out. - -Proposed Change -=============== - -Overview --------- - -While there are significant differences in ha and non-ha deployments, in almost -all cases the ha code will be a superset of the non-ha. A simple example of -this is at the top of both files, where the load balancer is handled. The non -ha version simply includes the loadbalancing class, while the HA version -instantiates the exact same class but with some parameters changed. 
Across -the board the same classes are included for the openstack services, but with -manage service set to false in the HA case. - -I propose first breaking up the non-ha version into profiles which can reside -in puppet-tripleo/manifests/profile/nonha, then adding ha versions which -use those classes under puppet-tripleo-manifests/profile/pacemaker. Pacemaker -could be described as an 'ha strategy' which in theory should be replaceable. -For this reason we use a pacemaker subfolder since one day perhaps we'll have -an alternative. - -Alternatives ------------- - -We could leave things as they are, which works and isn't the end of the world, -but it's probably not optimal. - -We could use kolla or something that removes the need for puppet entirely, but -this discussion is outside the scope of this spec. - -Security Impact ---------------- - -None - -Other End User Impact ---------------------- - -It will make downstreams happy since they can sub in/out classes more easily. - -Performance Impact ------------------- - -Adding wrapper classes isn't going to impact puppet compile times very much. - -Other Deployer Impact ---------------------- - -None - -Developer Impact ----------------- - -Changes in t-h-t and puppet-tripleo will often be coupled, as t-h-t -defines the data on which puppet-tripleo depends on. - -Implementation -============== - -Assignee(s) ------------ - -Primary assignee: - michaeltchapman - -Work Items ----------- - -Move overcloud controller to profile classes -Move overcloud controller pacemaker to profile classes -Move any other classes from the smaller manifests in t-h-t - -Dependencies -============ - -None - -Testing -======= - -No new features so current tests apply in their entirety. 
-Additional testing can be added for each profile class - -Documentation Impact -==================== - -None - -References -========== - -None diff --git a/specs/mitaka/tripleo-mistral-deployment-library.rst b/specs/mitaka/tripleo-mistral-deployment-library.rst deleted file mode 100644 index 34b9a2ee..00000000 --- a/specs/mitaka/tripleo-mistral-deployment-library.rst +++ /dev/null @@ -1,274 +0,0 @@ -============================================================ -Library support for TripleO Overcloud Deployment Via Mistral -============================================================ - -We need a TripleO library that supports the overcloud deployment workflow. - -Problem Description -=================== - -TripleO has an overcloud deployment workflow that uses Heat templates and uses -the following steps: - -* The user edits the templates and environment file. These can be stored - anywhere. -* Templates may be validated by Heat. -* Templates and environment are sent to Heat for overcloud deployment. - -This workflow is already supported by the CLI. - -However from a GUI perspective, although the workflow is straightforward, it is -not simple. Here are some of the complications that arise: - -* Some of the business logic in this workflow is contained in the CLI itself, - making it difficult for other UIs to use. -* If the TripleO overcloud deployment workflow changes, it is easy for the CLI - and GUI approach to end up on divergent paths - a dangerous situation. -* The CLI approach allows open-ended flexibility (the CLI doesn't care where - the templates come from) that is detrimental for a GUI (the GUI user doesn't - care where the templates are stored, but consistency in approach is desirable - to prevent divergence among GUIs and CLIs). - -There is a need to create common code that accommodates the flexibility of the -CLI with the ease-of-use needs of GUI consumers. 
- -Proposed Change -=============== - -In order to solve this problem, we propose to create a Mistral-integrated -deployment with the following: - -* Encapsulate the business logic involved in the overcloud deployment workflow - within the tripleo-common library utilizing Mistral actions and workflows. -* Provide a simplified workflow to hide unneeded complexity from GUI consumers -* Update the CLI to use this code where appropriate to prevent divergence with - GUIs. - -The first three points deserve further explanation. First, let us lay out the -proposed GUI workflow. - -1. A user pushes the Heat deployment templates into swift. -2. The user defines values for the template resource types given by Heat - template capabilities which are stored in an environment[1]. Note that this - spec will be completed by mitaka at the earliest. A workaround is discussed - below. -3. Now that the template resource types are specified, the user can configure - deployment parameters given by Heat. Edited parameters are updated and are - stored in an environment. 'Roles' can still be derived from available Heat - parameters[2]. -4. Steps 2 and 3 can be repeated. -5. With configuration complete, the user triggers the deployment of the - overcloud. The templates and environment file are taken from Swift - and sent to Heat. -6. Once overcloud deployment is complete, any needed post-deploy config is - performed. - -The CLI and GUI will both use the Swift workflow and store the templates into -Swift. This would facilitate the potential to switch to the UI from a CLI based -deployment and vice-versa. - -Mistral Workflows are composed of Tasks, which group together one or more -Actions to be executed with a Workflow Execution. The Action is implemented as -a class with an initialization method and a run method. The run method provides -a single execution point for Python code. Any persistence of state required for -Actions or Workflows will be stored in a Mistral Environment object. 
- -In some cases, an OpenStack Service may be missing a feature needed for TripleO -or it might only be accessible through its associated Python client. To -mitigate this issue in the short term, some of the Actions will need to be -executed directly with an Action Execution [3] which calls the Action directly and -returns instantly, but also doesn't have access to the same context as a -Workflow Execution. In theory, every action execution should be replaced by an -OpenStack service API call. - -Below is a summary of the intended Workflows and Actions to be executed from the -CLI or the GUI using the python-mistralclient or Mistral API. There may be -additional actions or library code necessary to enable these operations that -will not be intended to be consumed directly. - -Workflows: - - * Node Registration - * Node Introspection - * Plan Creation - * Plan Deletion - * Deploy - * Validation Operations - -Actions: - - * Plan List - * Get Capabilites - * Update Capabilities - * Get Parameters - * Update Parameters - * Roles List - -For Flavors and Image management, the Nova and Glance APIs will be used -respectively. - -The registration and introspection of nodes will be implemented within a -Mistral Workflow. The logic is currently in tripleoclient and will be ported, -as certain node configurations are specified as part of the logic (ramdisk, -kernel names, etc.) so the user does not have to specify those. Tagging, -listing and deleting nodes will happen via the Ironic/Inspectors APIs as -appropriate. - -A deployment plan consists of a collection of heat templates in a Swift -container, combined with data stored in a Mistral Environment. When the plan is -first created, the capabilities map data will be parsed and stored in the -associated Mistral Environment. The templates will need to be uploaded to a -Swift container with the same name as the stack to be created. 
While any user -could use a raw POST request to accomplish this, the GUI and CLI will provide -convenience functions improve the user experience. The convenience functions -will be implemented in an Action that can be used directly or included in a -Workflow. - -The deletion of a plan will be implemented in a Workflow to ensure there isn't -an associated stack before deleting the templates, container and Mistral -Environment. Listing the plans will be accomplished by calling -'mistral environment-list'. - -To get a list of the available Heat environment files with descriptions and -constraints, the library will have an Action that returns the information about -capabilities added during plan creation and identifies which Heat environment -files have already been selected. There will also be an action that accepts a -list of user selected Heat environment files and stores the information in the -Mistral Environment. It would be inconvenient to use a Workflow for these -actions as they just read or update the Mistral Environment and do not require -additional logic. - -The identification of Roles will be implemented in a Workflow that calls out to -Heat. - -To obtain the deployment parameters, Actions will be created that will call out -to heat with the required template information to obtain the parameters and set -the parameter values to the Environment. - -To perform TripleO validations, Workflows and associated Actions will be created -to support list, start, stop, and results operations. See the spec [4] for more -information on how the validations will be implemented with Mistral. - -Alternatives ------------- - -One alternative is to force non-CLI UIs to re-implement the business logic -currently contained within the CLI. This is not a good alternative. 
Another -possible alternative would be to create a REST API [5] to abstract TripleO -deployment logic, but it would require considerably more effort to create and -maintain and has been discussed at length on the mailing list. [6][7] - -Security Impact ---------------- - -Other End User Impact ---------------------- - -The --templates workflow will end up being modified to use the updated -tripleo-common library. - -Integrating with Mistral is a straightforward process and this may result in -increased usage. - -Performance Impact ------------------- - -None - -Other Deployer Impact ---------------------- - -None - -Developer Impact ----------------- - -Rather than write workflow code in python-tripleoclient directly developers will -now create Mistral Actions and Workflows that help implement the requirements. - -Right now, changing the overcloud deployment workflow results in stress due to -the need to individually update both the CLI and GUI code. Converging the two -makes this a far easier proposition. However developers will need to have this -architecture in mind and ensure that changes to the --templates or --plan -workflow are maintained in the tripleo-common library (when appropriate) to -avoid unneeded divergences. - -Implementation -============== - -Assignee(s) ------------ -Primary assignees: - -* rbrady -* jtomasek -* dprince - -Work Items ----------- -The work items required are: - -* Develop the tripleo-common Mistral actions that provide all of the - functionality required for our deployment workflows. -* This involves moving much of the code out of python-tripleoclient and into - generic, narrowly focused, Mistral actions that can be consumed via the - Mistral API. -* Create new Mistral workflows to help with high level things like deployment, - introspection, node registration, etc. -* tripleo-common is more of an internal library, and its logic is meant to be - consumed (almost) solely by using Mistral - actions. 
Projects should not attempt to circumvent the API by using - tripleo-common as a library as much as possible. - There may be some exceptions to this for common polling functions, etc. but in - general all core workflow logic should be API driven. -* Update the CLI to consume these Mistral actions directly via - python-mistralclient. - -All patches that implement these changes must pass CI and add additional tests -as needed. - -Dependencies -============ - -None - - -Testing -======= - -The TripleO CI should be updated to test the updated tripleo-common library. - -Our intent is to make tripleoclient consume Mistral actions as we write them. -Because all of the existing upstream Tripleo CI release on tripleoclient taking -this approach ensures that our all of our workflow actions always work. This -should get us coverage on 90% of the Mistral actions and workflows and allow us -to proceed with the implementation iteratively/quickly. Once the UI is installed -and part of our upstream CI we can also rely on coverage there to ensure we -don't have breakages. - -Documentation Impact -==================== - -Mistral Actions and Workflows are sort of self-documenting and can be easily -introspected by running 'mistral workflow-list' or 'mistral action-list' on the -command line. The updated library however will have to be well-documented and -meet OpenStack standards. Documentation will be needed in both the -tripleo-common and tripleo-docs repositories. 
- -References -========== - -[1] https://specs.openstack.org/openstack/heat-specs/specs/mitaka/resource-capabilities.html - -[2] https://specs.openstack.org/openstack/heat-specs/specs/liberty/nested-validation.html - -[3] http://docs.openstack.org/developer/mistral/terminology/executions.html - -[4] https://review.openstack.org/#/c/255792/ - -[5] http://specs.openstack.org/openstack/tripleo-specs/specs/mitaka/tripleo-overcloud-deployment-library.html - -[6] http://lists.openstack.org/pipermail/openstack-dev/2016-January/083943.html - -[7] http://lists.openstack.org/pipermail/openstack-dev/2016-January/083757.html - diff --git a/specs/mitaka/tripleo-overcloud-deployment-library.rst b/specs/mitaka/tripleo-overcloud-deployment-library.rst deleted file mode 100644 index ff2482f0..00000000 --- a/specs/mitaka/tripleo-overcloud-deployment-library.rst +++ /dev/null @@ -1,244 +0,0 @@ -================================================ -Library support for TripleO Overcloud Deployment -================================================ - -We need a TripleO library that supports the overcloud deployment workflow. - -Problem Description -=================== - -With Tuskar insufficient for complex overcloud deployments, TripleO has moved to -an overcloud deployment workflow that bypasses Tuskar. This workflow can be -summarized as follows: - - * The user edits the templates and environment file. These can be stored - anywhere. - * Templates may be validated by Heat. - * Templates and environment are sent to Heat for overcloud deployment. - * Post-deploy, overcloud endpoints are configured. - -This workflow is already supported by the CLI. - -However from a GUI perspective, although the workflow is straightforward, it is -not simple. Here are some of the complications that arise: - - * Some of the business logic in this workflow is contained in the CLI itself, - making it difficult for other UIs to use. 
- * If the TripleO overcloud deployment workflow changes, it is easy for the CLI - and GUI approach to end up on divergent paths - a dangerous situation. - * The CLI approach allows open-ended flexibility (the CLI doesn't care where the - templates come from) that is detrimental for a GUI (the GUI user doesn't care - where the templates are stored, but consistency in approach is desirable to - prevent divergence among GUIs). - -There is a need to create common code that accommodates the flexibility of the -CLI with the ease-of-use needs of Python-based GUI consumers. Note that an API -will eventually be needed in order to accommodate non-Python GUIs. The work -there will be detailed in a separate spec. - -Proposed Change -=============== - -In order to solve this problem, we propose the following: - - * Encapsulate the business logic involved in the overcloud deployment workflow - within the tripleo-common library. - * Provide a simplified workflow to hide unneeded complexity from GUI consumers - - for example, template storage. - * Update the CLI to use this code where appropriate to prevent divergence with - GUIs. - -The first two points deserve further explanation. First, let us lay out the -proposed GUI workflow. We will refer to the Heat files the user desires to use -for the overcloud deployment as a 'plan'. - -1. A user creates a plan by pushing a copy of the Heat deployment templates into - a data store. -2. The user defines values for the template resource types given by Heat - template capabilities. This results in an updated resource registry in an - environment file saved to the data store. - (https://review.openstack.org/#/c/196656/7/specs/liberty/resource-capabilities.rst) - Note that this spec will be completed by mitaka at the earliest. A - workaround is discussed below. -3. Now that the template resource types are specified, the user can configure - deployment parameters given by Heat. 
Edited parameters are updated and an - updated environment file is saved to the data store. 'Roles' no longer exist - in Tuskar, but can still be derived from available Heat parameters. - (https://review.openstack.org/#/c/197199/5/specs/liberty/nested-validation.rst) -4. Steps 2 and 3 can be repeated. -5. With configuration complete, the user triggers the deployment of the - overcloud. The templates and environment file are taken from the data store - and sent to Heat. -6. Once overcloud deployment is complete, any needed post-deploy config is - performed. - -In order to fulfill this workflow, we propose to initially promote the use of -Swift as the template data store. This usage will be abstracted away behind -the tripleo-common library, and later updates may allow the use of other data -stores. - -Note that the Swift-workflow is intended to be an alternative to the current CLI -'--templates' workflow. Both would end up being options under the CLI; a user -could choose '--templates' or '--plan'. However they would both be backed by -common tripleo-common library code, with the '--plan' option simply calling -additional functions to pull the plan information from Swift. And GUIs that -expect a Swift-backed deployment would lose functionality if the deployment -is deployed using the '--templates' CLI workflow. - -The tripleo-common library functions needed are: - - * **Plan CRUD** - - * **create_plan(plan_name, plan_files)**: Creates a plan by creating a Swift - container matching plan_name, and placing all files needed for that plan - into that container (for Heat that would be the 'parent' templates, nested - stack templates, environment file, etc). The Swift container will be - created with object versioning active to allow for versioned updates. - * **get_plan(plan_name)**: Retrieves the Heat templates and environment file - from the Swift container matching plan_name. 
- * **update_plan(plan_name, plan_files)**: Updates a plan by updating the - plan files in the Swift container matching plan_name. This may necessitate - an update to the environment file to add and/or remove parameters. Although - updates are versioned, retrieval of past versions will not be implemented - until the future. - * **delete_plan(plan_name)**: Deletes a plan by deleting the Swift container - matching plan_name, but only if there is no deployed overcloud that was - deployed with the plan. - - * **Deployment Options** - - * **get_deployment_plan_resource_types(plan_name)**: Determine available - template resource types by retrieving plan_name's templates from Swift and - using the proposed Heat resource-capabilities API - (https://review.openstack.org/#/c/196656/7/specs/liberty/resource-capabilities.rst). - If that API is not ready in the required timeframe, then we will implement - a temporary workaround - a manually created map between templates and - provider resources. We would work closely with the spec developers to try - and ensure that the output of this method matches their proposed output, so - that once their API is ready, replacement is easy. - * **update_deployment_plan_resource_types(plan_name, resource_types)**: - Retrieve plan_name's environment file from Swift and update the - resource_registry tree according to the values passed in by resource_types. - Then update the environment file in Swift. - - * **Deployment Configuration** - - * **get_deployment_parameters(plan_name)**: Determine available deployment - parameters by retrieving plan_name's templates from Swift and using the - proposed Heat nested-validation API call - (https://review.openstack.org/#/c/197199/5/specs/liberty/nested-validation.rst). - * **update_deployment_parameters(plan_name, deployment_parameters)**: - Retrieve plan_name's environment file from Swift and update the parameters - according to the values passed in by deployment_parameters. 
Then update the - environment file in Swift. - * **get_deployment_roles(plan_name)**: Determine available deployment roles. - This can be done by retrieving plan_name's deployment parameters and - deriving available roles from parameter names; or by looking at the top- - level ResourceGroup types. - - * **Deployment** - - * **validate_plan(plan_name)**: Retrieve plan_name's templates and environment - file from Swift and use them in a Heat API validation call. - * **deploy_plan(plan_name)**: Retrieve plan_name's templates and environment - file from Swift and use them in a Heat API call to create the overcloud - stack. Perform any needed pre-processing of the templates, such as the - template file dictionary needed by Heat. This function will return a Heat - stack ID that can be used to monitor the status of the deployment. - - * **Post-Deploy** - - * **postdeploy_plan(plan_name)**: Initialize the API endpoints of the - overcloud corresponding to plan_name. - -Alternatives ------------- - -The alternative is to force non-CLI UIs to re-implement the business logic -currently contained within the CLI. This is not a good alternative. - -Security Impact ---------------- - -Other End User Impact ---------------------- - -The --templates workflow will end up being modified to use the updated -tripleo-common library. - -Python-based code would find it far easier to adapt the TripleO method of -deployment. This may result in increased usage. - -Performance Impact ------------------- - -None - -Other Deployer Impact ---------------------- - -None - -Developer Impact ----------------- - -Right now, changing the overcloud deployment workflow results in stress due to -the need to individually update both the CLI and GUI code. Converging the two -makes this a far easier proposition. 
However developers will need to have this -architecture in mind and ensure that changes to the --templates or --plan -workflow are maintained in the tripleo-common library (when appropriate) to -avoid unneeded divergences. - -Another important item to note is that we will need to keep the TripleO CI -updated with changes, and will be responsible for fixing the CI as needed. - - -Implementation -============== - -Assignee(s) ------------ -Primary assignees: - -* tzumainn -* akrivoka -* jtomasek -* dmatthews - -Work Items ----------- - -The work items required are: - - * Develop the tripleo-common library to provide the functionality described - above. This also involves moving code from the CLI to tripleo-common. - * Update the CLI to use the tripleo-common library. - -All patches that implement these changes must pass CI and add additional tests as -needed. - - -Dependencies -============ - -We are dependent upon two HEAT specs: - - * Heat resource-capabilities API - (https://review.openstack.org/#/c/196656/7/specs/liberty/resource-capabilities.rst) - * Heat nested-validation API - (https://review.openstack.org/#/c/197199/5/specs/liberty/nested-validation.rst) - -Testing -======= - -The TripleO CI should be updated to test the updated tripleo-common library. - -Documentation Impact -==================== - -The updated library with its Swift-backed workflow will have to be well- -documented and meet OpenStack standards. Documentation will be needed in both -the tripleo-common and tripleo-docs repositories. - -References -========== diff --git a/specs/mitaka/tripleo-quickstart.rst b/specs/mitaka/tripleo-quickstart.rst deleted file mode 100644 index 65c01bf6..00000000 --- a/specs/mitaka/tripleo-quickstart.rst +++ /dev/null @@ -1,140 +0,0 @@ -================== -TripleO Quickstart -================== - -https://blueprints.launchpad.net/tripleo/+spec/tripleo-quickstart - -We need a common way for developers/CI systems to quickly stand up a virtual -environment. 
- -Problem Description -=================== - -The tool we currently document for this use case is instack-virt-setup. -However this tool has two major issues, and some missing features: - -* There is no upstream CI using it. This means we have no way to test changes - other than manually. This is a huge barrier to adding the missing features. - -* It relies on a maze of bash scripts in the incubator repository[1] in order - to work. This is a barrier to new users, as it can take quite a bit of time - to find and then navigate that maze. - -* It has no way to use a pre-built undercloud image instead of starting from - scratch and redoing the same work that CI and every other tripleo developer - is doing on every run. Starting from a pre-built undercloud with overcloud - images prebaked can be a significant time savings for both CI systems as well - as developer test environments. - -* It has no way to create this undercloud image either. - -* There are other smaller missing features like automatically tagging the fake - baremetals with profile capability tags via instackenv.json. These would not - be too painful to implement, but without CI even small changes carry some - amount of pain. - -Proposed Change -=============== - -Overview --------- - -* Import the tripleo-quickstart[2] tool that RDO is using for this purpose. - This project is a set of ansible roles that can be used to build an - undercloud.qcow2, or alternatively to consume it. It was patterned after - instack-virt-setup, and anything configurable via instack-virt-setup is - configurable in tripleo-quickstart. - -* Use third-party CI for self-gating this new project. In order to setup an - environment similar to how developers and users can use this tool, we need - a baremetal host. The CI that currently self gates this project is setup on - ci.centos.org[3], and setting this up as third party CI would not be hard. 
- -Alternatives ------------- - -* One alternative is to keep using instack-virt-setup for this use case. - However, we would still need to add CI for instack-virt-setup. This would - still need to be outside of tripleoci, since it requires a baremetal host. - Unless someone is volunteering to set that up, this is not really a viable - alternative. - -* Similarly, we could use some other method for creating virtual environments. - However, this alternative is similarly constrained by needing third-party CI - for validation. - -Security Impact ---------------- - -None - -Other End User Impact ---------------------- - -Using a pre-built undercloud.qcow2 drastically symplifies the virt-setup -instructions, and therefore is less error prone. This should lead to a better -new user experience of TripleO. - -Performance Impact ------------------- - -Using a pre-built undercloud.qcow2 will shave 30+ minutes from the CI -gate jobs. - -Other Deployer Impact ---------------------- - -There is no reason this same undercloud.qcow2 could not be used to deploy -real baremetal environments. There have been many production deployments of -TripleO that have used a VM undercloud. - -Developer Impact ----------------- - -The undercloud.qcow2 approach makes it much easier and faster to reproduce -exactly what is run in CI. This leads to a much better developer experience. - -Implementation -============== - -Assignee(s) ------------ -Primary assignees: - -* trown - -Work Items ----------- - -* Import the existing work from the RDO community to the openstack namespace - under the TripleO umbrella. - -* Setup third-party CI running in ci.centos.org to self-gate this new project. - (We can just update the current CI[3] to point at the new upstream location) - -* Documentation will need to be updated for the virtual environment setup. - -Dependencies -============ - -Currently, the only undercloud.qcow2 available is built in RDO. 
We would -either need to build one in tripleo-ci, or use the one built in RDO. - -Testing -======= - -We need a way to CI the virtual environment setup. This is not feasible within -tripleoci, since it requires a baremetal host machine. We will need to rely on -third party CI for this. - -Documentation Impact -==================== - -Overall this will be a major simplification of the documentation. - -References -========== - -[1] https://github.com/openstack/tripleo-incubator/tree/master/scripts -[2] https://github.com/redhat-openstack/tripleo-quickstart -[3] https://ci.centos.org/view/rdo/job/tripleo-quickstart-gate-mitaka-delorean-minimal/ diff --git a/specs/mitaka/tripleo-ui.rst b/specs/mitaka/tripleo-ui.rst deleted file mode 100644 index 5f726297..00000000 --- a/specs/mitaka/tripleo-ui.rst +++ /dev/null @@ -1,175 +0,0 @@ -========== -TripleO UI -========== - -We need a graphical user interface that will support deploying OpenStack using -TripleO. - -Problem Description -=================== - -Tuskar-UI, the only currently existing GUI capable of TripleO deployments, has -several significant issues. - -Firstly, its back-end relies on an obsolete version of the Tuskar API, which is -insufficient for complex overcloud deployments. - -Secondly, it is implemented as a Horizon plugin and placed under the Horizon -umbrella, which has proven to be suboptimal, for several reasons: - - * The placement under the Horizon program. In order to be able to develop the - Tuskar-UI, one needs deep familiarity with both Horizon and TripleO projects. - Furthermore, in order to be able to approve patches, one needs to be a - Horizon core reviewer. This restriction reduces the number of people who can - contribute drastically, as well as makes it hard for Tuskar-UI developers to - actually land code. - - * The complexity of the Horizon Django application. Horizon is a very complex - heavyweight application comprised of many OpenStack services. 
It has become - very large, inflexible and consists of several unnecessary middle layers. As - a result of this, we have been witnessing the emergence of several new GUIs - implemented as independent (usually fully client-side JavaScript) applications, - rather than as Horizon plugins. Ironic webclient[1] is one such example. This - downside of Horizon has been recognized and an attempt to address it is - described in the next point. - - * The move to Angular JS (version 1). In an attempt to address the issues listed - above, the Horizon community decided to rewrite it in Angular JS. However, - instead of doing a total rewrite, they opted for a more gradual approach, - resulting in even more middle layers (the original Django layer turned into an - API for Angular based front end). Although the intention is to eventually - get rid of the unwanted layers, the move is happening very slowly. In - addition, this rewrite of Horizon is to AngularJS version 1, which may soon - become obsolete, with version 2 just around the corner. This probably means - another complete rewrite in not too distant future. - - * Packaging issues. The move to AngularJS brought along a new set of issues - related to the poor state of packaging of nodejs based tooling in all major - Linux distributions. - -Proposed Change -=============== - -Overview --------- - -In order to address the need for a TripleO based GUI, while avoiding the issues -listed above, we propose introducing a new GUI project, *TripleO UI*, under the -TripleO program. - -As it is a TripleO specific UI, TripleO GUI will be placed under the TripleO -program, which will bring it to attention of TripleO reviewers and allow -TripleO core reviewers to approve patches. This should facilitate the code -contribution process. - -TripleO UI will be a web UI designed for overcloud deployment and -management. It will be a lightweight, independent client-side application, -designed for flexibility, adaptability and reusability. 
- -TripleO UI will be a fully client-side JavaScript application. It will be -stateless and contain no business logic. It will consume the TripleO REST API[2], -which will expose the overcloud deployment workflow business logic implemented -in the tripleo-common library[3]. As opposed to the previous architecture which -included many unwanted middle layers, this one will be very simple, consisting -only of the REST API serving JSON, and the client-side JavaScript application -consuming it. - -The development stack will consist of ReactJS[4] and Flux[5]. We will use ReactJS -to implement the web UI components, and Flux for architecture design. - -Due to the packaging problems described above, we will not provide any packages -for the application for now. We will simply make the code available for use. - -Alternatives ------------- - -The alternative is to keep developing Tuskar-UI under the Horizon umbrella. In -addition to all the problems outlined above, this approach would also mean a -complete re-write of Tuskar-UI back-end to make it use the new tripleo-common -library. - -Security Impact ---------------- - -This proposal introduces a brand new application; all the standard security -concerns which come with building a client-side web application apply. - -Other End User Impact ---------------------- - -We plan to build a standalone web UI which will be capable of deploying -OpenStack with TripleO. Since as of now no such GUIs exist, this can be a huge -boost for adoption of TripleO. - -Performance Impact ------------------- - -The proposed technology stack, ReactJS and Flux, have excellent performance -characteristics. TripleO UI should be a lightweight, fast, flexible application. - - -Other Deployer Impact ---------------------- - -None - -Developer Impact ----------------- - -Right now, development on Tuskar-UI is uncomfortable for the reasons -detailed above. 
This proposal should result in more comfortable development -as it logically places TripleO UI under the TripleO program, which brings -it under the direct attention of TripleO developers and core reviewers. - -Implementation -============== - -Assignee(s) ------------ -Primary assignees: - -* jtomasek -* flfuchs -* jrist -* - -Work Items ----------- - -This is a general proposal regarding the adoption of a new graphical user -interface under the TripleO program. The implementation of specific features -will be covered in subsequent proposals. - -Dependencies -============ - -We are dependent upon the creation of the TripleO REST API[2], which in turn -depends on the tripleo-common[3] library containing all the functionality -necessary for advanced overcloud deployment. - -Alternatively, using Mistral to provide a REST API, instead of building a new -API, is currently being investigated as another option. - -Testing -======= - -TripleO UI should be thoroughly tested, including unit tests and integration -tests. Every new feature and bug fix should be accompanied by appropriate tests. - -The TripleO CI should be updated to test the TripleO UI. - -Documentation Impact -==================== - -TripleO UI will have to be well-documented and meet OpenStack standards. -We will need both developer and deployment documentation. Documentation will -live in the tripleo-docs repository. - -References -========== - -[1] https://github.com/openstack/ironic-webclient -[2] https://review.openstack.org/#/c/230432 -[3] http://specs.openstack.org/openstack/tripleo-specs/specs/mitaka/tripleo-overcloud-deployment-library.html -[4] https://facebook.github.io/react/ -[5] https://facebook.github.io/flux/ diff --git a/specs/newton/metal-to-tenant.rst b/specs/newton/metal-to-tenant.rst deleted file mode 100644 index e5bbff5d..00000000 --- a/specs/newton/metal-to-tenant.rst +++ /dev/null @@ -1,220 +0,0 @@ -.. 
- This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -==================================== -Metal to Tenant: Ironic in Overcloud -==================================== - -https://blueprints.launchpad.net/tripleo/+spec/ironic-integration - -This blueprint adds support for providing bare metal machines to tenants by -integrating Ironic to the overcloud. - - -Problem Description -=================== - -There is an increasing interest in providing bare metal machines to tenants in -the overcloud in addition to or instead of virtual instances. One example is -Sahara: users hope to achieve better performance by removing the hypervisor -abstraction layer in order to eliminate the noisy neighbor effect. For that -purpose, the OpenStack Bare metal service (Ironic) provides an API and a Nova -driver to serve bare metal instances behind the same Nova and Neutron API's. -Currently however TripleO does not support installing and configuring Ironic -and Nova to serve bare metal instances to the tenant. - - -Proposed Change -=============== - -Composable Services -------------------- - -In the bare metal deployment case, the nova-compute service is only a thin -abstraction layer around the Ironic API. The actual compute instances in -this case are the bare metal nodes. Thus a TripleO deployment with support for -only bare metal nodes will not need dedicated compute nodes in the overcloud. -The overcloud nova-compute service will therefore be placed on controller nodes. - -New TripleO composable services will be created and optionally deployed on the -controller nodes: - -* ``OS::TripleO::Services::IronicApi`` will deploy the bare metal API. - -* ``OS::TripleO::Services::IronicNovaCompute`` will deploy nova compute - with Ironic as a back end. 
It will also configure the nova compute to use - `ClusteredComputeManager - `_ - provide by Ironic to work around inability to have several nova compute - instances configured with Ironic. - -* ``OS::TripleO::Services::IronicConductor`` will deploy a TFTP server, - an HTTP server (for an optional iPXE environment) and an ironic-conductor - instance. The ironic-conductor instance will not be managed by pacemaker - in the HA scenario, as Ironic has its own Active/Active HA model, - which spreads load on all active conductors using a hash ring. - - There is no public data on how many bare metal nodes each conductor - can handle, but the Ironic team expects an order of hundreds of nodes - per conductor. - -Since this feature is not a requirement in all deployments, this will be -opt-in by having a separate environment file. - -Hybrid Deployments ------------------- - -For hybrid deployments with both virtual and bare metal instances, we will use -Nova host aggregates: one for all bare metal hosts, the other for all virtual -compute nodes. This will prevent virtual instances being deployed on baremetal -nodes. Note that every bare metal machine is presented as a separate -Nova compute host. These host aggregates will always be created, even for -purely bare metal deployments, as users might want to add virtual computes -later. - -Networking ----------- - -As of Mitaka, Ironic only supports flat networking for all tenants and for -provisioning. The **recommended** deployment layout will consist of two networks: - -* The ``provisioning`` / ``tenant`` network. It must have access to the - overcloud Neutron service for DHCP, and to overcloud baremetal-conductors - for provisioning. - - .. note:: While this network can technically be the same as the undercloud - provisioning network, it's not recommended to do so due to - potential conflicts between various DHCP servers provided by - Neutron (and in the future by ironic-inspector). - -* The ``management`` network. 
It will contain the BMCs of bare metal nodes, - and it only needs access to baremetal-conductors. No tenant access will be - provided to this network. - - .. note:: Splitting away this network is not really required if tenants are - trusted (which is assumed in this spec) and BMC access is - reasonably restricted. - -Limitations ------------ - -To limit the scope of this spec the following definitely useful features are -explicitly left out for now: - -* ``provision`` <-> ``tenant`` network separation (not yet implemented by - ironic) - -* in-band inspection (requires ironic-inspector, which is not yet HA-ready) - -* untrusted tenants (requires configuring secure boot and checking firmwares, - which is vendor-dependent) - -* node autodiscovery (depends on ironic-inspector) - -Alternatives ------------- - -Alternatively, we could leave configuring a metal-to-tenant environment up to -the operator. - -We could also have it enabled by default, but most likely it won't be required -in most deployments. - -Security Impact ---------------- - -Most of the security implications have to be handled within Ironic. Eg. wiping -the hard disk, checking firmwares, etc. Ironic needs to be configured to be -able to run these jobs by enabling automatic cleaning during node lifecycle. -It is also worth mentioning that we will assume trusted tenants for these bare -metal machines. - -Other End User Impact ---------------------- - -The ability to deploy Ironic in the overcloud will be optional. - -Performance Impact ------------------- - -If enabled, TripleO will deploy additional services to the overcloud: - -* ironic-conductor - -* a TFTP server - -* an HTTP server - -None of these should have heavy performance requirements. - -Other Deployer Impact ---------------------- - -None. - -Developer Impact ----------------- - -None. 
- -Implementation -============== - -Assignee(s) ------------ - -Primary assignee: - ifarkas - -Other contributors: - dtantsur, lucasagomes, mgould, mkovacik - -Work Items ----------- - -when the environment file is included, make sure: - -* ironic is deployed on baremetal-conductor nodes - -* nova compute is deployed and correctly configured, including: - - * configuring Ironic as a virt driver - - * configuring ClusteredComputeManager - - * setting ram_allocation_ratio to 1.0 - -* host aggregates are created - -* update documentation - - -Dependencies -============ - -None. - - -Testing -======= - -This is testable in the CI with nested virtualization and tests will be added -to the tripleo-ci jobs. - - -Documentation Impact -==================== - -* Quick start documentation and a sample environment file will be provided. - -* Document how to enroll new nodes in overcloud ironic (including host - aggregates) - - -References -========== - -* `Host aggregates `_ diff --git a/specs/newton/os-net-config-teaming.rst b/specs/newton/os-net-config-teaming.rst deleted file mode 100644 index fbc40712..00000000 --- a/specs/newton/os-net-config-teaming.rst +++ /dev/null @@ -1,197 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -==================================== -Add Adapter Teaming to os-net-config -==================================== - -https://blueprints.launchpad.net/os-net-config/+spec/os-net-config-teaming - -This spec describes adding features to os-net-config to support adapter teaming -as an option for bonded interfaces. Adapter teaming allows additional features -over regular bonding, due to the use of the teaming agent. - -Problem Description -=================== - -os-net-config supports both OVS bonding and Linux kernel bonding, but some -users want to use adapter teaming instead of bonding. 
Adapter teaming provides -additional options that bonds don't support, and do support almost all of the -options that are supported by bonds. - -Proposed Change -=============== - -Overview --------- - -Add a new class similar to the existing bond classes that allows for the -configuration of the teamd daemon through teamdctl. The syntax for the -configuration of the teams should be functionally similar to configuring -bonds. - -Alternatives ------------- - -We already have two bonding methods in use, the Linux bonding kernel module, -and Open vSwitch. However, adapter teaming is becoming a best practice, and -this change will open up that possibility. - -Security Impact ---------------- - -The end result of using teaming instead of other modes of bonding should be -the same from a security standpoint. Adapter teaming does not interfere with -iptables or selinux. - - -Other End User Impact ---------------------- - -Operators who are troubleshooting a deployment where teaming is used may need -to familiarize themselves with the teamdctl utility. - -Performance Impact ------------------- - -Using teaming rather than bonding will have a mostly positive impact on -performance. Teaming is very lightweight, and may use less CPU than other -bonding modes, especially OVS. Teaming has the following impacts: - -* Fine-grained control over load balancing hashing algorithms. - -* Port-priorities and stickyness - -* Per-port monitoring. - -Other Deployer Impact ---------------------- - -In TripleO, os-net-config has existing sample templates for OVS-mode -bonds and Linux bonds. There has been some discussion with Dan Prince -about unifying the bonding templates in the future. - -The type of bond could be set as a parameter in the NIC config -templates. To this end, it probably makes sense to make the teaming -configuration as similar to the bonding configurations as possible. 
- -Developer Impact ----------------- - -If possible, the configuration should be as similar to the bonding -configuration as possible. In fact, it might be treated as a different -form of bond, as long as the required metadata for teaming can be -provided in the options. - - -Implementation -============== - -Assignee(s) ------------ - -Primary assignee: - Dan Sneddon - -Work Items ----------- - -* Add teaming object and unit tests. - -* Configure sample templates to demonstrate usage of teaming. - -* Test TripleO with new version of os-net-config and adapter teaming configured. - -Configuration Example ---------------------- - -The following is an example of a teaming configuration that os-net-config -should be able to implement:: - - - - type: linux_team - name: team0 - bonding_options: "{"runner": {"name": "activebackup"}, "link_watch": {"name": "ethtool"}}" - addresses: - - - ip_subnet: 192.168.0.10/24 - members: - - - type: interface - name: eno2 - primary: true - - - type: interface - name: eno3 - -The only difference between a Linux bond configuration and an adapter team -configuration in the above example is the type (linux_team), and the content -of the bonding_options (bonding has a different format for options). - -Implementation Details ----------------------- - -os-net-config will have to configure the ifcfg files for the team. The ifcfg -format for team interfaces is documented here [1]. - -If an interface is marked as primary, then the ifcfg file for that interface -should list it at a higher than default (0) priority:: - - TEAM_PORT_CONFIG='{"prio": 100}' - -The mode is set in the runner: statement, as well as any settings that -apply to that teaming mode. - -We have the option of using strictly ifcfg files or using the ip utility -to influence the settings of the adapter team. It appears from the teaming -documentation that either approach will work. 
- -The proposed implementation [2] of adapter teaming for os-net-config uses -only ifcfg files to set the team settings, slave interfaces, and to -set the primary interface. The potential downside of this path is that -the interface must be shut down and restarted when config changes are -made, but that is consistent with the other device types in os-net-config. -This is probably acceptable, since network changes are made rarely and -are assumed to be disruptive to the host being reconfigured. - -Dependencies -============ - -* teamd daemon and teamdctl command-line utility must be installed. teamd is - not installed by default on RHEL/CENTOS, however, teamd is currently - included in the RDO overcloud-full image. This should be added ot the list - of os-net-config RPM dependencies. - -* For LACP bonds using 802.3ad, switch support will need to be configured and - at least two ports must be configured for LACP bonding. - - -Testing -======= - -In order to test this in CI, we would need to have an environment where we -have multiple physical NICs. Adapter teaming supports modes other than LACP, -so we could possibly get away with multiple links without any special -configuration. - - -Documentation Impact -==================== - -The deployment documentation will need to be updated to cover the use of -teaming. The os-net-config sample configurations will demonstrate the use -in os-net-config. TripleO Heat template examples should also help with -deployments using teaming. 
- - -References -========== - -* [1] - Documentation: Creating a Network Team Using ifcfg Files - https://access.redhat.com/documentation/en-US/Red_Hat_Enterprise_Linux/7/html/Networking_Guide/sec-Configure_a_Network_Team_Using-the_Command_Line.html#sec-Creating_a_Network_Team_Using_ifcfg_Files - -* [2] - Review: Add adapter teaming support using teamd for ifcfg-systems - https://review.openstack.org/#/c/339854/ \ No newline at end of file diff --git a/specs/newton/pacemaker-next-generation-architecture.rst b/specs/newton/pacemaker-next-generation-architecture.rst deleted file mode 100644 index 4cfa252e..00000000 --- a/specs/newton/pacemaker-next-generation-architecture.rst +++ /dev/null @@ -1,229 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -====================================== -Pacemaker Next Generation Architecture -====================================== - -https://blueprints.launchpad.net/tripleo/+spec/ha-lightweight-architecture - -Change the existing HA manifests and templates to deploy a minimal pacemaker -architecture, where all the openstack services are started and monitored by -systemd with the exception of: VIPs/Haproxy, rabbitmq, redis and galera. - -Problem Description -=================== - -The pacemaker architecture deployed currently via -`puppet/manifests/overcloud_controller_pacemaker.pp` manages most -service on the controllers via pacemaker. This approach, while having the -advantage of having a single entity managing and monitoring all services, does -bring a certain complexity to it and assumes that the operators are quite -familiar with pacemaker and its management of resources. 
The aim is to -propose a new architecture, replacing the existing one, where pacemaker -controls the following resources: - -* Virtual IPs + HAProxy -* RabbitMQ -* Galera -* Redis -* openstack-cinder-volume (as the service is not A/A yet) -* Any future Active/Passive service - -Basically every service that is managed today by a specific resource agent -and not systemd, will be still running under pacemaker. The same goes -for any service (like openstack-cinder-volume) that need to be active/passive. - -Proposed Change -=============== - -Overview --------- - -Initially the plan was to create a brand new template implementing this -new HA architecture. After a few rounds of discussions within the TripleO -community, it has been decided to actually have a single HA architecture. -The main reasons for moving to a single next generation HA architecture are due to -the amount work needed to maintain two separate architectures and to the -fact that the previous HA architecture does not bring substantial advantages -over this next generation one. - -The new architecture will enable most services via systemd and will remove most -pacemaker resource definitions with their corresponding constraints. -In terms of ordering constraints we will go from a graph like this one: -http://acksyn.org/files/tripleo/wsgi-openstack-core.pdf (mitaka) - -to a graph like this one: -http://acksyn.org/files/tripleo/light-cib-nomongo.pdf (next-generation-mitaka) - -Once this new architecture is in place and we have tested it extensively, we -can work on the upgrade path from the previous fully-fledged pacemaker HA -architecture to this new one. Since the impact of pacemaker in the new -architecture is quite small, it is possible to consider dropping the non-ha -template in the future for every deployment and every CI job. The decision -on this can be taken in a later step, even post-newton. 
- -Another side-benefit is that with this newer architecture the -whole upgrade/update topic is much easier to manage with TripleO, -because there is less coordination needed between pacemaker, the update -of openstack services, puppet and the update process itself. - -Note that once composable service land, this next generation architecture will -merely consist of a single environment file setting some services to be -started via systemd, some via pacemaker and a bunch of environment variables -needed for the services to reconnect even when galera and rabbitmq are down. -All services that need to be started via systemd will be done via the default -state: -https://github.com/openstack/tripleo-heat-templates/blob/40ad2899106bc5e5c0cf34c40c9f391e19122a49/overcloud-resource-registry-puppet.yaml#L124 - -The services running via pacemaker will be explicitely listed in an -environment file, like here: -https://github.com/openstack/tripleo-heat-templates/blob/40ad2899106bc5e5c0cf34c40c9f391e19122a49/environments/puppet-pacemaker.yaml#L12 - -Alternatives ------------- - -There are many alternative designs for the HA architecture. The decision -to use pacemaker only for a certain set of "core" services and all the -Active/Passive services comes from a careful balance between complexity -of the architecture and its management and being able to recover resources -in a known broken state. There is a main assumption here about native -openstack services: - -They *must* be able to start when the broker and the database are down and keep -retrying. - -The reason for using only pacemaker for the core services and not, for -example keepalived for the Virtual IPs, is to keep the stack simple and -not introduce multiple distributed resource managers. Also, if we used -only keepalived, we'd have no way of recovering from a failure beyond -trying to relocate the VIP. 
- -The reason for keeping haproxy under pacemaker's management is that -we can guarantee that a VIP will always run where haproxy is running, -should an haproxy service fail. - - -Security Impact ---------------- - -No changes regarding security aspects compared to the existing status quo. - -Other End User Impact ---------------------- - -The operators working with a cloud are impacted in the following ways: - -* The services (galera, redis, openstack-cinder-volume, VIPs, - haproxy) will be managed as usual via `pcs`. Pacemaker will monitor these - services and provide their status via `pcs status`. - -* All other services will be managed via `systemctl` and systemd will be - configured to automatically restart a failed service. Note, that this is - already done in RDO with (Restart={always,on-failure}) in the service files. - It is a noop when pacemaker manages the service as an override file is - created by pacemaker: - - https://github.com/ClusterLabs/pacemaker/blob/master/lib/services/systemd.c#L547 - - With the new architecture, restarting a native openstack service across - all controllers will require restarting it via `systemctl` on each node (as opposed - to a single `pcs` command as it is done today) - -* All services will be configured to retry indefinitely to connect to - the database or to the messaging broker. In case of a controller failure, - the failover scenario will be the same as with the current HA architecture, - with the difference that the services will just retry to re-connect indefinitely. - -* Previously with the HA template every service would be monitored and managed by - pacemaker. With the split between openstack services being managed by systemd and - "core" services managed by pacemaker, the operator needs to know which service - to monitor with which command. - -Performance Impact ------------------- - -No changes compared to the existing architecture. 
- -Other Deployer Impact ---------------------- - -None - -Developer Impact ----------------- - -In the future we might see if the removal of the non-HA template is feasible, -thereby simplifying our CI jobs and have single more-maintained template. - -Implementation -============== - -Assignee(s) ------------ - -Primary assignee: - michele - -Other contributors: - ... - - -Work Items ----------- - -* Prepare the roles that deploy the next generation architecture. Initially, - keep it as close as possible to the existing HA template and make it simpler - in a second iteration (remove unnecessary steps, etc.) Template currently - lives here and deploys successfully: - - https://review.openstack.org/#/c/314208/ - -* Test failure scenarios and recovery scenario, open bugs against services that - misbehave in the face of database and/or broker being down. - - -Dependencies -============ - -None - -Testing -======= - -Initial smoke-testing has been completed successfully. Another set of tests -focusing on the behaviour of openstack services when galera and rabbitmq are -down is in the process of being run. - -Particular focus will be on failover scenarios and recovery times and making -sure that there are no regressions compared to the current HA architecture. - - -Documentation Impact -==================== - -Currently we do not describe the architectures as deployed by TripleO itself, -so no changes needed. A short page in the docs describing the architecture -would be a nice thing to have in the future. 
- -References -========== - -This design came mostly out from a meeting in Brno with the following attendees: - -* Andrew Beekhof -* Chris Feist -* Eoghan Glynn -* Fabio Di Nitto -* Graeme Gillies -* Hugh Brock -* Javier Peña -* Jiri Stransky -* Lars Kellogg-Steadman -* Mark Mcloughlin -* Michele Baldessari -* Raoul Scarazzini -* Rob Young diff --git a/specs/newton/tripleo-lldp-validation.rst b/specs/newton/tripleo-lldp-validation.rst deleted file mode 100644 index b01198cd..00000000 --- a/specs/newton/tripleo-lldp-validation.rst +++ /dev/null @@ -1,229 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -========================================== -TripleO LLDP Validation -========================================== - -Include the URL of your launchpad blueprint: - -https://blueprints.launchpad.net/tripleo/+spec/tripleo-lldp-validation - -The Link Layer Discovery Protocol (LLDP) is a vendor-neutral link layer -protocol in the Internet Protocol Suite used by network devices for -advertising their identity, capabilities, and neighbors on an -IEEE 802 local area network, principally wired Ethernet. [1] - -The Link Layer Discover Protocol (LLDP) helps identify layer 1/2 -connections between hosts and switches. The switch port, chassis ID, -VLANs trunked, and other info is available, for planning or -troubleshooting a deployment. For instance, a deployer may validate -that the proper VLANs are supplied on a link, or that all hosts -are connected to the Provisioning network. - -Problem Description -=================== - -A detailed description of the problem: - -* Deployment networking is one of the most difficult parts of any - OpenStack deployment. A single misconfigured port or loose cable - can derail an entire multi-rack deployment. - -* Given the first point, we should work to automate validation and - troubleshooting where possible. 
- -* Work is underway to collect LLDP data in ironic-python-agent, - and we have an opportunity to make that data useful [2]. - - -Proposed Change -=============== - -Overview --------- - -The goal is to expose LLDP data that is collected during -introspection, and provide this data in a format that is useful for the -deployer. This work depends on the LLDP collection work being done -in ironic-python-agent [3]. - -There is work being done to implement LLDP data collection for Ironic/ -Neutron integration. Although this work is primarily focused on features -for bare-metal Ironic instances, there will be some overlap with the -way TripleO uses Ironic to provision overcloud servers. - -Alternatives ------------- - -There are many network management utilities that use CDP or LLDP data to -validate the physical networking. Some of these are open source, but none -are integrated with OpenStack. - -Alternative approaches that do not use LLDP are typically vendor-specific -and require specific hardware support. Cumulus has a solution which works -with multiple vendors' hardware, but that solution requires running their -custom OS on the Ethernet switches. - -Another approach which is common is to perform collection of the switch -configurations to a central location, where port configurations can be -viewed, or in some cases even altered and remotely pushed. The problem -with this approach is that the switch configurations are hardware and -vendor-specific, and typically a network engineer is required to read -and interpret the configuration. A unified approach that works for all -common switch vendors is preferred, along with a unified reporting format. - -Security Impact ---------------- - -The physical network report provides a roadmap to the underlying network -structure. This could prove handy to an attacker who was unaware of the -existing topology. 
On the other hand, the information about physical -network topology is less valuable than information about logical topology -to an attacker. LLDP contains some information about both physical and -logical topology, but the logical topology is limited to VLAN IDs. - -The network topology report should be considered sensitive but not -critical. No credentials or shared secrets are revealed in the data -collected by ironic-inspector. - -Other End User Impact ---------------------- - -This report will hopefully reduce the troubleshooting time for nodes -with failed network deployments. - -Performance Impact ------------------- - -If this report is produced as part of the ironic-inspector workflow, -then it will increase the time taken to introspect each node by a -negligible amount, perhaps a few seconds. - -If this report is called by the operator on demand, it will have -no performance impact on other components. - -Other Deployer Impact ---------------------- - -Deployers may want additional information than the per-node LLDP report. -There may be some use in providing aggregate reports, such as the number -of nodes with a specific configuration of interfaces and trunked VLANs. -This would help to highlight outliers or misconfigured nodes. - -There have been discussions about adding automated switch configuration -in TripleO. This would be a mechanism whereby deployers could produce the -Ethernet switch configuration with a script based on a configuration -template. The deployer would provide specifics like the number of nodes -and the configuration per node, and the script would generate the switch -configuration to match. In that case, the LLDP collection and analysis -would function as a validator for the automatically generated switch -port configurations. - -Developer Impact ----------------- - -The initial work will be to fill in fixed fields such as Chassis ID -and switch port. An LLDP packet can contain additional data on a -per-vendor basis, however. 
- -The long-term plan is to store the entire LLDP packet in the -metadata. This will have to be parsed out. We may have to work with -switch vendors to figure out how to interpret some of the data if -we want to make full use of it. - -Implementation -============== - -Some notes about implementation: - -* This Python tool will access the introspection data and produce - reports on various information such as VLANs per port, host-to-port - mapping, and MACs per host. - -* The introspection data can be retrieved with the Ironic API [4] [5]. - -* The data will initially be a set of fixed fields which are retrievable - in the JSON in the Ironic introspection data. Later, the entire - LLDP packet will be stored, and will need to be parsed outisde of the - Ironic API. - -* Although the initial implementation can return a human-readable report, - other outputs should be available for automation, such as YAML. - -* The tool that produces the LLDP report should be able to return data - on a single host, or return all of the data. - -* Some basic support for searching would be a nice feature to have. - -* This data will eventually be used by the GUI to display as a validation - step in the deployment workflow. - -Assignee(s) ------------ - -Primary assignee: - dsneddon - -Other contributors: - bfournie - -Work Items ----------- - -* Create the Python script to grab introspection data from Swift using - the API. - -* Create the Python code to extract the relevant LLDP data from the - data JSON. - -* Implement per-node reports - -* Implement aggregate reports - -* Interface with UI developers to give them the data in a form that can - be consumed and presented by the TripleO UI. - -* In the future, when the entire LLDP packet is stored, refactor logic - to take this into account. - -Testing -======= - -Since this is a report that is supposed to benefit the operator, perhaps -the best way to include it in CI is to make sure that the report gets -logged by the Undercloud. 
Then the report can be reviewed in the log -output from the CI run. - -In fact, this might benefit the TripleO CI process, since hardware issues -on the network would be easier to troubleshoot without having access to -the bare metal console. - - -Documentation Impact -==================== - -Documentation will need to be written to cover making use of the new -LLDP reporting tool. This should cover running the tool by hand and -interpreting the data. - - -References -========== -* [1] - Wikipedia entry on LLDP: - https://en.wikipedia.org/wiki/Link_Layer_Discovery_Protocol - -* [2] - Blueprint for Ironic/Neutron integration: - https://blueprints.launchpad.net/ironic/+spec/ironic-ml2-integration - -* [3] - Review: Support LLDP data as part of interfaces in inventory - https://review.openstack.org/#/c/320584/ - -* [4] - Accessing Ironic Introspection Data - http://tripleo.org/advanced_deployment/introspection_data.html - -* [5] - Ironic API - Get Introspection Data - http://docs.openstack.org/developer/ironic-inspector/http-api.html#get-introspection-data \ No newline at end of file diff --git a/specs/newton/tripleo-opstools-availability-monitoring.rst b/specs/newton/tripleo-opstools-availability-monitoring.rst deleted file mode 100644 index d9924b4d..00000000 --- a/specs/newton/tripleo-opstools-availability-monitoring.rst +++ /dev/null @@ -1,186 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -============================================ -Enable deployment of availability monitoring -============================================ - -https://blueprints.launchpad.net/tripleo/+spec/tripleo-opstools-availability-monitoring - -TripleO should be deploying out-of-the-box availability monitoring solution -to serve the overcloud. 
- -Problem Description -=================== - -Currently there is no such feature implemented except for possibility to deploy -sensu-server, sensu-api and uchiwa (Sensu dashboard) services in the undercloud -stack. Without sensu-client services deployed on overcloud nodes this piece -of code is useless. Due to potential of high resource consumption it is also -reasonable to remove current undercloud code to avoid possible problems -when high number of overcloud nodes is being deployed. - -Instead sensu-server, sensu-api and uchiwa should be deployed on the separate -node(s) whether it is on the undercloud level or on the overcloud level. -And so sensu-client deployment support should be flexible enough to enable -connection to external monitoring infrastructure or with Sensu stack deployed -on the dedicated overcloud node. - -Summary of use cases: - -1. sensu-server, sensu-api and uchiwa deployed in external infrastructure; -sensu-client deployed on each overcloud node -2. sensu-server, sensu-api and uchiwa deployed as a separate Heat stack in -the overcloud stack; sensu-client deployed on each overcloud node - -Proposed Change -=============== - -Overview --------- - -The sensu-client service will be deployed as a composable service on -the overcloud stack when it is explicitly stated via environment file. -Sensu checks will have to be configured as subscription checks (see [0] -for details). Each composable service will have it's own subscription string, -which will ensure that checks defined on Sensu server node (wherever it lives) -are run on the correct overcloud nodes. - -There will be implemented a possibility to deploy sensu-server, sensu-api -and uchiwa services on a stand alone node deployed by the undercloud. 
-This standalone node will have a dedicated purpose for monitoring -(not only for availability monitoring services, but in future also for -centralized logging services or performance monitoring services) - -The monitoring node will be deployed as a separate Heat stack to the overcloud -stack using Puppet and composable roles for required services. - -Alternatives ------------- - -None - -Security Impact ---------------- - -Additional service (sensu-client) will be installed on all overcloud nodes. -These services will have open connection to RabbitMQ instance running -on monitoring node and are used to execute commands (checks) on the overcloud -nodes. Check definition will live on the monitoring node. - -Other End User Impact ---------------------- - -None - -Performance Impact ------------------- - -We might consider deploying separate RabbitMQ and Redis for monitoring purposes -if we want to avoid influencing OpenStack deployment in the overcloud. - -Other Deployer Impact ---------------------- - -* Sensu clients will be deployed by default on all overcloud nodes except the monitoring node. -* New Sensu common parameters: - - * MonitoringRabbitHost - - * RabbitMQ host Sensu has to connect to - - * MonitoringRabbitPort - - * RabbitMQ port Sensu has to connect to - - * MonitoringRabbitUseSSL - - * Whether Sensu should connect to RabbitMQ using SSL - - * MonitoringRabbitPassword - - * RabbitMQ password used for Sensu to connect - - * MonitoringRabbitUserName - - * RabbitMQ username used for Sensu to connect - - * MonitoringRabbitVhost - - * RabbitMQ vhost used for monitoring purposes. 
- -* New Sensu server/API parameters - - * MonitoringRedisHost - - * Redis host Sensu has to connect to - - * MonitoringRedisPassword - - * Redis password used for Sensu to connect - - * MonitoringChecks: - - * Full definition (for all subscriptions) of checks performed by Sensu - -* New parameters for subscription strings for each composable service: - - * For example for service nova-compute MonitoringSubscriptionNovaCompute, which will default to 'overcloud-nova-compute' - - -Developer Impact ----------------- - -Support for new node type should be implemented for tripleo-quickstart. - -Implementation -============== - -Assignee(s) ------------ - -Martin Mágr - -Work Items ----------- - -* puppet-tripleo profile for Sensu services -* puppet-tripleo profile for Uchiwa service -* tripleo-heat-templates composable service for sensu-client deployment -* tripleo-heat-templates composable service for sensu-server deployment -* tripleo-heat-templates composable service for sensu-api deployment -* tripleo-heat-templates composable service for uchiwa deployment -* Support for monitoring node in tripleo-quickstart -* Revert patch(es) implementing Sensu support in instack-undercloud - -Dependencies -============ - -* Puppet module for Sensu services: sensu-puppet [1] -* Puppet module for Uchiwa: puppet-uchiwa [2] -* CentOS Opstools SIG repo [3] - -Testing -======= - -Sensu client deployment will be tested by current TripleO CI as soon as -the patch is merged, as it will be deployed by default. - -We should consider creating CI job for deploying overcloud with monitoring -node to test the rest of the monitoring components. - -Documentation Impact -==================== - -Process of creating new node type and new options will have to be documented. 
- -References -========== - -[0] https://sensuapp.org/docs/latest/reference/checks.html#subscription-checks -[1] https://github.com/sensu/sensu-puppet -[2] https://github.com/Yelp/puppet-uchiwa -[3] https://wiki.centos.org/SpecialInterestGroup/OpsTools diff --git a/specs/newton/tripleo-opstools-centralized-logging.rst b/specs/newton/tripleo-opstools-centralized-logging.rst deleted file mode 100644 index 673b5436..00000000 --- a/specs/newton/tripleo-opstools-centralized-logging.rst +++ /dev/null @@ -1,147 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -======================================== -Enable deployment of centralized logging -======================================== - -https://blueprints.launchpad.net/tripleo/+spec/tripleo-opstools-centralized-logging - -TripleO should be deploying with an out-of-the-box centralized logging -solution to serve the overcloud. - -Problem Description -=================== - -With a complex distributed system like OpenStack, identifying and -diagnosing a problem may require tracking a transaction across many -different systems and many different logfiles. In the absence of a -centralized logging solution, this process is frustrating to both new -and experienced operators and can make even simple problems hard to -diagnose. - -Proposed Change -=============== - -We will deploy the Fluentd_ service in log collecting mode as a -composable service on all nodes in the overcloud stack when configured -to do so by the environment. Each composable service will have its -own fluentd source configuration. - -.. _fluentd: http://www.fluentd.org/ - -To receive these messages, we will deploy a centralized logging system -running Kibana_, Elasticsearch_ and Fluentd on dedicated nodes to -provide log aggregation and analysis. 
This will be deployed in a -dedicated Heat stack that is separate from the overcloud stack using -composable roles. - -.. _kibana: https://www.elastic.co/products/kibana -.. _elasticsearch: https://www.elastic.co/ - -We will also support sending messages to an external Fluentd -instance not deployed by tripleo. - -Summary of use cases --------------------- - -1. Elasticsearch, Kibana and Fluentd log relay/transformer deployed as - a separate Heat stack in the overcloud stack; Fluentd log - collector deployed on each overcloud node - -2. ElasticSearch, Kibana and Fluentd log relay/transformer deployed in - external infrastructure; Fluentd log collector deployed on each - overcloud node - -Alternatives ------------- - -None - -Security Impact ---------------- - -Data collected from the logs of OpenStack services can contain -sensitive information: - -- Communication between the - fluentd agent and the log aggregator should be protected with SSL. - -- Access to the Kibana UI must have at least basic HTTP - authentication, and client access should be via SSL. - -- ElasticSearch should only allow collections over ``localhost``. - -Other End User Impact ---------------------- - -None - -Performance Impact ------------------- - -Additional resources will be required for running Fluentd on overcloud -nodes. Log traffic from the overcloud nodes to the log aggregator -will consume some bandwidth. - -Other Deployer Impact ---------------------- - -- Fluentd will be deployed on all overcloud nodes. -- New parameters for configuring Fluentd collector. -- New parameters for configuring log collector (Fluentd, - ElasticSearch, and Kibana) - -Developer Impact ----------------- - -Support for the new node type should be implemented for tripleo-quickstart. 
- -Implementation -============== - -Assignee(s) ------------ - -Martin Mágr -Lars Kellogg-Stedman - -Work Items ----------- - -- puppet-tripleo profile for fluentd service -- tripleo-heat-templates composable role for FluentD collector deployment -- tripleo-heat-templates composable role for FluentD aggregator deployment -- tripleo-heat-templates composable role for ElasticSearch deployment -- tripleo-heat-templates composable role for Kibana deployment -- Support for logging node in tripleo-quickstart - -Dependencies -============ - -- Puppet module for Fluentd: `konstantin-fluentd` [1] -- Puppet module for ElasticSearch `elasticsearch-elasticsearch` [2] -- Puppet module for Kibana (tbd) -- CentOS Opstools SIG package repository - -Testing -======= - -Fluentd client deployment will be tested by current TripleO CI as soon as -the patch is merged. Because the centralized logging features will not -be enabled by default we may need to introduce specific tests for -these features. - -Documentation Impact -==================== - -Process of creating new node type and new options will have to be documented. - -References -========== - -[1] https://forge.puppet.com/srf/fluentd -[2] https://forge.puppet.com/elasticsearch/elasticsearch diff --git a/specs/newton/tripleo-ovs-dpdk.rst b/specs/newton/tripleo-ovs-dpdk.rst deleted file mode 100644 index 24a7fa4a..00000000 --- a/specs/newton/tripleo-ovs-dpdk.rst +++ /dev/null @@ -1,232 +0,0 @@ - -This work is licensed under a Creative Commons Attribution 3.0 Unported -License. - -http://creativecommons.org/licenses/by/3.0/legalcode - -========================================== -Adding OVS-DPDK to Tripleo -========================================== - -Blueprint URL - -https://blueprints.launchpad.net/tripleo/+spec/tripleo-ovs-dpdk - -DPDK is a set of libraries and drivers for fast packet processing and gets as -close to wire-line speed as possible for virtual machines. 
- - * It is a complete framework for fast packet processing in data plane - applications. - - * Directly polls the data from the NIC. - - * Does not use interrupts - to prevent performance overheads. - - * Uses the hugepages to preallocate large regions of memory, which allows the - applications to DMA data directly into these pages. - - * DPDK also has its own buffer and ring management systems for handling - sk_buffs efficiently. - -DPDK provides data plane libraries and NIC drivers for - - - * Queue management with lockless queues. - - * Buffer manager with pre-allocated fixed size buffers. - - * PMD (poll mode drivers) to work without asynchronous notifications. - - * Packet framework (set of libraries) to help data plane packet processing. - - * Memory manager - allocates pools of memory, uses a ring to store free - objects. - -Problem Description -=================== - -* Today the installation and configuration of OVS+DPDK in openstack is done - manually after overcloud deployment. This can be very challenging for the - operator and tedious to do over a large number of compute nodes. - The installation of OVS+DPDK needs be automated in tripleo. - -* Identification of the hardware capabilities for DPDK were all done manually - today and the same shall be automated during introspection. This hardware - detection also provides the operator with the data needed for configuring - Heat templates. - -* As of today its not possible to have the co-existence of compute nodes with - DPDK enabled hardware and without DPDK enabled hardware. - - -Proposed Change -=============== - -* Ironic Python Agent shall discover the below hardware details and store it - in swift blob - - - * CPU flags for hugepages support - - If pse exists then 2MB hugepages are supported - If pdpe1gb exists then 1GB hugepages are supported - - * CPU flags for IOMMU - - If VT-d/svm exists, then IOMMU is supported, provided IOMMU support is - enabled in BIOS. 
- - * Compatible nics - - Shall compare it with the list of NICs whitelisted for DPDK. The DPDK - supported NICs are available at http://dpdk.org/doc/nics - - The nodes without any of the above mentioned capabilities can't be used for - COMPUTE role with DPDK. - -* Operator shall have a provision to enable DPDK on compute nodes - -* The overcloud image for the nodes identified to be COMPUTE capable and having - DPDK NICs, shall have the OVS+DPDK package instead of OVS. It shall also have - packages dpdk and driverctl. - -* The device names of the DPDK capable NIC’s shall be obtained from T-H-T. - The PCI address of DPDK NIC needs to be identified from the device name. - It is required for whitelisting the DPDK NICs during PCI probe. - -* Hugepages needs to be enabled in the Compute nodes with DPDK. - Bug: https://bugs.launchpad.net/tripleo/+bug/1589929 needs to be implemeted - -* CPU isolation needs to be done so that the CPU cores reserved for DPDK Poll - Mode Drivers (PMD) are not used by the general kernel balancing, - interrupt handling and scheduling algorithms. - Bug: https://bugs.launchpad.net/tripleo/+bug/1589930 needs to be implemented. - -* On each COMPUTE node with DPDK enabled NIC, puppet shall configure the - DPDK_OPTIONS for whitelisted NIC's, CPU mask and number of memory channels - for DPDK PMD. The DPDK_OPTIONS needs to be set in /etc/sysconfig/openvswitch - -* Os-net-config shall - - - * Associate the given interfaces with the dpdk drivers (default as vfio-pci - driver) by identifying the pci address of the given interface. The - driverctl shall be used to bind the driver persistently - - * Understand the ovs_user_bridge and ovs_dpdk_port types and configure the - ifcfg scripts accordingly. - - * The "TYPE" ovs_user_bridge shall translate to OVS type OVSUserBridge and - based on this OVS will configure the datapath type to 'netdev'. 
- - * The "TYPE" ovs_dpdk_port shall translate OVS type OVSDPDKPort and based on - this OVS adds the port to the bridge with interface type as 'dpdk' - - * Understand the ovs_dpdk_bond and configure the ifcfg scripts accordingly. - -* On each COMPUTE node with DPDK enabled NIC, puppet shall - - - * Enable OVS+DPDK in /etc/neutron/plugins/ml2/openvswitch_agent.ini - [OVS] - datapath_type=netdev - vhostuser_socket_dir=/var/run/openvswitch - - * Configure vhostuser ports in /var/run/openvswitch to be owned by qemu. - -* On each controller node, puppet shall - - - * Add NUMATopologyFilter to scheduler_default_filters in nova.conf. - -Alternatives ------------- - -* The boot parameters could be configured via puppet (during overcloud - deployment) as well as virt-customize (after image building or downloading). - The choice of selection of boot parameter is moved out of scope of this - blueprint and would be tracked via - https://bugs.launchpad.net/tripleo/+bug/1589930. - -Security impact ---------------- - -* We have no firewall drivers which support ovs-dpdk at present. Security group - support with conntrack is a possible option, and this work is in progress. - Security groups will not be supported. - - -Other End User Impact ---------------------- - -None - -Performance Impact ------------------- - -* OVS-DPDK can augment 3 times dataplane performance. - Refer http://goo.gl/Du1EX2 - -Other Deployer Impact ---------------------- - -* The operator shall ensure that the VT-d/IOMMU virtualization technology is - enabled in BIOS of the compute nodes. 
- -* Post deployment, operator shall modify the VM flavors for using hugepages, - CPU pinning - Ex: nova flavor-key m1.small set "hw:mem_page_size=large" - - -Developer Impact ----------------- - -None - -Implementation -============== - - -Assignee(s) ------------ - -Primary assignees: - -* karthiks -* sanjayu - -Work Items ----------- - -* The proposed changes discussed earlier will be the work items - -Dependencies -============ - -* We are dependent on composable roles, as this is something we would - require only on specific compute nodes and not generally on all the nodes. - -* To enable Hugepages, bug: https://bugs.launchpad.net/tripleo/+bug/1589929 - needs to be implemeted - -* To address boot parameter changes for CPU isolation, - bug: https://bugs.launchpad.net/tripleo/+bug/1589930 needs to be implemented - -Testing -======= - -* Since DPDK needs specific hardware support, this feature cant be tested under - CI. We will need third party CI for validating it. - -Documentation Impact -==================== - -* Manual steps that needs to be done by the operator shall be documented. - Ex: configuring BIOS for VT-d, adding boot parameter for CPU isolation, - hugepages, post deploymenent configurations. - -Refrences -========= - -* Manual steps to setup DPDK in RedHat Openstack Platform 8 - https://goo.gl/6ymmJI - -* Setup procedure for CPU pinning and NUMA topology - http://goo.gl/TXxuhv - -* DPDK supported NICS - http://dpdk.org/doc/nics - - - diff --git a/specs/newton/tripleo-sriov.rst b/specs/newton/tripleo-sriov.rst deleted file mode 100644 index 78644a5a..00000000 --- a/specs/newton/tripleo-sriov.rst +++ /dev/null @@ -1,250 +0,0 @@ -This work is licensed under a Creative Commons Attribution 3.0 Unported -License. 
- -http://creativecommons.org/licenses/by/3.0/legalcode - -========================================== -Adding SR-IOV to Tripleo -========================================== - -Blueprint URL: - https://blueprints.launchpad.net/tripleo/+spec/tripleo-sriov - -SR-IOV is a specification that extends the PCI Express specification and allows -a PCIe device to appear to be multiple separate physical PCIe devices. - -SR-IOV provides one or more Virtual Functions (VFs) and a Physical Function(PF) - - * Virtual Functions (VF's) are ‘lightweight’ PCIe functions that contain the - resources necessary for data movement but have a carefully minimized set - of configuration resources. - - * Physical Function (PF) are full PCIe functions that include the SR-IOV - Extended Capability. This capability is used to configure and manage - the SR-IOV functionality. - -The VF’s could be attached to VMs like a dedicated PCIe device and thereby the -usage of SR-IOV NICs boosts the networking performance considerably. - - -Problem Description -=================== - -* Today the installation and configuration of SR-IOV feature is done manually - after overcloud deployment. It shall be automated via tripleo. - -* Identification of the hardware capabilities for SR-IOV were all done manually - today and the same shall be automated during introspection. The hardware - detection also provides the operator, the data needed for configuring Heat - templates. - -Proposed Change -=============== - -Overview --------- - -* Ironic Python Agent will discover the below hardware details and store it in - swift blob - - - * SR-IOV capable NICs: - Shall read /sys/bus/pci/devices/.../sriov_totalvfs and check if its non - zero, inorder to identify if the NIC is SR-IOV capable - - * VT-d or IOMMU support in BIOS: - The CPU flags shall be read to identify the support. - -* DIB shall include the package by default - openstack-neutron-sriov-nic-agent. 
- -* The nodes without any of the above mentioned capabilities can't be used for - COMPUTE role with SR-IOV - -* SR-IOV drivers shall be loaded during bootup via persistent module loading - scripts. These persistent module loading scripts shall be created by the - puppet manifests. - -* T-H-T shall provide the below details - - * supported_pci_vendor_devs - configure the vendor-id/product-id couples in - the nodes running neutron-server - - * max number of vf's - persistent across reboots - - * physical device mappings - Add physical device mappings ml2_conf_sriov.ini - file in compute node - -* On the nodes running the Neutron server, puppet shall - - * enable sriovnicswitch in the /etc/neutron/plugin.ini file - mechanism_drivers = openvswitch,sriovnicswitch - This configuration enables the SR-IOV mechanism driver alongside - OpenvSwitch. - - * Set the VLAN range for SR-IOV in the file /etc/neutron/plugin.ini, present - in the network node - network_vlan_ranges = : - : Ex : network_vlan_ranges = fabric0:15:20 - - * Configure the vendor-id/product-id couples if it differs from - “15b3:1004,8086:10ca” in /etc/neutron/plugins/ml2/ml2_conf_sriov.ini - supported_pci_vendor_devs = 15b3:1004,8086:10ca, - - * Configure neutron-server.service to use the ml2_conf_sriov.ini file - [Service] Type=notify User=neutron ExecStart=/usr/bin/neutron-server - --config-file /usr/share/neutron/neutron-dist.conf --config-file - /etc/neutron/neutron.conf --config-file /etc/neutron/plugin.ini - --config-file /etc/neutron/plugins/ml2/ml2_conf_sriov.ini --log-file - /var/log/neutron/server.log - -* In the nodes running nova scheduler, puppet shall - - * add PciPassthroughFilter filter to the list of scheduler_default_filters. 
- This needs to be done to allow proper scheduling of SR-IOV devices - -* On each COMPUTE+SRIOV node, puppet shall configure /etc/nova/nova.conf - - * Associate the available VFs with each physical network - Ex: pci_passthrough_whitelist={"devname": "enp5s0f1", - "physical_network":"fabric0"} - - PCI passthrough whitelist entries use the following syntax: ["device_id": - "",] ["product_id": "",] ["address": - "[[[[]:]]:][][.[]]" | "devname": "Ethernet - Interface Name",] "physical_network":"Network label string" - - VF's that needs to be excluded from agent configuration shall be added in - [sriov_nic]/exclude_devices. T-H-T shall configure this. - - Multiple whitelist entries per host are supported. - -* Puppet shall - - * Setup max number of VF's to be configured by the operator - echo required_max_vfs > /sys/bus/pci/devices/.../sriov_numvfs - Puppet will also validate the required_max_vfs, so that it does not go - beyond the supported max on the device. - - * Enable NoopFirewallDriver in the - '/etc/neutron/plugins/ml2/sriov_agent.ini' file. - - [securitygroup] - firewall_driver = neutron.agent.firewall.NoopFirewallDriver - - * Add mappings to the /etc/neutron/plugins/ml2/sriov_agent.ini file. Ex: - physical_device_mappings = fabric0:enp4s0f1 - In this example, fabric0 is the physical network, and enp4s0f1 is the - physical function - -* Puppet shall start the SR-IOV agent on Compute - - * systemctl enable neutron-sriov-nic-agent.service - - * systemctl start neutron-sriov-nic-agent.service - - -Alternatives ------------- - -None - -Security impact ---------------- - -* We have no firewall drivers which support SR-IOV at present. - Security groups will be disabled only for SR-IOV ports in compute hosts. - - -Other End User Impact ---------------------- - -None - -Performance Impact ------------------- - -* SR-IOV provides near native I/O performance for each virtual machine on a - physical server. 
Refer - http://goo.gl/HxZvXX - - -Other Deployer Impact ---------------------- - -* The operator shall ensure that the BIOS supports VT-d/IOMMU virtualization - technology on the compute nodes. - -* IOMMU needs to be enabled in the Compute+SR-IOV nodes. Boot parameters - (intel_iommu=on or amd_iommu=pt) shall be added in the grub.conf, using the - first boot scripts (THT). - -* Post deployment, operator shall - - * Create neutron ports prior to creating VM’s (nova boot) - neutron port-create fabric0_0 --name sr-iov --binding:vnic-type direct - - * Create the VM with the required flavor and SR-IOV port id - Ex: nova boot --flavor m1.small --image --nic port-id= - vnf0 - -Developer Impact ----------------- - -None - -Implementation -============== - - -Assignee(s) ------------ - -Primary assignees: - -* karthiks -* sanjayu - - -Work Items ----------- - -* Documented above in the Proposed changes - - -Dependencies -============ - -* We are dependent on composable roles as SR-IOV specific changes is something - we would require on specific compute nodes and not generally on all the - nodes. Blueprint - - https://blueprints.launchpad.net/tripleo/+spec/composable-services-within-roles - -Testing -======= - -* Since SR-IOV needs specific hardware support, this feature cant be tested - under CI. We will need third party CI for validating it. - -Documentation Impact -==================== - -* Manual steps that needs to be done by the operator shall be documented. - Ex: configuring BIOS for VT-d, IOMMU, post deploymenent configurations. 
- -Refrences -========= - -* SR-IOV support for virtual networking - https://goo.gl/eKP1oO - -* Enable SR-IOV functionality available in OpenStack - http://docs.openstack.org/liberty/networking-guide/adv_config_sriov.html - -* Introduction to SR-IOV - http://goo.gl/m7jP3 - -* Setup procedure for CPU pinning and NUMA topology - http://goo.gl/TXxuhv - -* /sys/bus/pci/devices/.../sriov_totalvfs - This file appears when a physical - PCIe device supports SR-IOV. - https://www.kernel.org/doc/Documentation/ABI/testing/sysfs-bus-pci - diff --git a/specs/newton/undercloud-upgrade.rst b/specs/newton/undercloud-upgrade.rst deleted file mode 100644 index d6866b8c..00000000 --- a/specs/newton/undercloud-upgrade.rst +++ /dev/null @@ -1,272 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -================== -Undercloud Upgrade -================== - -https://blueprints.launchpad.net/tripleo/+spec/undercloud-upgrade - -Our currently documented upgrade path for the undercloud is very problematic. -In fact, it doesn't work. A number of different patches are attempting to -address this problem (see the `References`_ section), but they all take slightly -different approaches that are not necessarily compatible with each other. - -Problem Description -=================== - -The undercloud upgrade must be carefully orchestrated. A few of the problems -that can be encountered during an undercloud upgrade if things are not done -or not done in the proper order: - -#. Services may fail and get stuck in a restart loop - -#. Service databases may not be properly upgraded - -#. Services may fail to stop and prevent the upgraded version from starting - -Currently there is not agreement over who should be responsible for running -the various steps of the undercloud upgrade. Getting everyone on the same -page regarding this is the ultimate goal of this spec. 
- -Also of note is the MariaDB major version update flow from -`Upgrade documentation (under and overcloud)`_. This will need to be -addressed as part of whatever upgrade solution we decide to pursue. - -Proposed Change -=============== - -I'm going to present my proposed solution here, but will try to give a fair -overview of the other proposals in the `Alternatives`_ section. Others -should feel free to push modifications or follow-ups if I miss anything -important, however. - -Overview --------- - -Services must be stopped before their respective package update is run. -This is because the RPM specs for the services include a mandatory restart to -ensure that the new code is running after the package is updated. On a major -version upgrade, this can and does result in broken services because the config -files are not always forward compatible, so until Puppet is run again to -configure them appropriately the service cannot start. The broken services -can cause other problems as well, such as the yum update taking an excessively -long time because it times out waiting for the service to restart. It's worth -noting that this problem does not exist on an HA overcloud because Pacemaker -stubs out the service restarts in the systemd services so the package update -restart becomes a noop. - -Because the undercloud is not required to have extremely high uptime, I am in -favor of just stopping all of the services, updating all the packages, then -re-running the undercloud install to apply the new configs and start the -services again. This ensures that the services are not restarted by the -package update - which only happens if the service was running at the time of -the update - and that there is no chance of an old version of a service being -left running and interfering with the new version, as can happen when moving -a service from a standalone API process to httpd. - -instack-undercloud will be responsible for implementing the process described -above. 
However, to avoid complications with instack-undercloud trying to -update itself, tripleoclient will be responsible for updating -instack-undercloud and its dependencies first. This two-step approach -should allow us to sanely use an older tripleoclient to run the upgrade -because the code in the client will be minimal and should not change from -release to release. Upgrade-related backports to stable clients should not -be needed in any foreseeable case. Any potential version-specific logic can -live in instack-undercloud. The one exception being that we may need to -initially backport this new process to the previous stable branch so we can -start using it without waiting an entire cycle. Since the current upgrade -process does not work correctly there, I think this would be a valid bug fix -backport. - -A potential drawback of this approach is that it will not automatically -trigger the Puppet service db-syncs because Puppet is not aware that the -version has changed if we update the packages separately. However, I feel -this is a case we need to handle sanely anyway in case a package is updated -outside Puppet either intentionally or accidentally. To that end, we've -already merged a patch to always run db-syncs on the undercloud since they're -idempotent anyway. See `Stop all services before upgrading`_ for a link to -the patch. - -MariaDB -------- - -Regarding the MariaDB issue mentioned above, I believe that regardless of the -approach we take, we should automate the dump and restore of the database as -much as possible. Either solution should be able to look at the version of -mariadb before yum update and the version after, and decide whether the db -needs to be dumped. If a user updates the package manually outside the -undercloud upgrade flow then they will be responsible for the db upgrade -themselves. 
I think this is the best we can do, short of writing some sort -of heuristic that can figure out whether the existing db files are for an -older version of MariaDB and doing the dump/restore based on that. - -Updates vs. Upgrades --------------------- - -I am also proposing that we not differentiate between minor updates and major -upgrades on the undercloud. Because we don't need to be as concerned with -uptime there, any additional time required to treat all upgrades as a -potential major version upgrade should be negligible, and it avoids us -having to maintain and test multiple paths. - -Additionally, the difference between a major and minor upgrade becomes very -fuzzy for anyone upgrading between versions of master. There may be db -or rpc changes that require the major upgrade flow anyway. Also, the whole -argument assumes we can even come up with a sane, yet less-invasive update -strategy for the undercloud anyway, and I think our time is better spent -elsewhere. - -Alternatives ------------- - -As shown in `Don't update whole system on undercloud upgrade`_, another -option is to limit the manual yum update to just instack-undercloud and make -Puppet responsible for updating everything else. This would allow Puppet -to handle all of the upgrade logic internally. As of this writing, there is -at least one significant problem with the patch as proposed because it does -not update the Puppet modules installed on the undercloud, which leaves us -in a chicken and egg situation with a newer instack-undercloud calling older -Puppet modules to run the update. I believe this could be solved by also -updating the Puppet modules along with instack-undercloud. - -Drawbacks of this approach would be that each service needs to be orchestrated -correctly in Puppet (this could also be a feature, from a Puppet CI -perspective), and it does not automatically handle things like services moving -from standalone to httpd. 
This could be mitigated by the undercloud upgrade -CI job catching most such problems before they merge. - -I still personally feel this is more complicated than the proposal above, but -I believe it could work, and as noted could have benefits for CI'ing upgrades -in Puppet modules. - -There is one other concern with this that is less a functional issue, which is -that it significantly alters our previous upgrade methods, and might be -problematic to backport as older versions of instack-undercloud were assuming -an external package update. It's probably not an insurmountable obstacle, but -I do feel it's worth noting. Either approach is going to require some amount -of backporting, but this may require backporting in non-tripleo Puppet modules -which may be more difficult to do. - -Security Impact ---------------- - -No significant security impact one way or another. - -Other End User Impact ---------------------- - -This will likely have an impact on how a user runs undercloud upgrades, -especially compared to our existing documented upgrade method. -Ideally all of the implementation will happen behind the ``openstack undercloud -upgrade`` command regardless of which approach is taken, but even that is a -change from before. - -Performance Impact ------------------- - -The method I am suggesting can do an undercloud upgrade in 20-25 -minutes end-to-end in a scripted CI job. - -The performance impact of the Puppet approach is unknown to me. - -The performance of the existing method where service packages are updated with -the service still running is terrible - upwards of two hours for a full -upgrade in some cases, assuming the upgrade completes at all. This is largely -due to the aforementioned problem with services restarting before their config -files have been updated. - -Other Deployer Impact ---------------------- - -Same as the end user impact. In this case I believe they're the same person. 
- -Developer Impact ----------------- - -Discussed somewhat in the proposals, but I believe my approach is a little -simpler from the developer perspective. They don't have to worry about the -orchestration of the upgrade, they only have to provide a valid configuration -for a given version of OpenStack. The one drawback is that if we add any new -services on the undercloud, their db-sync must be wired into the "always run -db-syncs" list. - - -Implementation -============== - -Assignee(s) ------------ - -Primary assignees: - -* bnemec -* EmilienM - -Other contributors (I'm essentially listing everyone who has been involved in -upgrade work so far): - -* lbezdick -* bandini -* marios -* jistr - -Work Items ----------- - -* Implement an undercloud upgrade CI job to test upgrades. -* Implement the selected approach in the undercloud upgrade command. - - -Dependencies -============ - -None - -Testing -======= - -A CI job is already underway. See `Undercloud Upgrade CI Job`_. This should -provide reasonable coverage on a per-patch basis. We may also want to test -undercloud upgrades in periodic jobs to ensure that it is possible to deploy -an overcloud with an upgraded undercloud. This probably takes too long to be -done in the regular CI jobs, however. - -There has also been discussion of running Tempest API tests on the upgraded -undercloud, but I'm unsure of the status of that work. It would be good to -have in the standalone undercloud upgrade job though. - - -Documentation Impact -==================== - -The docs will need to be updated to reflect the new upgrade method. Hopefully -this will be as simple as "Run openstack undercloud upgrade", but that remains -to be seen. 
- - -References -========== - -Stop all services before upgrading ----------------------------------- -Code: https://review.openstack.org/331804 - -Docs: https://review.openstack.org/315683 - -Always db-sync: https://review.openstack.org/#/c/346138/ - -Don't update whole system on undercloud upgrade ------------------------------------------------ -https://review.openstack.org/327176 - -Upgrade documentation (under and overcloud) -------------------------------------------- -https://review.openstack.org/308985 - -Undercloud Upgrade CI Job -------------------------- -https://review.openstack.org/346995 diff --git a/specs/newton/validations.rst b/specs/newton/validations.rst deleted file mode 100644 index 4055c15c..00000000 --- a/specs/newton/validations.rst +++ /dev/null @@ -1,159 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -============================== -TripleO Deployment Validations -============================== - -We need ways in TripleO for performing validations at various stages of the -deployment. - -Problem Description -=================== - -TripleO deployments, and more generally all OpenStack deployments, are complex, -error prone, and highly dependent on the environment. An appropriate set of -tools can help engineers to identify potential problems as early as possible -and fix them before going further with the deployment. - -People have already developed such tools [1], however they appear more like -a random collection of scripts than a well integrated solution within TripleO. -We need to expose the validation checks from a library so they can be consumed -from the GUI or CLI without distinction and integrate flawlessly within TripleO -deployment workflow. - -Proposed Change -=============== - -We propose to extend the TripleO Overcloud Deployment Mistral workflow [2] to -include Actions for validation checks. 
- -These actions will need at least to: - -* List validations -* Run and stop validations -* Get validation status -* Persist and retrieve validation results -* Permit grouping validations by 'deployment stage' and execute group operations - -Running validations will be implemented in a workflow to ensure the nodes meet -certain expectations. For example, a baremetal validation may require the node -to boot on a ramdisk first. - -Mistral workflow execution can be started with the `mistral execution-create` -command and can be stopped with the `mistral execution-update` command by -setting the workflow status to either SUCCESS or ERROR. - -Every run of the workflow (workflow execution) is stored in Mistral's DB and -can be retrieved for later use. The workflow execution object contains all -information about the workflow and its execution, including all output data and -statuses for all the tasks composing the workflow. - -By introducing a reasonable validation workflows naming, we are able to use -workflow names to identify stage at which the validations should run and -trigger all validations of given stage (e.g. -tripleo.validation.hardware.undercloudRootPartitionDiskSizeCheck) - -Using the naming conventions, the user is also able to register a new -validation workflow and add it to the existing ones. - -Alternatives ------------- - -One alternative is to ship a collection of scripts within TripleO to be run by -engineers at different stages of the deployment. This solution is not optimal -because it requires a lot of manual work and does not integrate with the UI. - -Another alternative is to build our own API, but it would require significantly -more effort to create and maintain. This topic has been discussed at length on -the mailing list. 
- -Security Impact ---------------- - -The whole point behind the validations framework is to permit running scripts -on the nodes, thus providing access from the control node to the deployed nodes -at different stages of the deployment. Special care needs to be taken to grant -access to the target nodes using secure methods and ensure only trusted scripts -can be executed from the library. - -Other End User Impact ---------------------- - -We expect reduced deployment time thanks to early issue detection. - -Performance Impact ------------------- - -None. - -Other Deployer Impact ---------------------- - -None. - -Developer Impact ----------------- - -Developers will need to keep the TripleO CI updated with changes, and will be -responsible for fixing the CI as needed. - - -Implementation -============== - -Assignee(s) ------------ - -Primary assignees: - -* shadower -* mandre - -Work Items ----------- - -The work items required are: - -* Develop the tripleo-common Mistral actions that provide all of the - functionality required for the validation workflow. -* Write an initial set of validation checks based on real deployment - experience, starting by porting existing validations [1] to work with the - implemented Mistral actions. - -All patches that implement these changes must pass CI and add additional tests as -needed. - - -Dependencies -============ - -We are dependent upon the tripleo-mistral-deployment-library [2] work. - - -Testing -======= - -The TripleO CI should be updated to test the updated tripleo-common library. - - -Documentation Impact -==================== - -Mistral Actions and Workflows are sort of self-documenting and can be easily -introspected by running 'mistral workflow-list' or 'mistral action-list' on the -command line. The updated library however will have to be well-documented and -meet OpenStack standards. Documentation will be needed in both the -tripleo-common and tripleo-docs repositories. 
- - -References -========== - -* [1] Set of tools to help detect issues during TripleO deployments: - https://github.com/rthallisey/clapper -* [2] Library support for TripleO Overcloud Deployment Via Mistral: - https://specs.openstack.org/openstack/tripleo-specs/specs/mitaka/tripleo-mistral-deployment-library.html diff --git a/specs/newton/workflow-simplification.rst b/specs/newton/workflow-simplification.rst deleted file mode 100644 index 2aef17ab..00000000 --- a/specs/newton/workflow-simplification.rst +++ /dev/null @@ -1,212 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -========================================== -Workflow Simplification -========================================== - -https://blueprints.launchpad.net/tripleo/+spec/workflow-simplification - -The TripleO workflow is still too complex for many (most?) users to follow -successfully. There are some fairly simple steps we can take to improve -that situation. - -Problem Description -=================== - -The current TripleO workflow grew somewhat haphazardly out of a collection -of bash scripts that originally made up instack-undercloud. These scripts -started out life as primarily a proof of concept exercise to demonstrate -that the idea was viable, and while the steps still work fine when followed -correctly, it seems "when followed correctly" is too difficult today, at least -based on the feedback I'm hearing from users. - -Proposed Change -=============== - -Overview --------- - -There seem to be a number of low-hanging fruit candidates for cleanup. In the -order in which they appear in the docs, these would be: - -#. **Node registration** Why is this two steps? Is there ever a case where we - would want to register a node but not configure it to be able to boot? - If there is, is it a significant enough use case to justify the added - step every time a user registers nodes? 
- - I propose that we configure boot on newly registered nodes automatically. - Note that this will probably require us to also update the boot - configuration when updating images, but again this is a good workflow - improvement. Users are likely to forget to reconfigure their nodes' boot - images after updating them in Glance. - - .. note:: This would not remove the ``openstack baremetal configure boot`` - command for independently updating the boot configuration of - Ironic nodes. In essence, it would just always call the - configure boot command immediately after registering nodes so - it wouldn't be a mandatory step. - - This also means that the deploy ramdisk would have to be built - and loaded into Glance before registering nodes, but our - documented process already satisfies that requirement, and we - could provide a --no-configure-boot param to import for cases - where someone wanted to register nodes without configuring them. - -#. **Flavor creation** Nowhere in our documentation do we recommend or - provide guidance on customizing the flavors that will be used for - deployment. While it is possible to deploy solely based on flavor - hardware values (ram, disk, cpu), in practice it is often simpler - to just assign profiles to Ironic nodes and have scheduling done solely - on that basis. This is also the method we document at this time. - - I propose that we simply create all of the recommended flavors at - undercloud install time and assign them the appropriate localboot and - profile properties at that time. These flavors would be created with the - minimum supported cpu, ram, and disk values so they would work for any - valid hardware configuration. This would also reduce the possibility of - typos in the flavor creation commands causing avoidable deployment - failures. - - These default flavors can always be customized if a user desires, so there - is no loss of functionality from making this change. - -#. 
**Node profile assignment** This is not currently part of the standard - workflow, but in practice it is something we need to be doing for most - real-world deployments with heterogeneous hardware for controllers, - computes, cephs, etc. Right now the documentation requires running an - ironic node-update command specifying all of the necessary capabilities - (in the manual case anyway, this section does not apply to the AHC - workflow). - - os-cloud-config does have support for specifying the node profile in - the imported JSON file, but to my knowledge we don't mention that anywhere - in the documentation. This would be the lowest of low-hanging - fruit since it's simply a question of documenting something we already - have. - - We could even give the generic baremetal flavor a profile and have our - default instackenv.json template include that[1], with a note that it can - be overridden to a more specific profile if desired. If users want to - change a profile assignment after registration, the node update command - for ironic will still be available. - - 1. For backwards compatibility, we might want to instead create a new flavor - named something like 'default' and use that, leaving the old baremetal - flavor as an unprofiled thing for users with existing unprofiled nodes. - -Alternatives ------------- - -tripleo.sh -~~~~~~~~~~ -tripleo.sh addresses the problem to some extent for developers, but it is -not a viable option for real world deployments (nor should it be IMHO). -However, it may be valuable to look at tripleo.sh for guidance on a simpler -flow that can be more easily followed, as that is largely the purpose of the -script. A similar flow codified into the client/API would be a good result -of these proposed changes. 
- -Node Registration -~~~~~~~~~~~~~~~~~ -One option Dmitry has suggested is to make the node registration operation -idempotent, so that it can be re-run any number of times and will simply -update the details of any already registered nodes. He also suggested -moving the bulk import functionality out of os-cloud-config and (hopefully) -into Ironic itself. - -I'm totally in favor of both these options, but I suspect that they will -represent a significantly larger amount of work than the other items in this -spec, so I think I'd like that to be addressed as an independent spec since -this one is already quite large. - -Security Impact ---------------- - -Minimal, if any. This is simply combining existing deployment steps. If we -were to add a new API for node profile assignment that would have some slight -security impact as it would increase our attack surface, but I feel even that -would be negligible. - -Other End User Impact ---------------------- - -Simpler deployments. This is all about the end user. - -Performance Impact ------------------- - -Some individual steps may take longer, but only because they will be -performing actions that were previously in separate steps. In aggregate -the process should take about the same time. - -Other Deployer Impact ---------------------- - -If all of these suggested improvements are implemented, it will make the -standard deployment process somewhat less flexible. However, in the -Proposed Change section I attempted to address any such new limitations, -and I feel they are limited to the edgiest of edge cases that in most cases -can still be implemented through some extra manual steps (which likely would -have been necessary anyway - they are edge cases after all). - -Developer Impact ----------------- - -There will be some changes in the basic workflow, but as noted above the same -basic steps will be getting run. 
Developers will see some impact from the -proposed changes, but as they will still likely be using tripleo.sh for an -already simplified workflow it should be minimal. - -Implementation -============== - -Assignee(s) ------------ - -bnemec - -Work Items ----------- - -* Configure boot on newly registered nodes automatically. -* Reconfigure boot on nodes after deploy images are updated. -* Remove explicit step for configure boot from the docs, but leave the actual - function itself in the client so it can still be used when needed. -* Create flavors at undercloud install time and move documentation on creating - them manually to the advanced section of the docs. -* Add a 'default' flavor to the undercloud. -* Update the sample instackenv.json to include setting a profile (by default, - the 'default' profile associated with the flavor from the previous step). - - - -Dependencies -============ - -Nothing that I'm aware of. - - -Testing -======= - -As these changes are implemented, we would need to update tripleo.sh to match -the new flow, which will result in the changes being covered in CI. - - -Documentation Impact -==================== - -This should reduce the number of steps in the basic deployment flow in the -documentation. It is intended to simplify the documentation. - - -References -========== - -Proposed change to create flavors at undercloud install time: -https://review.openstack.org/250059 -https://review.openstack.org/251555 diff --git a/specs/ocata/capture-environment-status-and-logs.rst b/specs/ocata/capture-environment-status-and-logs.rst deleted file mode 100644 index 3c7f836b..00000000 --- a/specs/ocata/capture-environment-status-and-logs.rst +++ /dev/null @@ -1,133 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. 
- - http://creativecommons.org/licenses/by/3.0/legalcode - -=========================================== -Tool to Capture Environment Status and Logs -=========================================== - -https://blueprints.launchpad.net/tripleo/+spec/capture-environment-status-and-logs - -To aid in troubleshooting, debugging, and reproducing issues we should create -or integrate with a tool that allows an operator or developer to collect and -generate a single bundle that provides the state and history of a deployed -environment. - -Problem Description -=================== - -Currently there is no single command that can be run via either the -tripleoclient or via the UI that will generate a single artifact to be used -to report issues when failures occur. - -* tripleo-quickstart_, tripleo-ci_ and operators collect the logs for bug - reports in different ways. - -* When a failure occurs, many different pieces of information must be collected - to be able to understand where the failure occurred. If the logs required are - not asked for, an operator may not know what to provide for - troubleshooting. - - -Proposed Change -=============== - -Overview --------- - -TripleO should provide a unified method for collecting status and logs from the -undercloud and overcloud nodes. The tripleoclient should support executing a -workflow to run status and log collection processes via sosreport_. The output -of the sosreport_ should be collected and bundled together in a single location. - -Alternatives ------------- - -Currently, various shell scripts and ansible tasks are used by the CI processes -to perform log collection. These scripts are not maintained in combination with -the core TripleO and may require additional artifacts that are not installed by -default with a TripleO environment. - -tripleo-quickstart_ uses ansible-role-tripleo-collect-logs_ to collect logs. - -tripleo-ci_ uses bash scripts to collect the logs. - -Fuel uses timmy_. 
- -Security Impact ---------------- - -The logs and status information may be considered sensitive information. The -process to trigger status and logs should require authentication. Additionally -we should provide a basic password protection mechanism for the bundle of logs -that is created by this process. - -Other End User Impact ---------------------- - -None. - -Performance Impact ------------------- - -None. - -Other Deployer Impact ---------------------- - -None. - -Developer Impact ----------------- - -None. - -Implementation -============== - -Assignee(s) ------------ - -Primary assignee: - alex-schultz - - -Work Items ----------- - -* Ensure OpenStack `sosreport plugins`_ are current. -* Write a TripleO sosreport plugin. -* Write a `Mistral workflow`_ to execute sosreport and collect artifacts. -* Write python-tripleoclient_ integration to execute Mistral workflows. -* Update documentation and CI scripts to leverage new collection method. - - -Dependencies -============ - -None. - -Testing -======= - -As part of CI testing, the new tool should be used to collect environment logs. - -Documentation Impact -==================== - -Documentation should be updated to reflect the standard ways to collect the logs -using the tripleo client. - -References -========== - -.. _ansible-role-tripleo-collect-logs: https://github.com/redhat-openstack/ansible-role-tripleo-collect-logs -.. _Mistral workflow: http://docs.openstack.org/developer/mistral/terminology/workflows.html -.. _python-tripleoclient: https://github.com/openstack/python-tripleoclient -.. _tripleo-ci: https://github.com/openstack-infra/tripleo-ci -.. _tripleo-quickstart: https://github.com/openstack/tripleo-quickstart -.. _sosreport: https://github.com/sosreport/sos -.. _sosreport plugins: https://github.com/sosreport/sos/tree/master/sos/plugins -.. 
_timmy: https://github.com/openstack/timmy diff --git a/specs/ocata/composable-ha-architecture.rst b/specs/ocata/composable-ha-architecture.rst deleted file mode 100644 index 8efb59f6..00000000 --- a/specs/ocata/composable-ha-architecture.rst +++ /dev/null @@ -1,201 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -========================== -Composable HA architecture -========================== - -https://blueprints.launchpad.net/tripleo/+spec/composable-ha - -Since Newton, we have the following services managed by pacemaker: - -* Cloned and master/slave resources: - galera, redis, haproxy, rabbitmq - -* Active/Passive resources: - VIPs, cinder-volume, cinder-backup, manila-share - -It is currently not possible to compose the above service in the same -way like we do today via composable roles for the non-pacemaker services -This spec aims to address this limitation and let the operator be more flexible -in the composition of the control plane. - -Problem Description -=================== - -Currently tripleo has implemented no logic whatsoever to assign specific pacemaker -managed services to roles/nodes. - -* Since we do not have a lot in terms of hard performance data, we typically support - three controller nodes. This is perceived as a scalability limiting factor and there is - a general desire to be able to assign specific nodes to specific pacemaker-managed - services (e.g. three nodes only for galera, five nodes only for rabbitmq) - -* Right now if the operator deploys on N controllers he will get N cloned instances - of the non-A/P pacemaker services on the same N nodes. We want to be able to - be much more flexible. E.g. deploy galera on the first 3 nodes, rabbitmq on the - remaining 5 nodes, etc. - -* It is also desirable for the operator to be able to choose on which nodes the A/P - resources will run. 
- -* We also currently have a scalability limit of 16 nodes for the pacemaker cluster. - -Proposed Change -=============== - -Overview --------- - -The proposal here is to keep the existing cluster in its current form, but to extend -it in two ways: -A) Allow the operator to include a specific service in a custom node and have pacemaker -run that resource only on that node. E.g. the operator can define the following custom nodes: - -* Node A - pacemaker - galera - -* Node B - pacemaker - rabbitmq - -* Node C - pacemaker - VIPs, cinder-volume, cinder-backup, manila-share, redis, haproxy - -With the above definition the operator can instantiate any number of A, B or C nodes -and scale up to a total of 16 nodes. Pacemaker will place the resources only on -the appropriate nodes. - -B) Allow the operator to extend the cluster beyond 16 nodes via pacemaker remote. -For example an operator could define the following: - -* Node A - pacemaker - galera - rabbitmq - -* Node B - pacemaker-remote - redis - -* Node C - pacemaker-remote - VIPs, cinder-volume, cinder-backup, manila-share, redis, haproxy - -This second scenario would allow an operator to extend beyond the 16 nodes limit. -The only difference to scenario 1) is the fact that the quorum of the cluster is -obtained only by the nodes from Node A. - -The way this would work is that the placement on nodes would be controlled by location -rules that would work based on node properties matching. - -Alternatives ------------- - -A bunch of alternative designs were discussed and evaluated: -A) A cluster per service: - -One possible architecture would be to create a separate pacemaker cluster for -each HA service. 
This has been ruled out mainly for the following reasons: - -* It cannot be done outside of containers -* It would create a lot of network traffic - -* It would increase the management/monitoring of the pacemaker resources and clusters - exponentially - -* Each service would still be limited to 16 nodes -* A new container fencing agent would have to be written - -B) A single cluster where only the clone-max property is set for the non A/P services - -This would be still a single cluster, but unlike today where the cloned and -master/slave resources run on every controller we would introduce variables to -control the maximum number of nodes a resource could run on. E.g. -GaleraResourceCount would set clone-max to a value different than the number of -controllers. Example: 10 controllers, galera has clone-max set to 3, rabbit to -5 and redis to 3. -While this would be rather simple to implement and would change very little in the -current semantics, this design was ruled out: - -* We'd still have the 16 nodes limit -* It would not provide fine grained control over which services live on which nodes - -Security Impact ---------------- - -No changes regarding security aspects compared to the existing status quo. - -Other End User Impact ---------------------- - -No particular impact except added flexibility in placing pacemaker-managed resources. - -Performance Impact ------------------- - -The performance impact here is that with the added scalability it will be possible for -an operator to dedicate specific nodes for certain pacemaker-managed services. -There are no changes in terms of code, only a more flexible and scalable way to deploy -services on the control plane. - -Other Deployer Impact ---------------------- - -This proposal aims to use the same method that the custom roles introduced in Newton -use to tailor the services running on a node. With the very same method it will be possible -to do that for the HA services managed by pacemaker today. 
- -Developer Impact ----------------- - -No impact - -Implementation -============== - -Assignee(s) ------------ - -Primary assignee: - michele - -Other contributors: - cmsj, abeekhof - -Work Items ----------- - -We need to work on the following: - -1. Add location rule constraints support in puppet -2. Make puppet-tripleo set node properties on the nodes where a service profile -3. Create corresponding location rules -4. Add a puppet-tripleo pacemaker-remote profile - -Dependencies -============ - -No additional dependencies are required. - -Testing -======= - -We will need to test the flexible placement of the pacemaker-managed services -within the CI. This can be done within today's CI limitations (i.e. in the three -controller HA job we can make sure that the placement is customized and working) - -Documentation Impact -==================== - -No impact - -References -========== - -Mostly internal discussions within the HA team at Red Hat diff --git a/specs/ocata/containerize-tripleo-overcloud.rst b/specs/ocata/containerize-tripleo-overcloud.rst deleted file mode 100644 index 8590f840..00000000 --- a/specs/ocata/containerize-tripleo-overcloud.rst +++ /dev/null @@ -1,212 +0,0 @@ -=============================== -Deploying TripleO in Containers -=============================== - -https://blueprints.launchpad.net/tripleo/+spec/containerize-tripleo - -Ability to deploy TripleO in Containers. - -Problem Description -=================== - -Linux containers are changing how the industry deploys applications by offering -a lightweight, portable and upgradeable alternative to deployments on a physical -host or virtual machine. - -Since TripleO already manages OpenStack infrastructure by using OpenStack -itself, containers could be a new approach to deploy OpenStack services. It -would change the deployment workflow but could extend upgrade capabilities, -orchestration, and security management. 
- -Benefits of containerizing the openstack services include: - - * Upgrades can be performed by swapping out containers. - * Since the entire software stack is held within the container, - interdependencies do not affect deployments of services. - * Containers define explicit state and data requirements. Ultimately if we - moved to kubernetes all volumes would be off the host making the host - stateless. - * Easy rollback to working containers if upgrading fails. - * Software shipped in each container has been proven to work for this service. - * Mix and match versions of services on the same host. - * Immutable containers provide a consistent environment upon startup. - -Proposed Change -=============== - -Overview --------- - -The intention of this blueprint is to introduce containers as a method of -delivering an OpenStack installation. We currently have a fully functioning -containerized version of the compute node but we would like to extend this to -all services. In addition it should work with the new composable roles work that -has been recently added. - -The idea is to have an interface within the heat templates that adds information -for each service to be started as a container. This container format should -closely resemble the Kubernetes definition so we can possibly transition to -Kubernetes in the future. This work has already been started here: - - https://review.openstack.org/#/c/330659/ - -There are some technology choices that have been made to keep things usable and -practical. These include: - - * Using Kolla containers. Kolla containers are built using the most popular - operating system choices including CentOS, RHEL, Ubuntu, etc. and are a - good fit for our use case. - * We are using a heat hook to start these containers directly via docker. - This minimizes the software required on the node and maps directly to the - current baremetal implementation. 
Also maintaining the heat interface - keeps the GUI functional and allows heat to drive upgrades and changes to - containers. - * Changing the format of container deployment to match Kubernetes for - potential future use of this technology. - * Using CentOS in full (not CentOS Atomic) on the nodes to allow users to - have a usable system for debugging. - * Puppet driven configuration that is mounted into the container at startup. - This allows us to retain our puppet configuration system and operate in - parallel with existing baremetal deployment. - -Bootstrapping -------------- - -Once the node is up and running, there is a systemd service script that runs -which starts the docker agents container. This container has all of the -components needed to bootstrap the system. This includes: - - * heat agents including os-collect-config, os-apply-config etc. - * puppet-agent and modules needed for the configuration of the deployment. - * docker client that connects to host docker daemon. - * environment for configuring networking on the host. - -This containers acts as a self-installing container. Once started, this -container will use os-collect-config to connect back to heat. The heat agents -then perform the following tasks: - - * Set up an etc directory and runs puppet configuration scripts. This - generates all the config files needed by the services in the same manner - it would if run without containers. These are copied into a directory - accessible on the host and by all containerized services. - * Begin starting containerized services and other steps as defined in the - heat template. - -Currently all containers are implemented using net=host to allow the services to -listen directly on the host network(s). This maintains functionality in terms of -network isolation and IPv6. - -Security Impact ---------------- - -There shouldn't be major security impacts from this change. 
The deployment -shouldn't be affected negatively by this change from a security standpoint but -unknown issues might be found. SELinux support is implemented in Docker. - -End User Impact ---------------- - -* Debugging of containerized services will be different as it will require - knowledge about docker (kubernetes in the future) and other tools to access - the information from the containers. -* Possibly provide more options for upgrades and new versions of services. -* It'll allow for service isolation and better dependency management - -Performance Impact ------------------- - -Very little impact: - - * Runtime performance should remain the same. - * We are noticing a slightly longer bootstrapping time with containers but that - should be fixable with a few easy optimizations. - -Deployer Impact ---------------- - -From a deployment perspective very little changes: - * Deployment workflow remains the same. - * There may be more options for versions of different services since we do - not need to worry about interdependency issues with the software stack. - -Upgrade Impact --------------- - -This work aims to allow for resilient, transparent upgrades from baremetal -overcloud deployments to container based ones. - -Initially we need to transition to containers: - * Would require node reboots. - * Automated upgrades should be possible as services are the same, just - containerized. - * Some state may be moved off nodes to centralized storage. Containers very - clearly define required data and state storage requirements. - -Upgrades could be made easier: - * Individual services can be upgraded because of reduced interdependencies. - * It is easier to roll back to a previous version of a service. - * Explicit storage of data and state for containers makes it very clear what - needs to be preserved. Ultimately state information and data will likely - not exist on individual nodes. - -Developer Impact ----------------- - -The developer work flow changes slightly. 
Instead of interacting with the service -via systemd and log files, you will interact with the service via docker. - -Inside the compute node: - * sudo docker ps -a - * sudo docker logs - * sudo docker exec -it /bin/bash - -Implementation -============== - -Assignee(s) - rhallisey - imain - flaper87 - mandre - -Other contributors: - dprince - emilienm - -Work Items ----------- - -* Heat Docker hook that starts containers (DONE) -* Containerized Compute (DONE) -* TripleO CI job (INCOMPLETE - https://review.openstack.org/#/c/288915/) -* Containerized Controller -* Automatically build containers for OpenStack services -* Containerized Undercloud - -Dependencies -============ - -* Composable roles. -* Heat template interface which allows extensions to support containerized - service definitions. - -Testing -======= -TripleO CI would need a new Jenkins job that will deploy an overcloud in -containers by using the selected solution. - -Documentation Impact -==================== -https://github.com/openstack/tripleo-heat-templates/blob/master/docker/README-containers.md - -* Deploying TripleO in containers -* Debugging TripleO containers - -References -========== -* https://docs.docker.com/misc/ -* https://etherpad.openstack.org/p/tripleo-docker-puppet -* https://docs.docker.com/articles/security/ -* http://docs.openstack.org/developer/kolla/ -* https://review.openstack.org/#/c/209505/ -* https://review.openstack.org/#/c/227295/ diff --git a/specs/ocata/gui-deployment-configuration.rst b/specs/ocata/gui-deployment-configuration.rst deleted file mode 100644 index 3dc79ca1..00000000 --- a/specs/ocata/gui-deployment-configuration.rst +++ /dev/null @@ -1,236 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. 
- - http://creativecommons.org/licenses/by/3.0/legalcode - -========================================== -GUI Deployment configuration improvements -========================================== - -TripleO UI deployment configuration is based on enabling environments provided by -deployment plan (tripleo-heat-templates) and letting user set parameter values. - -This spec proposes improvements to this approach. - -Blueprint: https://blueprints.launchpad.net/tripleo/+spec/deployment-configuration-improvements - -Problem Description -=================== - -The general goal of TripleO UI is to guide user through the deployment -process and provide relevant information along the way, so user does not -have to search for a context in documentation or by analyzing TripleO templates. - -There is a set of problems identified with a current deployment configuration -solution. Resolving those problems should lead to improved user experience when -making deployment design decisions. - -The important information about the usage of environment and relevant parameters -is usually included as a comment in environment file itself. This is not consumable by GUI. -We currently use capabilities-map.yaml to define environment meta data to work -around this. - -* As the number of environments is growing it is hard to keep capabilities-map.yaml - up to date. When certain environment is added, capabilities-map.yaml is usually - not updated by the same developer, which leads to inaccuracy in environment - description when added later. - -* The environments depend on each other and potentially collide when used together - -* There are no means to list and let user set parameters relevant to certain - environment. 
These are currently listed as comments in environments - not - consumable by GUI (example: [1]) - -* There are not enough means to organize parameters coming as a result of - heat validate - -* Not all parameters defined in tripleo-heat-templates have correct type set - and don't include all relevant information that Hot Spec provides. - (constraints...) - -* Same parameters are defined in multiple templates in tripleo-heat-templates - but their definition differs - -* List of parameters which are supposed to get auto-generated when value is not - provided by user are hard-coded in deployment workflow - -Proposed Change -=============== - -Overview --------- - -* Propose environment metadata to track additional information about environment - directly as part of the file in Heat (partially in progress [2]). Similar concept is - already present in heat resources [3]. - In the meantime update tripleo-common environment listing feature to read - environments and include environment metadata. - - Each TripleO environment file should define: - - .. code:: - - metadata: - label: - description: - - resource_registry: - ... - - parameter_defaults: - ... - - -* With the environment metadata in place, capabilities-map.yaml purpose would - simplify to defining grouping and dependencies among environments. - -* Implement environment parameter listing in TripleO UI - -* To organize parameters we should use ParameterGroups. - (related discussion: [4]) - -* Make sure that same parameters are defined the same way across tripleo-heat-templates - There may be exceptions but in those cases it must be sure that two templates which - define same parameter differently won't be used at the same time. - -* Update parameter definitions in TripleO templates, so the type actually matches - expected parameter value (e.g. 'string' vs 'boolean') This will result in correct - input type being used in GUI - -* Define a custom constraint for parameters which are supposed to be auto-generated. 
- -Alternatives ------------- - -Potential alternatives to listing environment related parameters are: - -* Use Parameter Groups to match template parameters to an environment. This - solution ties the template with an environment and clutters the template. - - -* As the introduction of environment metadata depends on having this feature accepted - and implemented in Heat, alternative solution is to keep title and description in - capabilities map as we do now - -Security Impact ---------------- - -No significant security impact - -Other End User Impact ---------------------- - -Resolving mentioned problems greatly improves the TripleO UI workflow and -makes deployment configuration much more streamlined. - -Performance Impact ------------------- - -Described approach allows to introduce caching of Heat validation which is -currently the most expensive operation. Cache gets invalid only in case -when a deployment plan is updated or switched. - -Other Deployer Impact ---------------------- - -Same as End User Impact - -Developer Impact ----------------- - -None - -Implementation -============== - -Assignee(s) ------------ - -Primary assignee: - jtomasek - -Other contributors: - rbrady - -Work Items ----------- - -* tripleo-heat-templates: update environments to include metadata (label, - description), update parameter_defaults to include all parameters relevant - to the environment - - blueprint: https://blueprints.launchpad.net/tripleo/+spec/update-environment-files-with-related-parameters - -* tripleo-heat-templates: update capabilities-map.yaml to map environment - grouping and dependencies - - blueprint: https://blueprints.launchpad.net/tripleo/+spec/update-capabilities-map-to-map-environment-dependencies - -* tripleo-heat-templates: create parameter groups for deprecated and internal - parameters - -* tripleo-heat-templates: make sure that same parameters have the same definition - - bug: https://bugs.launchpad.net/tripleo/+bug/1640243 - -* 
tripleo-heat-templates: make sure type is properly set for all parameters - - bug: https://bugs.launchpad.net/tripleo/+bug/1640248 - -* tripleo-heat-templates: create custom constraint for autogenerated parameters - - bug: https://bugs.launchpad.net/tripleo/+bug/1636987 - -* tripleo-common: update environments listing to combine capabilities map with - environment metadata - - blueprint: https://blueprints.launchpad.net/tripleo/+spec/update-capabilities-map-to-map-environment-dependencies - -* tripleo-ui: Environment parameters listing - - blueprint: https://blueprints.launchpad.net/tripleo/+spec/get-environment-parameters - -* tripleo-common: autogenerate values for parameters with custom constraint - - bug: https://bugs.launchpad.net/tripleo/+bug/1636987 - -* tripleo-ui: update environment configuration to reflect API changes, provide means to display and configure environment parameters - - blueprint: https://blueprints.launchpad.net/tripleo/+spec/tripleo-ui-deployment-configuration - -* tripleo-ui: add client-side parameter validations based on parameter type - and constraints - - bugs: https://bugs.launchpad.net/tripleo/+bug/1638523, https://bugs.launchpad.net/tripleo/+bug/1640463 - -* tripleo-ui: don't show parameters included in deprecated and internal groups - -Dependencies -============ - -* Heat Environment metadata discussion [2] - -* Heat Parameter Groups discussion [3] - -Testing -======= - -The changes should be covered by unit tests in tripleo-common and GUI - -Documentation Impact -==================== - -Part of this effort should be proper documentation of how TripleO environments -as well as capabilities-map.yaml should be defined - -References -========== - -[1] https://github.com/openstack/tripleo-heat-templates/blob/b6a4bdc3e4db97785b930065260c713f6e70a4da/environments/storage-environment.yaml - -[2] http://lists.openstack.org/pipermail/openstack-dev/2016-June/097178.html - -[3] 
http://docs.openstack.org/developer/heat/template_guide/hot_spec.html#resources-section. - -[4] http://lists.openstack.org/pipermail/openstack-dev/2016-August/102297.html diff --git a/specs/ocata/gui-plan-import-export.rst b/specs/ocata/gui-plan-import-export.rst deleted file mode 100644 index c1f14807..00000000 --- a/specs/ocata/gui-plan-import-export.rst +++ /dev/null @@ -1,154 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -================================== -GUI: Import/Export Deployment Plan -================================== - -Add two features to TripleO UI: - -* Import a deployment plan with a Mistral environment -* Export a deployment plan with a Mistral environment - -Blueprint: https://blueprints.launchpad.net/tripleo/+spec/gui-plan-import-export - - -Problem Description -=================== - -Right now, the UI only supports simple plan creation. The user needs to upload -the plan files, make the environment selection and set the parameters. We want -to add a plan import feature which would allow the user to import the plan -together with a complete Mistral environment. This way the selection of the -environment and parameters would be stored and automatically imported, without -any need for manual configuration. - -Conversely, we want to allow the user to export a plan together with a Mistral -environment, using the UI. - - -Proposed Change -=============== - -Overview --------- - -In order to identify the Mistral environment when importing a plan, I propose -we use a JSON formatted file and name it 'plan-environment.json'. This file -should be uploaded to the Swift container together with the rest of the -deployment plan files. The convention of calling the file with a fixed name is -enough for it to be detected. 
Once this file is detected by the tripleo-common -workflow handling the plan import, the workflow then creates (or updates) the -Mistral environment using the file's contents. In order to avoid possible future -unintentional overwriting of environment, the workflow should delete this file -once it has created (or updated) the Mistral environment with its contents. - -Exporting the plan should consist of downloading all the plan files from the -swift container, adding the plan-environment.json, and packing it all up in -a tarball. - -Alternatives ------------- - -One alternative is what we have now, i.e. making the user perform all the -environment configuration settings and parameter settings manually each time. -This is obviously very tedious and the user experience suffers greatly as a -result. - -The alternative to deleting the plan-environment.json file upon its -processing is to leave in the swift container and keep it in sync with all -the updates that might happen thereafter. This can get very complicated and is -entirely unnecessary, so deleting the file instead is a better choice. - -Security Impact ---------------- - -None - -Other End User Impact ---------------------- - -None - -Performance Impact ------------------- - -The import and export features will only be triggered on demand (user clicks -on button, or similar), so they will have no performance impact on the rest -of the application. 
- -Other Deployer Impact ---------------------- - -None - -Developer Impact ----------------- - -None - - -Implementation -============== - -Assignee(s) ------------ - -Primary assignee: - akrivoka - -Other contributors: - jtomasek - d0ugal - -Work Items ----------- - -* tripleo-common: Enhance plan creation/update to consume plan-environment.json - - blueprint: https://blueprints.launchpad.net/tripleo/+spec/enhance-plan-creation-with-plan-environment-json - -* tripleo-common: Add plan export workflow - - blueprint: https://blueprints.launchpad.net/tripleo/+spec/plan-export-workflow - -* python-tripleoclient: Add plan export command - - blueprint: https://blueprints.launchpad.net/tripleo/+spec/plan-export-command - -* tripleo-ui: Integrate plan export into UI - - bluerpint: https://blueprints.launchpad.net/tripleo/+spec/plan-export-gui - -Note: We don't need any additional UI (neither GUI nor CLI) for plan import - the -existing GUI elements and CLI command for plan creation can be used for import -as well. - - -Dependencies -============ - -None - - -Testing -======= - -The changes should be covered by unit tests in tripleo-ui, tripleo-common and -python-tripleoclient. - - -Documentation Impact -==================== - -User documentation should be enhanced by adding instructions on how these two -features are to be used. - - -References -========== - -None \ No newline at end of file diff --git a/specs/ocata/om-dual-backends.rst b/specs/ocata/om-dual-backends.rst deleted file mode 100644 index c8d110a6..00000000 --- a/specs/ocata/om-dual-backends.rst +++ /dev/null @@ -1,190 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. 
- - http://creativecommons.org/licenses/by/3.0/legalcode - -============================================================ -Enable deployment of alternative backends for oslo.messaging -============================================================ - -Include the URL of your launchpad blueprint: - -https://blueprints.launchpad.net/tripleo/+spec/om-dual-backends - -This spec describes adding two functional capabilities to the messaging -services of an overcloud deployment. The first capability is to enable -the selection and configuration of separate messaging backends for -oslo.messaging RPC and Notification communications. The second -capability is to introduce support for a brokerless messaging backend -for oslo.messaging RPC communications via the AMQP 1.0 Apache -qpid-dispatch-router. - - -Problem Description -=================== - -The oslo.messaging library supports the deployment of dual messaging system -backends. This enables alternative backends to be deployed for RPC and -Notification messaging communications. Users have identified the -constraints of using a store and forward (broker based) messaging system for RPC -communications and are seeking direct messaging (brokerless) -approaches to optimize the RPC messaging pattern. In addition to -operational challenges, emerging distributed cloud architectures -define requirements around peer-to-peer relationships and geo-locality -that can be addressed through intelligent messaging transport routing -capabilities such as is provided by the AMQP 1.0 qpid-dispatch-router. - - -Proposed Change -=============== - -Overview --------- - -Provide the capability to select and configure alternative -transport_url's for oslo.messaging RPCs and Notifications across -overcloud OpenStack services. - -Retain the current default behavior to deploy the rabbitMQ server as -the messaging backend for both RPC and Notification communications. 
- -Introduce an alternative deployment of the qpid-dispatch-router as the -messaging backend for RPC communications. - -Utilize the oslo.messaging AMQP 1.0 driver for delivering RPC services -via the dispatch-router messaging backend. - -Alternatives ------------- - -The configuration of dual backends for oslo.messaging could be -performed post overcloud deployment. - -Security Impact ---------------- - -The end result of using the AMQP 1.0 dispatch-router as an alternative -messaging backend for oslo.messaging RPC communications should be the -same from a security standpoint. The driver/router solution provides -SSL and SASL support in parity to the current rabbitMQ server deployment. - -Other End User Impact ---------------------- - -The configuration of the dual backends for RPC and Notification -messaging communications should be transparent to the operation of the OpenStack -services. - -Performance Impact ------------------- - -Using a dispatch-router mesh topology rather than broker clustering -for messaging communications will have a positive impact on -performance and scalability by: - -* Directly expanding connection capacity - -* Providing parallel communication flows across the mesh - -* Increasing aggregate message transfer capacity - -* Improving resource utilization of messaging infrastructure - -Other Deployer Impact ---------------------- - -The deployment of the dispatch-router, however, will be new to -OpenStack operators. Operators will need to learn the -architectural differences as compared to a broker cluster -deployment. This will include capacity planning, monitoring, -troubleshooting and maintenance best practices. - -Developer Impact ----------------- - -Support for alternative oslo.messaging backends and deployment of -qpid-dispatch-router in addition to rabbitMQ should be implemented for -tripleo-quickstart. 
- - -Implementation -============== - -Assignee(s) ------------ - -Primary assignee: - -* John Eckersberg - -* Andy Smith - - -Work Items ----------- - -* Update overcloud templates for dual backends and dispatch-router service - -* Add dispatch-router packages to overcloud image elements - -* Add services template for dispatch-router - -* Update OpenStack services base templates to select and configure - transport_urls for RPC and Notification - -* Deploy dispatch-router for controller and compute for topology - -* Test failure and recovery scenarios for dispatch-router - -Transport Configuration ------------------------ - -The oslo.messaging configuration options define a default and -additional notification transport_url. If the notification -transport_url is not specified, oslo.messaging will use the default -transport_url for both RPC and Notification messaging communications. - -The transport_url parameter is of the form:: - - transport://user:pass@host1:port[,hostN:porN]/virtual_host - -Where the transport scheme specifies the RPC or Notification backend as -one of rabbit or amqp, etc. Oslo.messaging is deprecating the host, -port and auth configuration options. All drivers will get these -options via the transport_url. - - -Dependencies -============ - -Support for dual backends in and AMQP 1.0 driver integration -with the dispatch-router depends on oslo.messaging V5.10 or later. - - -Testing -======= - -In order to test this in CI, an environment will be needed where dual -messaging system backends (e.g. rabbitMQ server and dispatch-router -server) are deployed. Any existing hardware configuration should be -appropriate for the dual backend deployment. - - -Documentation Impact -==================== - -The deployment documentation will need to be updated to cover the -configuration of dual messaging system backends and the use of the -dispatch-router. TripleO Heat template examples should also help with -deployments using dual backends. 
- - -References -========== - -* [1] https://blueprints.launchpad.net/oslo.messaging/+spec/amqp-dispatch-router -* [2] http://qpid.apache.org/components/dispatch-router/ -* [3] http://docs.openstack.org/developer/oslo.messaging/AMQP1.0.html -* [4] https://etherpad.openstack.org/p/ocata-oslo-consistent-mq-backends -* [5] https://github.com/openstack/puppet-qdr diff --git a/specs/ocata/ssl-certmonger.rst b/specs/ocata/ssl-certmonger.rst deleted file mode 100644 index f9dd8e43..00000000 --- a/specs/ocata/ssl-certmonger.rst +++ /dev/null @@ -1,258 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -================================================ -PKI management of the overcloud using Certmonger -================================================ - -There is currently support for enabling SSL for the public endpoints of the -OpenStack services. However, certain use cases require the availability of SSL -everywhere. This spec proposes an approach to enable it. - -Problem Description -=================== - -Even though there is support for deploying both the overcloud and the -undercloud with TLS/SSL support for the public endpoints, there are deployments -that demand the usage of encrypted communications through all the interfaces. - -The current approach for deploying SSL in TripleO is to inject the needed -keys/certificates through Heat environment files; this requires the -pre-creation of those. While this approach works for the public-facing -services, as we attempt to secure the communication between different -services, and in different levels of the infrastructure, the amount of keys -and certificates grows. So, getting the deployer to generate all the -certificates and manage them will be quite cumbersome. - -On the other hand, TripleO is not meant to handle the PKI of the cloud. 
And -being the case that we will at some point need to enable the deployer to be -able to renew, revoke and keep track of the certificates and keys deployed in -the cloud, we are in need of a system with such capabilities. - -Instead of brewing an OpenStack-specific solution ourselves. I propose the -usage of already existing systems that will make this a lot easier. - -Proposed Change -=============== - -Overview --------- - -The proposal is to start using certmonger[1] in the nodes of the overcloud to -interact with a CA for managing the certificates that are being used. With this -tool, we can request the fetching of the needed certificates for interfaces -such as the internal OpenStack endpoints, the database cluster and the message -broker for the cloud. Those certificates will in turn have automatic tracking, -and for cases where there is a certificate to identify the node, it could -even automatically request a renewal of the certificate when needed. - -Certmonger is already available in several distributions (both Red Hat or -Debian based) and has the capability of interacting with several CAs, so if the -operator already has a working one, they could use that. On the other hand, -certmonger has the mechanism for registering new CAs, and executing scripts -(which are customizable) to communicate with those CAs. Those scripts are -language independent. But for means of the open source community, a solution -such as FreeIPA[2] or Dogtag[3] could be used to act as a CA and handle the -certificates and keys for us. Note that it's possible to write a plugin for -certmonger to communicate with Barbican or another CA, if that's what we would -like to go for. - -In the FreeIPA case, this will require a full FreeIPA system running either on -another node in the cluster or in the undercloud in a container[4]. 
- -For cases where the services are terminated by HAProxy, and the overcloud being -in an HA-deployment, the controller nodes will need to share a certificate that -HAProxy will present when accessed. In this case, the workflow will be as -following: - -#. Register the undercloud as a FreeIPA client. This configures the kerberos - environment and provides access control to the undercloud node. -#. Get keytab (credentials) corresponding to the undercloud in order to access - FreeIPA, and be able to register nodes. -#. Create a HAProxy service -#. Create a certificate/key for that service -#. Store the key in FreeIPA's Vault. -#. Create each of the controllers to be deployed as hosts in FreeIPA (Please - see note about this) -#. On each controller node get the certificate from service entry. -#. Fetch the key from the FreeIPA vault. -#. Set certmonger to keep track of the resulting certificates and - keys. - -.. note:: - - While the process of creating each node beforehand could sound cumbersome, - this can be automated to increase usability. The proposed approach is to - have a nova micro-service that automatically registers the nodes from the - overcloud when they are created [5]. This hook will not only register the - node in the system, but will also inject an OTP which the node can use to - fetch the required credentials and get its corresponding certificate and - key. The aforementioned OTP is only used for enrollment. Once enrollment - has already taken place, certmonger can already be used to fetch - certificates from FreeIPA. - - However, even if this micro-service is not in place, we could pass the OTP - via the TripleO Heat Templates (in the overcloud deployment). So it is - possible to have the controllers fetching their keytab and subsequently - request their certificates even if we don't have auto-enrollment in place. - -.. note:: - - Barbican could also be used instead of FreeIPA's Vault. 
With the upside of - it being an already accepted OpenStack service. However, Barbican will also - need to have a backend, which might be Dogtag in our case, since having an - HSM for the CI will probably not be an option. - -Now, for services such as the message broker, where an individual certificate -is required per-host, the process is much simpler, since the nodes will have -already been registered in FreeIPA and will be able to fetch their credentials. -Now we can just let certmonger do the work and request, and subsequently track -the appropriate certificates. - -Once the certificates and keys are present in the nodes, then we can let the -subsequent steps of the overcloud deployment process take place; So the -services will be configured to use those certificates and enable TLS where the -deployer specifies it. - -Alternatives ------------- - -The alternative is to take the same approach as we did for the public -endpoints. Which is to simply inject the certificates and keys to the nodes. -That would have the downside that the certificates and keys will be pasted in -heat environment files. This will be problematic for services such as RabbitMQ, -where we are giving a list of nodes for communication, because to enable SSL in -it, we need to have a certificate per-node serving as a message broker. -In this case two approaches could be taken: - -* We will need to copy and paste each certificate and key that is needed for - each of the nodes. With the downside being how much text needs to be copied, - and the difficulty of keeping track of the certificates. On the other hand, - each time a node is removed or added, we need to make sure we remember to add - a certificate and a key for it in the environment file. So this becomes a - scaling and a usability issue too. - -* We could also give in an intermediate certificate, and let TripleO create the - certificates and keys per-service. 
However, even if this fixes the usability - issue, we still cannot keep track of the specific certificates and keys that - are being deployed in the cloud. - -Security Impact ---------------- - -This approach enables better security for the overcloud, as it not only eases -us to enable TLS everywhere (if desired) but it also helps us keep track and -manage our PKI. On the other hand, it enables other means of security, such as -mutual authentication. In the case of FreeIPA, we could let the nodes have -client certificates, and so they would be able to authenticate to the services -(as is possible with tools such as HAProxy or Galera/MySQL). However, this can -come as subsequent work of this. - -Other End User Impact ---------------------- - -For doing this, the user will need to pass extra parameters to the overcloud -deployment, such as the CA information. In the case of FreeIPA, we will need to -pass the host and port, the kerberos realm, the kerberos principal of the -undercloud and the location of the keytab (the credentials) for the undercloud. - -However, this will be reflected in the documentation. - -Performance Impact ------------------- - -Having SSL everywhere will degrade the performance of the overcloud overall, as -there will be some overhead in each call. However, this is a known issue and -this is why SSL everywhere is optional. It should only be enabled for deployers -that really need it. - -The usage of an external CA or FreeIPA shouldn't impact the overcloud -performance, as the operations that it will be doing are not recurrent -operations (issuing, revoking or renewing certificates). - -Other Deployer Impact ---------------------- - -If a deployer wants to enable SSL everywhere, they will need to have a working -CA for this to work. Or if they don't they could install FreeIPA in a node. - -Developer Impact ----------------- - -Discuss things that will affect other developers working on OpenStack. 
- - -Implementation -============== - -Assignee(s) ------------ - -Primary assignee: - jaosorior - - -Work Items ----------- - -* Enable certmonger and the FreeIPA client tools in the overcloud image - elements. - -* Include the host auto-join hook for nova in the undercloud installation. - -* Create nested templates that will be used in the existing places for the - NodeTLSData and NodeTLSCAData. These templates will do the certmonger - certificate fetching and tracking. - -* Configure the OpenStack internal endpoints to use TLS and make this optional - through a heat environment. - -* Configure the Galera/MySQL cluster to use TLS and make this optional through - a heat environment. - -* Configure RabbitMQ to use TLS (which means having a certificate for each - node) and make this optional through a heat environment - -* Create a CI gate for SSL everywhere. This will include a FreeIPA installation - and it will enable SSL for all the services, ending in the running of a - pingtest. For the FreeIPA preparations, a script running before the overcloud - deployment will add the undercloud as a client, configure the appropriate - permissions for it and deploy a keytab so that it can use the nova hook. - Subsequently it will create a service for the OpenStack internal endpoints, - and the database, which it will use to create the needed certificates and - keys. - - -Dependencies -============ - -* This requires the following bug to be fixed in Nova: - https://bugs.launchpad.net/nova/+bug/1518321 - -* Also requires the packaging of the nova hook. - - -Testing -======= - -We will need to create a new gate in CI to test this. - - -Documentation Impact -==================== - -The documentation on how to use an external CA and how to install and use -FreeIPA with TripleO needs to be created. 
- - -References -========== - -[1] https://fedorahosted.org/certmonger/ -[2] http://www.freeipa.org/page/Main_Page -[3] http://pki.fedoraproject.org/wiki/PKI_Main_Page -[4] http://www.freeipa.org/page/Docker -[5] https://github.com/richm/rdo-vm-factory/blob/use-centos/rdo-ipa-nova/novahooks.py diff --git a/specs/ocata/step-by-step-validation.rst b/specs/ocata/step-by-step-validation.rst deleted file mode 100644 index 7c3ac745..00000000 --- a/specs/ocata/step-by-step-validation.rst +++ /dev/null @@ -1,149 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -======================= -Step by step validation -======================= - -Include the URL of your launchpad blueprint: - -https://blueprints.launchpad.net/tripleo/+spec/step-by-step-validation - -Validate each step during the installation to be able to stop fast in -case of errors and provide feedback on which components are in error. - -Problem Description -=================== - -During deployment, problems are often spotted at the end of the -configuration and can accumulate on top of each other making it -difficult to find the root cause. - -Deployers and developers will benefit by having the installation -process fails fast and spotting the lowest level possible components -causing the problem. - -Proposed Change -=============== - -Overview --------- - -Leverage the steps already defined in Tripleo to run a validation tool -at the end of each step. - -During each step, collect assertions about what components are -configured on each host then at the end of the step, run a validation -tool consumming the assertions to report all the failed assertions. - -Alternatives ------------- - -We could use Puppet to add assertions in the code to validate what has -been configured. 
The drawback of this approach is the difficulty to -have a good reporting on what are the issues compared to a specialized -tool that can be run outside of the installer if needed. - -The other drawback to this approach is that it can't be reused in -future if/when we support non-puppet configuration and it probably -also can't be used when we use puppet to generate an external config -file for containers. - -Security Impact ---------------- - -* some validations may require access to sensitive data like passwords - or keys to access the components. - -Other End User Impact ---------------------- - -This feature will be activated automatically in the installer. - -If needed, the deployer or developper will be able to launch the tool -by hand to validate a set of assertions. - -Performance Impact ------------------- - -We expect the validations to take less than one minute by step. - -Other Deployer Impact ---------------------- - -The objective is to have a fastest iterative process by failing fast. - -Developer Impact ----------------- - -Each configuration module will need to generate assertions to be -consummed by the validation tool. - - -Implementation -============== - -Note that this approach (multiple step application of ansible in -localhost mode via heat) for upgrades and it will work well for -validations too. - -https://review.openstack.org/#/c/393448/ - -Assignee(s) ------------ - -Primary assignee: - -Other contributors to help validate services: - - -Work Items ----------- - -* generate assertions about the configured components on the server - being configured in yaml files. - -* implement the validation tool leveraging the work that has already - been done in ``tripleo-validations`` that will do the following - steps: - - 1. collect yaml files from the servers on the undercloud. - - 2. run validations in parallel on each server from the undercloud. - - 3. report all issues and exit with 0 if no error or 1 if there is at - least one error. 
- -Dependencies -============ - -To be added. - -Testing -======= - -The change will be used automatically in the CI so it will always be tested. - -Documentation Impact -==================== - -We'll need to document integration with whatever validation tool is -used, e.g so that those integrating new services (or in future -out-of-tree additional services) can know how to integrate with the -validation. - -References -========== - -A similar approach was used in SpinalStack using serverspec. See -https://github.com/redhat-cip/config-tools/blob/master/verify-servers.sh - -A collection of Ansible playbooks to detect and report potential -issues during TripleO deployments: -https://github.com/openstack/tripleo-validations - -Prototype of composable upgrades with Heat+Ansible: -https://review.openstack.org/#/c/393448/ diff --git a/specs/ocata/third-party-gating-with-tripleo-quickstart.rst b/specs/ocata/third-party-gating-with-tripleo-quickstart.rst deleted file mode 100644 index 35f8973d..00000000 --- a/specs/ocata/third-party-gating-with-tripleo-quickstart.rst +++ /dev/null @@ -1,258 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -====================================================== -Make tripleo third party ci toolset tripleo-quickstart -====================================================== - -https://blueprints.launchpad.net/tripleo/+spec/use-tripleo-quickstart-and-tripleo-quickstart-extras-for-the-tripleo-ci-toolset - -Devstack being the reference CI deployment of OpenStack does a good job at -running both in CI and locally on development hardware. -TripleO-Quickstart (TQ)`[3]`_ and TripleO-QuickStart-Extras (TQE) can provide -an equivalent experience like devstack both in CI and on local development -hardware. 
TQE does a nice job of breaking down the steps required to install an -undercloud and deploy and overcloud step by step by creating bash scripts on the -developers system and then executing them in the correct order. - - -Problem Description -=================== - -Currently there is a population of OpenStack developers that are unfamiliar -with TripleO and our TripleO CI tools. It's critical that this population have -a tool which can provide a similar user experience that devstack currently -provides OpenStack developers. - -Recreating a deployment failure from TripleO-CI can be difficult for developers -outside of TripleO. Developers may need more than just a script that executes -a deployment. Ideally developers have a tool that provides a high level -overview, a step-by-step install process with documentation, and a way to inject -their local patches or patches from Gerrit into the build. - -Additionally there may be groups outside of TripleO that want to integrate -additional code or steps to a deployment. In this case the composablity of the -CI code is critical to allow others to plugin, extend and create their own steps -for a deployment. - - -Proposed Change -=============== - -Overview --------- - -Replace the tools found in openstack-infra/tripleo-ci that drive the deployment -of tripleo with TQ and TQE. - -Alternatives ------------- - -One alternative is to break down TripleO-CI into composable shell scripts, and -improve the user experience `[4]`_. - -Security Impact ---------------- - -No known additional security vulnerabilities at this time. - -Other End User Impact ---------------------- - -We expect that newcomers to TripleO will have an enhanced experience -reproducing results from CI. - -Performance Impact ------------------- - -Using an undercloud image with preinstalled rpms should provide a faster -deployment end-to-end. - -Other Deployer Impact ---------------------- - -None at this time. 
- -Developer Impact ----------------- - -This is the whole point really and discussed elsewhere in the spec. However, -this should provide a quality user experience for developers wishing to setup -TripleO. - -TQE provides a step-by-step, well documented deployment of TripleO. -Furthermore, and is easy to launch and configure:: - - bash quickstart.sh -p quickstart-extras.yml -r quickstart-extras-requirements.txt --tags all - -Everything is executed via a bash shell script, the shell scripts are customized -via jinja2 templates. Users can see the command prior to executing it when -running it locally. Documentation of what commands were executed are -automatically generated per execution. - -Node registration and introspection example: -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -* Bash script:: - - https://ci.centos.org/artifacts/rdo/jenkins-tripleo-quickstart-promote-newton-delorean-minimal-31/undercloud/home/stack/overcloud-prep-images.sh - - -* Execution log:: - - https://ci.centos.org/artifacts/rdo/jenkins-tripleo-quickstart-promote-newton-delorean-minimal-31/undercloud/home/stack/overcloud_prep_images.log.gz - -* Generated rst documentation:: - - https://ci.centos.org/artifacts/rdo/jenkins-tripleo-quickstart-promote-newton-delorean-minimal-31/docs/build/overcloud-prep-images.html - -Overcloud Deployment example: -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -* Bash script:: - - https://ci.centos.org/artifacts/rdo/jenkins-tripleo-quickstart-promote-newton-delorean-minimal_pacemaker-31/undercloud/home/stack/overcloud-deploy.sh.gz - -* Execution log:: - - https://ci.centos.org/artifacts/rdo/jenkins-tripleo-quickstart-promote-newton-delorean-minimal_pacemaker-31/undercloud/home/stack/overcloud_deploy.log.gz - -* Generated rST documentation:: - - https://ci.centos.org/artifacts/rdo/jenkins-tripleo-quickstart-promote-master-current-tripleo-delorean-minimal-37/docs/build/overcloud-deploy.html - -Step by Step Deployment: -^^^^^^^^^^^^^^^^^^^^^^^^ - -There are times when a 
developer will want to walk through a deployment step-by-step, -run commands by hand, and try to figure out what exactly is involved with -a deployment. A developer may also want to tweak the settings or add a patch. -To do the above the deployment can not just run through end to end. - -TQE can setup the undercloud and overcloud nodes, and then just add add already -configured scripts to install the undercloud and deploy the overcloud -successfully. Essentially allowing the developer to ssh to the undercloud and -drive the installation from there with prebuilt scripts. - -* Example:: - - ./quickstart.sh --no-clone --bootstrap --requirements quickstart-extras-requirements.txt --playbook quickstart-extras.yml --skip-tags undercloud-install,undercloud-post-install,overcloud-deploy,overcloud-validate --release newton - -Composability: -^^^^^^^^^^^^^^ - -TQE is not a single tool, it's a collection of composable Ansible roles. These -Ansible roles can coexist in a single Git repository or be distributed to many -Git repositories. See "Additional References." - -Why have two projects? Why risk adding complexity? -One of the goals of the TQ and TQE is to not assume we are -writing code that works for everyone, on every deployment type, and in any -kind of infrastructure. To ensure that TQE developers can not block outside -contributions (roles, additions, and customization to either TQ or TQE), -it was thought best to uncouple as well and make it as composable -as possible. Ansible playbooks after all, are best used as a method to just -call roles so that anyone can create playbooks with a variety of roles in the -way that best suits their purpose. 
- -Implementation -============== - -Assignee(s) ------------ - -Primary assignee: - - weshayutin - -Other contributors: - - trown - - sshnaidm - - gcerami - - adarazs - - larks - -Work Items ----------- - -- Enable third party testing `[1]`_ -- Enable TQE to run against the RH2 OVB OpenStack cloud `[2]`_ -- Move the TQE roles into one or many OpenStack Git Repositories, see the roles listed - in the "Additional References" - - -Dependencies -============ - -- A decision needs to be made regarding `[1]`_ -- The work to enable third party testing in rdoproject needs to be completed - -Testing -======= - -There is a work in progress testing TQE against the RH2 OVB cloud atm `[2]`_. TQE -has been vetted for quite some time with OVB on other clouds. - - -Documentation Impact -==================== - -What is the impact on the docs? Don't repeat details discussed above, but -please reference them here. - - -References -========== -* `[1]`_ -- http://lists.openstack.org/pipermail/openstack-dev/2016-October/105248.html -* `[2]`_ -- https://review.openstack.org/#/c/381094/ -* `[3]`_ -- https://etherpad.openstack.org/p/tripleo-third-party-ci-quickstart -* `[4]`_ -- https://blueprints.launchpad.net/tripleo/+spec/make-tripleo-ci-externally-consumable - -.. _[1]: http://lists.openstack.org/pipermail/openstack-dev/2016-October/105248.html -.. _[2]: https://review.openstack.org/#/c/381094/ -.. _[3]: https://etherpad.openstack.org/p/tripleo-third-party-ci-quickstart -.. 
_[4]: https://blueprints.launchpad.net/tripleo/+spec/make-tripleo-ci-externally-consumable - -Additional References -===================== - -TQE Ansible role library ------------------------- - -* Undercloud roles: - - * https://github.com/redhat-openstack/ansible-role-tripleo-baremetal-virt-undercloud - * https://github.com/redhat-openstack/ansible-role-tripleo-pre-deployment-validate ( under development ) - -* Overcloud roles: - - * https://github.com/redhat-openstack/ansible-role-tripleo-overcloud-prep-config - * https://github.com/redhat-openstack/ansible-role-tripleo-overcloud-prep-flavors - * https://github.com/redhat-openstack/ansible-role-tripleo-overcloud-prep-images - * https://github.com/redhat-openstack/ansible-role-tripleo-overcloud-prep-network - * https://github.com/redhat-openstack/ansible-role-tripleo-overcloud - * https://github.com/redhat-openstack/ansible-role-tripleo-ssl ( under development ) - -* Utility roles: - - * https://github.com/redhat-openstack/ansible-role-tripleo-cleanup-nfo - * https://github.com/redhat-openstack/ansible-role-tripleo-collect-logs - * https://github.com/redhat-openstack/ansible-role-tripleo-gate - * https://github.com/redhat-openstack/ansible-role-tripleo-provision-heat - * https://github.com/redhat-openstack/ansible-role-tripleo-image-build - -* Post Deployment roles: - - * https://github.com/redhat-openstack/ansible-role-tripleo-overcloud-upgrade - * https://github.com/redhat-openstack/ansible-role-tripleo-overcloud-scale-nodes - * https://github.com/redhat-openstack/ansible-role-tripleo-tempest - * https://github.com/redhat-openstack/ansible-role-tripleo-overcloud-validate - * https://github.com/redhat-openstack/ansible-role-tripleo-validate-ipmi - * https://github.com/redhat-openstack/ansible-role-tripleo-overcloud-validate-ha - -* Baremetal roles: - - * https://github.com/redhat-openstack/ansible-role-tripleo-baremetal-prep-virthost - * 
https://github.com/redhat-openstack/ansible-role-tripleo-overcloud-prep-baremetal \ No newline at end of file diff --git a/specs/ocata/tripleo-composable-upgrades.rst b/specs/ocata/tripleo-composable-upgrades.rst deleted file mode 100644 index 0be057d0..00000000 --- a/specs/ocata/tripleo-composable-upgrades.rst +++ /dev/null @@ -1,197 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -=========================== -Composable Service Upgrades -=========================== - -https://blueprints.launchpad.net/tripleo/+spec/overcloud-upgrades-per-service - -In the Newton release TripleO delivered a new capability to deploy arbitrary -custom roles_ (groups of nodes) with a lot of flexibility of which services -are placed on which roles (using roles_data.yaml_). This means we can no -longer make the same assumptions about a specific service running on a -particular role (e.g Controller). - -The current upgrades workflow_ is organised around the node role determining -the order in which that given node and services deployed therein are upgraded. -The workflow dictates "swifts", before "controllers", before "cinders", before -"computes", before "cephs". The reasons for this ordering are beyond the scope -here and ultimately inconsequential, since the important point to note is -there is a hard coded relationship between a given service and a given node -with respect to upgrading that service (e.g. a script that upgrades all -services on "Compute" nodes). For upgrades from Newton to Ocata we can no -longer make these assumptions about services being tied to a specific role, -so a more composable model is needed. 
- -Consensus after the initial discussion during the Ocata design summit session_ -was that: - - * Re-engineering the upgrades workflow for Newton to Ocata is necessary - because 'custom roles' - * We should start by moving the upgrades logic into the composable service - templates in the tripleo-heat-templates (i.e. into each service) - * There is still a need for an over-arching workflow - albeit service - rather than role oriented. - * It is TBD what will drive that workflow. We will use whatever will be - 'easier' for a first iteration, especially given the Ocata development - time constraints. - -Problem Description -=================== - -As explained in the introduction above, the current upgrades workflow_ can no -longer work for composable service deployments. Right now the upgrade scripts -are organised around and indeed targeted at specific nodes: the upgrade -script for swifts_ is different to that for computes_ or for controllers (split -across a number_ of_ steps_) cinders_ or cephs_. These scripts are invoked -as part of a workflow where each step is either a heat stack update or -invocation of the upgrade-non-controller.sh_ script to execute the node -specific upgrade script (delivered as one of the earlier steps in the workflow) -on non controllers. - -One way to handle this problem is to decompose the upgrades logic -from those monolithic per-node upgrade scripts into per-service upgrades logic. -This should live in the tripleo-heat-templates puppet services_ templates for -each service. For the upgrade of a given service we need to express: - - * any pre-upgrade requirements (run a migration, stop a service, pin RPC) - * any post upgrade (migrations, service starts/reload config) - * any dependencies on other services (upgrade foo only after bar) - -If we organise the upgrade logic in this manner the idea is to gain the -flexibility to combine this dynamically into the new upgrades workflow. 
-Besides the per-service upgrades logic the workflow will also need to handle -and provide for any deployment wide upgrades related operations such as -unpin of the RPC version once all services are successfully running Ocata, or -upgrading of services that aren't directly managed or configured by the -tripleo deployment (like openvswitch as just one example), or even the delivery -of a new kernel which will require a reboot on the given service node after -all services have been upgraded. - - -Proposed Change -=============== - -The first step is to work out where to add upgrades related configuration to -each service in the tripleo-heat-templates services_ templates. The exact -format will depend on what we end up using to drive the workflow. We could -include them in the *outputs* as 'upgrade_config', like:: - - outputs: - role_data: - description: Role data for the Nova Compute service. - value: - service_name: nova_compute - ... - upgrade_tasks: - - name: RPC pin nova-compute - exec: "crudini --set /etc/nova/nova.conf upgrade_levels compute $upgrade_level_nova_compute" - tags: step1 - - name: stop nova-compute - service: name=openstack-nova-compute state=stopped - tags: step2 - - name: update nova database - command: nova-manage db_sync - tags: step3 - - name: start nova-compute - service: name=openstack-nova-compute state=started - tags: step4 - ... - -The current proposal is for the upgrade snippets to be expressed in Ansible. -The initial focus will be to drive the upgrade via the existing tripleo -tooling, e.g heat applying ansible similar to how heat applies scripts for -the non composable implementation. In future it may also be possible to -expose the per-role ansible playbooks to enable advanced operators to drive -the upgrade workflow directly, perhaps used in conjunction with the dynamic -inventory provided for tripleo validations. 
- -One other point of note that was brought up in the Ocata design summit -session_ and which should factor into the design here is that operators may -wish to run the upgrade in stages rather than all at once. It could still be -the case that the new workflow can differentiate between 'controlplane' -vs 'non-controlplane' services. The operator could then upgrade controlplane -services as one stand-alone upgrade step and then later start to roll out the -upgrade of non-controlplane services. - -Alternatives ------------- - -One alternative is to have a stand-alone upgrades workflow driven by ansible. -Some early work and prototyping was done as well as a (linked from the -Ocata design summit session_). Ultimately the proposal was abandoned but it is -still possible that we will use ansible for the upgrade logic as described -above. We could also explore exposing the resulting ansible playbooks for -advanced operators to invoke as part of their own tooling. - -Other End User Impact ---------------------- -Significant change in the tripleo upgrades workflow. - -Implementation -============== - -Assignee(s) ------------ - -Primary assignee: shardy - -Other contributors: marios, emacchi, matbu, chem, lbezdick, - - -Work Items ----------- -Some prototyping by shardy at -"WIP prototyping composable upgrades with Heat+Ansible" at -I39f5426cb9da0b40bec4a7a3a4a353f69319bdf9_ - - * Decompose the upgrades logic into each service template in the tht - * Design a workflow that incorporates migrations, the per-service upgrade - scripts and any deployment wide upgrades operations. - * Decide how this workflow is to be invoked (mistral? puppet? bash?) - * profit! - - -Dependencies -============ - - - -Testing -======= - -Hopefully we can use the soon to be added upgrades job_ to help with the -development and testing of this feature and obviously guard against changes -that break upgrades. 
Ideally we will expand that to include jobs for each of -the stable branches (upgrade M->N and N->O). The M->N would exercise the -previous upgrades workflow whereas N->O would be exercising the work developed -as part of this spec. - - -Documentation Impact -==================== - - -References -========== - - -.. _roles: https://blueprints.launchpad.net/tripleo/+spec/custom-roles -.. _roles_data.yaml: https://github.com/openstack/tripleo-heat-templates/blob/78500bc2e606bd1f80e05d86bf7da4d1d27f77b1/roles_data.yaml -.. _workflow: http://docs.openstack.org/developer/tripleo-docs/post_deployment/upgrade.html -.. _session: https://etherpad.openstack.org/p/ocata-tripleo-upgrades -.. _swifts: https://github.com/openstack/tripleo-heat-templates/blob/stable/newton/extraconfig/tasks/major_upgrade_object_storage.sh -.. _computes: https://github.com/openstack/tripleo-heat-templates/blob/stable/newton/extraconfig/tasks/major_upgrade_compute.sh -.. _number: https://github.com/openstack/tripleo-heat-templates/blob/stable/newton/extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh -.. _of: https://github.com/openstack/tripleo-heat-templates/blob/stable/newton/extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh -.. _steps: https://github.com/openstack/tripleo-heat-templates/blob/stable/newton/extraconfig/tasks/major_upgrade_controller_pacemaker_3.sh -.. _cinders: https://github.com/openstack/tripleo-heat-templates/blob/stable/newton/extraconfig/tasks/major_upgrade_block_storage.sh -.. _cephs: https://github.com/openstack/tripleo-heat-templates/blob/stable/newton/extraconfig/tasks/major_upgrade_ceph_storage.sh -.. _upgrade-non-controller.sh: https://github.com/openstack/tripleo-common/blob/01b68d0b0cdbd0323b7f006fbda616c12cbf90af/scripts/upgrade-non-controller.sh -.. _services: https://github.com/openstack/tripleo-heat-templates/tree/master/puppet/services -.. _I39f5426cb9da0b40bec4a7a3a4a353f69319bdf9 : https://review.openstack.org/#/c/393448/ -.. 
_job: https://bugs.launchpad.net/tripleo/+bug/1583125 diff --git a/specs/ocata/tripleo-opstools-performance-monitoring.rst b/specs/ocata/tripleo-opstools-performance-monitoring.rst deleted file mode 100644 index 5f9ba2fe..00000000 --- a/specs/ocata/tripleo-opstools-performance-monitoring.rst +++ /dev/null @@ -1,105 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -=========================================== -Enable deployment of performance monitoring -=========================================== - -https://blueprints.launchpad.net/tripleo/+spec/tripleo-opstools-performance-monitoring - -TripleO should have a possibility to automatically setup and install -the performance monitoring agent (collectd) to service the overcloud. - -Problem Description -=================== - -We need to easily enable operators to connect overcloud nodes to performance -monitoring stack. The possible way to do so is to install collectd agent -together with set of plugins, depending on the metrics we want to collect -from overcloud nodes. - -Summary of use cases: - -1. collectd deployed on each overcloud node reporting configured metrics -(via collectd plugins) to external collector. - -Proposed Change -=============== - -Overview --------- - -The collectd service will be deployed as a composable service on -the overcloud stack when it is explicitly stated via environment file. - -Security Impact ---------------- - -None - -Other End User Impact ---------------------- - -None - -Performance Impact ------------------- - -Metric collection and transport to the monitoring node can create I/O which -might have performance impact on monitored nodes. 
- -Other Deployer Impact ---------------------- - -None - -Developer Impact ----------------- - -None - -Implementation -============== - -Assignee(s) ------------ - -Primary assignee: - Lars Kellogg-Stedman (larsks) - -Other contributors: - Martin Magr (mmagr) - -Work Items ----------- - -* puppet-tripleo profile for collectd service -* tripleo-heat-templates composable service for collectd deployment - -Dependencies -============ - -* Puppet module for collectd service: puppet-collectd [1] -* CentOS Opstools SIG repo [2] - -Testing -======= - -We should consider creating CI job for deploying overcloud with monitoring -node to perform functional testing. - - -Documentation Impact -==================== - -New template parameters will have to be documented. - - -References -========== - -[1] https://github.com/voxpupuli/puppet-collectd -[2] https://wiki.centos.org/SpecialInterestGroup/OpsTools diff --git a/specs/ocata/tripleo-repos.rst b/specs/ocata/tripleo-repos.rst deleted file mode 100644 index 8a0a25c9..00000000 --- a/specs/ocata/tripleo-repos.rst +++ /dev/null @@ -1,139 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -============================== - TripleO Repo Management Tool -============================== - -https://blueprints.launchpad.net/tripleo/tripleo-repos - -Create a tool to handle the repo setup for TripleO - -Problem Description -=================== - -The documented repo setup steps for TripleO are currently: - -* 3 curls -* a sed -* a multi-line bash command -* a yum install -* (optional) another yum install and sed command - -These steps are also implemented in multiple other places, which means every -time a change needs to be made it has to be done in at least three different -places. The stable branches also need slightly different commands which further -complicates the documentation. 
They also need to appear in multiple places -in the docs (e.g. virt system setup, undercloud install, image build, -undercloud upgrade). - -Proposed Change -=============== - -Overview --------- - -My proposal is to abstract away the repo management steps into a standalone -tool. This would essentially change the repo setup from the process -described above to something like:: - - sudo yum install -y http://tripleo.org/tripleo-repos.rpm - sudo tripleo-repos current - -Historical note: The original proposal was called dlrn-repo because it was -dealing exclusively with dlrn repos. Now that we've started to add more -repos like Ceph that are not from dlrn, that name doesn't really make sense. - -This will mean that when repo setup changes are needed (which happen -periodically), they only need to be made in one place and will apply to both -developer and user environments. - -Alternatives ------------- - -Use tripleo.sh's repo setup. However, tripleo.sh is not intended as a -user-facing tool. It's supposed to be a thin wrapper that essentially -implements the documented deployment commands. - -Security Impact ---------------- - -The tool would need to make changes to the system's repo setup and install -packages. This is the same thing done by the documented commands today. - -Other End User Impact ---------------------- - -This would be a new user-facing CLI. - -Performance Impact ------------------- - -No meaningful change - -Other Deployer Impact ---------------------- - -Deployers would need to switch to this new method of configuring the -TripleO repos in their deployments. - -Developer Impact ----------------- - -There should be little to no developer impact because they are mostly using -other tools to set up their repos, and those tools should be converted to use -the new tool. 
- - -Implementation -============== - -Assignee(s) ------------ - -Primary assignee: - bnemec - -Other contributors: - - -Work Items ----------- - -* Update the proposed tool to match the current repo setup -* Import code into gerrit -* Package tool -* Publish the package somewhere easily accessible -* Update docs to use tool -* Convert existing developer tools to use this tool - - -Dependencies -============ - -NA - -Testing -======= - -tripleo.sh would be converted to use this tool so it would be covered by -existing CI. - - -Documentation Impact -==================== - -Documentation would be simplified. - - -References -========== - -Original proposal: -http://lists.openstack.org/pipermail/openstack-dev/2016-June/097221.html - -Current version of the tool: -https://github.com/cybertron/dlrn-repo diff --git a/specs/ocata/undercloud-heat.rst b/specs/ocata/undercloud-heat.rst deleted file mode 100644 index f0846101..00000000 --- a/specs/ocata/undercloud-heat.rst +++ /dev/null @@ -1,177 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -================================ -composable-undercloud -================================ - -Include the URL of your launchpad blueprint: - -https://blueprints.launchpad.net/tripleo/+spec/heat-undercloud - -Deploy the undercloud with Heat instead of elements. This will allow us to use -composable services for the Undercloud and better fits with the architecture -of TripleO (providing a feedback loop between the Undercloud and Overcloud). -Furthermore this gets us a step closer to an HA undercloud and will help -us potentially convert the Undercloud to containers as work is ongoing -in t-h-t for containers as well. - -Problem Description -=================== - -The Undercloud today uses instack-undercloud. Instack undercloud is built -around the concept of 'instack' which uses elements to install service. 
- -* When instack-undercloud started we shared elements across the undercloud - and overcloud via the tripleo-image-elements project. This is no longer the - case, thus we have lost the feedback loop of using the same elements in - both the overcloud and undercloud. - -* We retro-fitted instack-undercloud with a single element called - puppet-stack-config that contains a single (large) puppet manifest for - all the services. Being able to compose the Undercloud would be more - scalable. - -* A maintenance problem. Ideally we could support the under and overcloud with the same tooling. - -Proposed Change -=============== - -Overview --------- - -We can use a single process Heat API/Engine in noauth mode to leverage -recent "composable services" work in the tripleo-heat-templates project. - -* A new heat-all launcher will be created. - -* We will run the heat-all launcher with "noauth" middleware to skip keystone - auth at a high level. - -* The heat-all process will use fake RPC driver and SQLite thus avoiding - the need to run RabbitMQ or MySQL on the deployment server for bootstrapping. - -* To satisfy client library requirements inside heat we will run a fake keystone - API (a thread in our installer perhaps), that will return just enough to - make these clients functionally work in noauth mode. - -* The new "deployed-server" feature in tripleo-heat-templates will make it - it possible to create Heat "server" objects and thus run - OS::Heat::SoftwareDeployment resources on pre-installed servers. - -* We will use os-collect-config to communicate with the local Heat API via - the Heat signal transport. We will run os-collect-config until the - stack finished processing and either completes or fails. - -Alternatives ------------- - -* Create another tool which can read composable services in - tripleo-heat-templates. 
This tool would be required to have feature - parity with Heat such that things like parameters, nested stacks, - environments all worked in a similar fashion so that we could share the - template work across the Undercloud and Overcloud. This approach isn't - really feasible. - -* Use an alternate tool like Ansible. This would require creating duplicate services - in Ansible playbooks for each service we require in the Undercloud. This - approach isn't ideal in that it involves duplicate work across the Undercloud - and Overcloud. Ongoing work around multi-node configuration and containers - would need to be duplicated into both the Overcloud (tripleo-heat-templates) - and Undercloud (Ansible) frameworks. - -Security Impact ---------------- - -* The approach would run Heat on a single node in noauth mode. Heat - API and the fake Keystone stub would listen on 127.0.0.1 only. This - would be similar to other projects which allow noauth in local mode - as well. - -Other End User Impact ---------------------- - -* We would again have a single template language driving our Undercloud - and Overcloud tooling. Heat templates are very well documented. - -Performance Impact ------------------- - -* Initial testing shows the single process Heat API/Engine is quite light - taking only 70MB of RAM on a machine. - -* The approach is likely to be on-par with the performance of - instack-undercloud. - - -Other Deployer Impact ---------------------- - -* The format of undercloud.conf may change. We will add a - 'compat' layer which takes the format of 'undercloud.conf' today - and sets Heat parameters and or includes heat environments to give - feature parity and an upgrade path for existing users. Additionally, - CI jobs will also be created to ensure users who upgrade from - previous instack environments can use the new tool. - -Developer Impact ----------------- - -* Developers would be able to do less work to maintain the UnderCloud by - sharing composable services. 
- -* Future work around composable upgrades could also be utilized and shared -across the Undercloud and Overcloud. - - -Implementation -============== - -Assignee(s) ------------ - -dprince (dan-prince on LP) - -Work Items ----------- - -* Create heat-all launcher. - -* Create python-tripleoclient command to run 'undercloud deploy'. - -* Create undercloud.yaml Heat templates. - - -Dependencies -============ - -* Heat all launcher and noauth middleware. - -Testing -======= - -Swapping in the new Undercloud as part of CI should allow us to fully test it. - -Additionally, we will have an upgrade job that tests an upgrade from -an instack-undercloud installation to a new t-h-t driven Undercloud install. - -Documentation Impact -==================== - -Documentation changes will need to be made that explain new config -interfaces (Heat parameters and environments). We could minimize doc changes -by developing a 'compat' interface to process the legacy undercloud.conf -and perhaps even re-use the 'undercloud install' task in python-tripleoclient -as well so it essentially acts the same on the CLI. - -References -========== - -* Onward dark owl presentation: https://www.youtube.com/watch?v=y1qMDLAf26Q - -* https://etherpad.openstack.org/p/tripleo-composable-undercloud - -* https://blueprints.launchpad.net/tripleo/+spec/heat-undercloud diff --git a/specs/ocata/undercloud-ntp-server.rst b/specs/ocata/undercloud-ntp-server.rst deleted file mode 100644 index 4ae68f65..00000000 --- a/specs/ocata/undercloud-ntp-server.rst +++ /dev/null @@ -1,142 +0,0 @@ -============================= -TripleO Undercloud NTP Server -============================= - -The Undercloud should provide NTP services for when external NTP services are -not available. - -Problem Description -=================== - -NTP services are required to deploy with HA, but we rely on external services. -This means that TripleO can't be installed without Internet access or a local -NTP server. 
- -This has several drawbacks: - -* The NTP server is a potential point of failure, and it is an external - dependency. - -* Isolated deployments without Internet access are not possible without - additional effort (manually deploying an NTP server). - -* Infra CI is dependent on an external resource, leading to potential - false negative test runs or CI failures. - -Proposed Change -=============== - -Overview --------- - -In order to address this problem, the Undercloud installation process should -include setting up an NTP server on the local Undercloud. The use of this -NTP server would be optional, but we may wish to make it a default. Having -a default is better than none, since HA deployments will fail without time -synchronization between the controller cluster members. - -The operation of the NTP server on the Undercloud would be primarily of use -in small or proof-of-concept deployments. It is expected that sufficiently -large deployments will have an infrastructure NTP server already operating -locally. - -Alternatives ------------- - -The alternative is to continue to require external NTP services, or to -require manual steps to set up a local NTP server. - -Security Impact ---------------- - -Since the NTP server is required for syncing the HA, a skewed clock on one -controller (in relation to the other controllers) may make it ineligible to -participate in the HA cluster. If more than one controller's clock is skewed, -the entire cluster will fail to operate. This opens up an opportunity for -denial-of-service attacks against the cloud, either by causing NTP updates -to fail, or using a man-in-the-middle attack where deliberately false NTP -responses are returned to the controllers. - -Of course, operating the NTP server on the Undercloud moves that attack -vector down to the Undercloud, so sufficient security hardening should be done -on the Undercloud and/or the attached networks. 
We may wish to bind the NTP -server only to the provisioning (control plane) network. - -Other End User Impact ---------------------- - -This may make the life of the installer easier, since they don't need to open -a network connection to an NTP server or set up a local NTP server. - -Performance Impact ------------------- - -The operation of the NTP server should have a negligible impact on Undercloud -performance. It is a lightweight protocol and the daemon requires little -resources. - -Other Deployer Impact ---------------------- - -We now require that a valid NTP server be configured either in the templates -or on the deployment command-line. This requirement would be optional if we had -a default pointing to NTP services on the Undercloud. - -Developer Impact ----------------- - -None - -Implementation -============== - -Assignee(s) ------------ -Primary assignees: - -* dsneddon@redhat.com -* bfournie@redhat.com - -Work Items ----------- - -The TripleO Undercloud installation scripts will have to be modified to include -the installation and configuration of an NTP server. This will likely be done -using a composable service for the Undercloud, with configuration data taken -from undercloud.conf. The configuration should include a set of default NTP -servers which are reachable on the public Internet for when no servers are -specified in undercloud.conf. - -Implement opening up iptables for NTP on the control plane network (bound to -only one IP/interface [ctlplane] if possible). - -Dependencies -============ - -The NTP server RPMs must be installed, and upstream NTP servers must be -identified (although we might configure a default such as pool.ntp.org) - -Testing -======= - -Since proper operation of the NTP services are required for successful -deployment of an HA overcloud, this functionality will be tested every time -a TripleO CI HA job is run. 
- -We may also want to implement a validation that ensures that the NTP server -can reach its upstream stratum 1 servers. This will ensure that the NTP -server is serving up the correct time. This is optional, however, since the -only dependency is that the overcloud nodes agree on the time, not that it -be correct. - -Documentation Impact -==================== - -The setup and configuration of the NTP server should be documented. Basic NTP -best practices should be communicated. - -References -========== - -* [1] - Administration Guide Draft/NTP - Fedora Project - https://fedoraproject.org/wiki/Administration_Guide_Draft/NTP diff --git a/specs/ocata/validations-in-workflows.rst b/specs/ocata/validations-in-workflows.rst deleted file mode 100644 index 00ac91a2..00000000 --- a/specs/ocata/validations-in-workflows.rst +++ /dev/null @@ -1,224 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -================================ -Validations in TripleO Workflows -================================ - -https://blueprints.launchpad.net/tripleo/+spec/validations-in-workflows - -The Newton release introduced TripleO validations -- a set of -extendable checks that identify potential deployment issues early and -verify that the deployed OpenStack is set up properly. These -validations are automatically being run by the TripleO UI, but there -is no support for the command line workflow and they're not being -exercised by our CI jobs either. - - -Problem Description -=================== - -When enabled, TripleO UI runs the validations at the appropriate phase -of the planning and deployment. This is done within the TripleO UI -codebase and therefore not available to python-tripleoclient or -the CI. - -The TripleO deployer can run the validations manually, but they need -to know at which point to do so and they will need to do it by calling -Mistral directly. 
- -This causes a disparity between the command line and GUI experience -and complicates the efforts to exercise the validations by the CI. - - -Proposed Change -=============== - -Overview --------- - -Each validation already advertises where in the planning/deployment -process it should be run. This is under the ``vars/metadata/groups`` -section. In addition, the ``tripleo.validations.v1.run_groups`` -Mistral workflow lets us run all validations belonging to a given -group. - -For each validation group (currently ``pre-introspection``, ``pre-deployment`` -and ``post-deployment``) we will update the appropriate workflow in -tripleo-common to optionally call ``run_groups``. - -Each of the workflows above will receive a new Mistral input called -``run_validations``. It will be a boolean value that indicates whether -the validations ought to be run as part of that workflow or not. - -To expose this functionality to the command line user, we will add an -option for enabling/disabling validations into python-tripleoclient -(which will set the ``run_validations`` Mistral input) and a way to -show the results of each validation to the screen output. - -When the validations are run, they will report their status to Zaqar -and any failures will block the deployment. The deployer can disable -validations if they wish to proceed despite failures. - -One unresolved question is the post-deployment validations. The Heat -stack create/update Mistral action is currently asynchronous and we -have no way of calling actions after the deployment has finished. -Unless we change that, the post-deployment validations may have to be -run manually (or via python-tripleoclient). - - -Alternatives ------------- - -1. Document where to run each group and how and leave it at that. This - risks that the users already familiar with TripleO may miss the - validations or that they won't bother. - - We would still need to find a way to run validations in a CI job, - though. - -2. 
Provide subcommands to run validations (and groups of validations) - into python-tripleoclient and rely on people running them manually. - - This is similar to 1., but provides an easier way of running a - validation and getting its result. - - Note that this may be a useful addition even with the proposal - outlined in this specification. - -3. Do what the GUI does in python-tripleoclient, too. The client will - know when to run which validation and will report the results back. - - The drawback is that we'll need to implement and maintain the same - set of rules in two different codebases and have no API to do them. - I.e. what the switch to Mistral is supposed to solve. - - - -Security Impact ---------------- - -None - -Other End User Impact ---------------------- - -We will need to modify python-tripleoclient to be able to display the -status of validations once they finished. TripleO UI already does this. - -The deployers may need to learn about the validations. - -Performance Impact ------------------- - -Running a validation can take about a minute (this depends on the -nature of the validation, e.g. does it check a configuration file or -does it need to log in to all compute nodes). - -This may be a concern if we run multiple validations at the same -time. - -We should be able to run the whole group in parallel. It's possible -we're already doing that, but this needs to be investigated. -Specifically, does ``with-items`` run the tasks in sequence or in -parallel? 
- -There are also some options that would allow us to speed up the -running time of a validation itself, by using common ways of speeding -up Ansible playbooks in general: - -* Disabling the default "setup" task for validations that don't need - it (this task gathers hardware and system information about the - target node and it takes some time) -* Using persistent SSH connections -* Making each validation task run independently (by default, Ansible - runs a task on all the nodes, waits for its completion everywhere - and then moves on to another task) -* Each validation runs the ``tripleo-ansible-inventory`` script which - gathers information about deployed servers and configuration from - Mistral and Heat. Running this script can be slow. When we run - multiple validations at the same time, we should generate the - inventory only once and cache the results. - -Since the validations are going to be optional, the deployer can -always choose not to run them. On the other hand, any slowdown should -ideally outweigh the time spent investigating failed deployments. - -We will also document the actual time difference. This information -should be readily available from our CI environments, but we should -also provide measurements on the bare metal. - - -Other Deployer Impact ---------------------- - -Depending on whether the validations will be run by default or not, -the only impact should be an option that lets the deployer to run them -or not. - - -Developer Impact ----------------- - -The TripleO developers may need to learn about validations, where to -find them and how to change them. - - -Implementation -============== - -Assignee(s) ------------ - -Primary assignee: - tsedovic - -Other contributors: - None - -Work Items ----------- - -Work items or tasks -- break the feature up into the things that need to be -done to implement it. Those parts might end up being done by different people, -but we're mostly trying to understand the timeline for implementation. 
- -* Add ``run_validations`` input and call ``run_groups`` from the - deployment and node registration workflows -* Add an option to run the validations to python-tripleoclient -* Display the validations results with python-tripleoclient -* Add or update a CI job to run the validations -* Add a CI job to tripleo-validations - - -Dependencies -============ - -None - - -Testing -======= - -This should make the validations testable in CI. Ideally, we would -verify the expected success/failure for the known validations given -the CI environment. But having them go through the testing machinery -would be a good first step to ensure we don't break anything. - - -Documentation Impact -==================== - -We will need to document the fact that we have validations, where they -live and when and how they are being run. - - -References -========== - -* http://docs.openstack.org/developer/tripleo-common/readme.html#validations -* http://git.openstack.org/cgit/openstack/tripleo-validations/ -* http://docs.openstack.org/developer/tripleo-validations/ diff --git a/specs/pike/aide-database.rst b/specs/pike/aide-database.rst deleted file mode 100644 index 0995c5d3..00000000 --- a/specs/pike/aide-database.rst +++ /dev/null @@ -1,185 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -==================================== -AIDE - Intrusion Detection Database -==================================== - -https://blueprints.launchpad.net/tripleo/+spec/tripleo-aide-database - -AIDE (Advanced Intrusion Detection Environment) is a file and directory -integrity verification system. It computes a checksum of object -attributes, which are then stored into a database. Operators can then -run periodic checks against the current state of defined objects and -verify if any attributes have been changed (thereby suggesting possible -malicious / unauthorised tampering). 
- -Problem Description -=================== - -Security Frameworks such as DISA STIG [1] / CIS [3] require that AIDE be -installed and configured on all Linux systems. - -To enable OpenStack operators to comply with the aforementioned security -requirements, they require a method of automating the installation of -AIDE and initialization of AIDE's integrity Database. They also require -a means to perform a periodic integrity verification run. - -Proposed Change -=============== - -Overview --------- - -Introduce a puppet-module to manage the AIDE service and ensure the AIDE -application is installed, create rule entries and a CRON job to allow -a periodic check of the AIDE database or templates to allow monitoring -via Sensu checks as part of OpTools. - -Create a tripleo-heat-template service to allow population of hiera data -to be consumed by the puppet-module managing AIDE. - -The proposed puppet-module is lhinds-aide [2] as this module will accept -rules declared in hiera data, initialize the Database and enables CRON -entries. Other puppet AIDE modules were missing hiera functionality or -other features (such as CRON population). - -Within tripleo-heat-templates, a composable service will be created to -feed a rule hash into the AIDE puppet module as follows: - - AIDERules: - description: Mapping of AIDE config rules - type: json - default: {} - -The Operator can then source an environment file and provide rule -information as a hash: - - parameter_defaults: - AIDERules: - 'Monitor /etc for changes': - content: '/etc p+sha256' - order : 1 - 'Monitor /boot for changes': - content: '/boot p+u+g+a' - order : 2 - -Ops Tool Integration --------------------- - -In order to allow active monitoring of AIDE events, a sensu check can -be created to perform an interval based verification of AIDE monitored -files (set using ``AIDERules``) against the last initialized database. 
- -Results of the Sensu activated AIDE verification checks will then be fed -to the sensu server for alerting and archiving. - -The Sensu clients (all overcloud nodes) will be configured with a -standalone/passive check via puppet-sensu module which is already -installed on overcloud image. - -If the Operator should choose not to use OpTools, then they can still -configure AIDE using the traditional method by means of a CRON entry. - -Alternatives ------------- - -Using a puppet-module coupled with a TripleO service is the most -pragmatic approach to populating AIDE rules and managing the AIDE -service. - -Security Impact ---------------- - -AIDE is an integrity checking application and therefore requires -Operators ensure the security of AIDE's database is protected from -tampering. Should an attacker get access to the database, they could -attempt to hide malicious activity by removing records of file integrity -hashes. - -The default location is currently `/var/lib/aide/$database` which -puppet-aide sets with privileges of `0600` and ownership of -`root \ root`. - -AIDE itself introduces no security impact to any OpenStack projects -and has no interaction with any OpenStack services. - -Other End User Impact ---------------------- - -The service interaction will occur via heat templates and the TripleO -UI (should a capability map be present). - -Performance Impact ------------------- - -No Performance Impact - -Other Deployer Impact ---------------------- - -The service will be utilised by means of an environment file. Therefore, -should a deployer not reference the environment template using the -`openstack overcloud deploy -e` flag, there will be no impact. - -Developer Impact ----------------- - -No impact on other OpenStack Developers. - -Implementation -============== - -Assignee(s) ------------ - -Primary assignee: - lhinds - -Work Items ----------- - -1. Add puppet-aide [2] to RDO as a puppet package - -2. Create TripleO Service for AIDE - -3. 
Create Capability Map - -4. Create CI Job - -5. Submit documentation to tripleo-docs. - - -Dependencies -============ - -Dependency on lhinds-aide Puppet Module. - -Testing -======= - -Will be tested in TripleO CI by adding the service and an environment -template to a TripleO CI scenario. - -Documentation Impact -==================== - -Documentation patches will be made to explain how to use the service. - -References -========== - -Original Launchpad issue: https://bugs.launchpad.net/tripleo/+bug/1665031 - -[1] https://www.stigviewer.com/stig/red_hat_enterprise_linux_6/2016-07-22/finding/V-38489 - -[2] https://forge.puppet.com/lhinds/aide - -[3] -file:///home/luke/project-files/tripleo-security-hardening/CIS_Red_Hat_Enterprise_Linux_7_Benchmark_v2.1.0.pdf - -[3] -file:///home/luke/project-files/tripleo-security-hardening/CIS_Red_Hat_Enterprise_Linux_7_Benchmark_v2.1.0.pdf diff --git a/specs/pike/container-healthchecks.rst b/specs/pike/container-healthchecks.rst deleted file mode 100644 index 2772ce9c..00000000 --- a/specs/pike/container-healthchecks.rst +++ /dev/null @@ -1,148 +0,0 @@ -=========================================== -Container Healthchecks for TripleO Services -=========================================== - -https://blueprints.launchpad.net/tripleo/+spec/container-healthchecks - -An OpenStack deployment involves many services spread across many -hosts. It is important that we provide tooling and APIs that make it -as easy as possible to monitor this large, distributed environment. -The move to containerized services in the overcloud [1] -brings with it many opportunities, such as the ability to bundle -services with their associated health checks and provide a standard -API for assessing the health of the service. 
- -[1]: https://blueprints.launchpad.net/tripleo/+spec/containerize-tripleo - -Problem Description -=================== - -The people who are in the best position to develop appropriate health -checks for a service are generally those people responsible for -developing the service. Unfortunately, the task of setting up -monitoring generally ends up in the hands of cloud operators or some -intermediary. - -I propose that we take advantage of the bundling offered by -containerized services and create a standard API with which an -operator can assess the health of a service. This makes life easier -for the operator, who can now provide granular service monitoring -without requiring detailed knowledge about every service, and it -allows service developers to ensure that services are monitored -appropriately. - -Proposed Change -=============== - -Overview --------- - -The Docker engine (since version 1.12), as well as most higher-level -orchestration frameworks, provide a standard mechanism for validating -the health of a container. Docker itself provides the -HEALTHCHECK_ directive, while Kubernetes has explicit -support for `liveness and readiness probes`_. Both -mechanisms work by executing a defined command inside the container, -and using the result of that executing to determine whether or not the -container is "healthy". - -.. _liveness and readiness probes: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/ -.. _healthcheck: https://docs.docker.com/engine/reference/builder/#healthcheck - -I propose that we explicitly support these interfaces in containerized -TripleO services through the following means: - -1. Include in every container a `/openstack/healthcheck` command that - will check the health of the containerized service, exit with - status ``0`` if the service is healthy or ``1`` if not, and provide - a message on ``stdout`` describing the nature of the error. - -2. 
Include in every Docker image an appropriate ``HEALTHCHECK`` - directive to utilize the script:: - - HEALTHCHECK CMD /openstack/healthcheck - -3. If Kubernetes becomes a standard part of the TripleO deployment - process, we may be able to implement liveness or readiness probes - using the same script:: - - livenessProbe: - exec: - command: - - /openstack/healthcheck - -Alternatives ------------- - -The alternative is the status quo: services do not provide a standard -healthcheck API, and service monitoring must be configured -individually by cloud operators. - -Security Impact ---------------- - -N/A - -Other End User Impact ---------------------- - -Users can explicitly run the healthcheck script to immediately assess -the state of a service. - -Performance Impact ------------------- - -This proposal will result in the periodic execution of tasks on the -overcloud hosts. When designing health checks, service developers -should select appropriate check intervals such that there is minimal -operational overhead from the health checks. - -Other Deployer Impact ---------------------- - -N/A - -Developer Impact ----------------- - -Developers will need to determine how best to assess the health of a -service and provide the appropriate script to perform this check. - -Implementation -============== - -Assignee(s) ------------ - -N/A - -Work Items ----------- - -N/A - -Dependencies -============ - -- This requires that we implement `containerize-tripleo-overcloud`_ - blueprint. - -.. _containerize-tripleo-overcloud: https://specs.openstack.org/openstack/tripleo-specs/specs/ocata/containerize-tripleo-overcloud.html - -Testing -======= - -TripleO CI jobs should be updated to utilize the healthcheck API to -determine if services are running correctly. - -Documentation Impact -==================== - -Any documentation describing the process of containerizing a service -for TripleO must be updated to describe the healthcheck API. 
- -References -========== - -N/A - diff --git a/specs/pike/containerized-services-logs.rst b/specs/pike/containerized-services-logs.rst deleted file mode 100644 index 57526663..00000000 --- a/specs/pike/containerized-services-logs.rst +++ /dev/null @@ -1,305 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -==================================================== -Best practices for logging of containerized services -==================================================== - -Include the URL of your launchpad blueprint: - -https://blueprints.launchpad.net/tripleo/+spec/containerized-services-logs - -Containerized services shall persist its logs. There are many ways to address -that. The scope of this blueprint is to suggest best practices and intermediate -implementation steps for Pike release as well. - -Problem Description -=================== - -Pike will be released with a notion of hybrid deployments, which is some -services may be running in containers and managed by docker daemon, and -some may be managed by systemd or Pacemaker and placed on hosts directly. - -The notion of composable deployments as well assumes end users and -developers may want to deploy some services non-containerized and tripleo -heat templates shall not prevent them from doing so. - -Despite the service placement type, end users and developers shall get all -logs persisted, consistent and available for future analysis. - -Proposed Change -=============== - -Overview --------- - -.. note:: As the spec transitions from Pike, some of the sections below are - split into the Pike and Queens parts. - -The scope of this document for Pike is limited to recommendations for -developers of containerized services, bearing in mind use cases for hybrid -environments. 
It addresses only intermediate implementation steps for Pike and -smooth UX with upgrades from Ocata to Pike, and with future upgrades from Pike -as well. - -A `12factor `_ is the general guideline for logging -in containerized apps. Based on it, we rephrase our main design assumption as: -"each running process writes its only event stream to be persisted outside -of its container". And we put an additional design constraint: "each container -has its only running foreground process, nothing else requires persistent -logs that may outlast the container execution time". This assumes all streams -but the main event stream are ephemeral and live no longer than the container -instance does. - -.. note:: HA statefull services may require another approach, see the - alternatives section for more details. - -The scope for future releases, starting from Queens, shall include best -practices for collecting (shipping), storing (persisting), processing (parsing) -and accessing (filtering) logs of hybrid TripleO deployments with advanced -techniques like EFK (Elasticsearch, Fluentd, Kibana) or the like. Hereafter -those are referred as "future steps". - -Note, this is limited to OpenStack and Linux HA stack (Pacemaker and Corosync). -We can do nothing to the rest of the supporting and legacy apps like -webservers, load balancing revers proxies, database and message queue clusters. -Even if we could, this stays out of OpenStack specs scope. - -Here is a list of suggested best practices for TripleO developers for Pike: - -* Host services shall keep writing logs as is, having UIDs, logging configs, - rotation rules and target directories unchanged. - - .. note:: Host services changing its control plane to systemd or pacemaker - in Ocata to Pike upgrade process, may have logging configs, rules and - destinations changed as well, but this is out of the scope of this spec. 
- -* Containerized services that normally log to files under the `/var/log` dir, - shall keep logging as is inside of containers. The logs shall be persisted - with hostpath mounted volumes placed under the `/var/log/containers` path. - This is required because of the hybrid use cases. For example, containerized - nova services access `/var/log/nova` with different UIDs than the host - services would have. Given that, nova containers should have log volumes - mounted as ``-v /var/log/nova:/var/log/containers/nova`` in order to not - bring conflicts. Persisted log files then can be pulled by a node agent like - fluentd or rsyslog and forwarded to a central logging service. - -* Containerized services that can only log to syslog facilities: bind mount - /dev/log into all tripleo service containers as well so that the host - collects the logs via journald. This should be a standard component of our - container "API": we guarantee (a) a log directory and (b) a syslog socket - for *every* containerized service. Collected journald logs then can be pulled - by a node agent like fluentd or rsyslog and forwarded to a central logging - service. - -* Containerized services that leverage Kolla bootstrap, extended start and/or - config facilities, shall be templated with Heat deployment steps as the - following: - - * Host prep tasks to ensure target directories pre-created for hosts. - - * Kolla config's permissions to enforce ownership for log dirs (hostpath - mounted volumes). - - * Init containers steps to chown log directories early otherwise. Kolla - bootstrap and DB sync containers are normally invoked before the - `kolla_config` permissions to be set. Therefore come init containers. - -* Containerized services that do not use Kolla and run as root in containers - shall be running from a separate user namespace remapped to a non root host - user, for security reasons. No such services are currently deployed by - TripleO, though. - - .. 
note:: Docker daemon would have to be running under that remapped non root - user as well. See docker documentation for the ``--userns-remap`` option. - -* Containerized services that run under pacemaker (or pacemaker remote) - control plane and do not fall into any of the given cases: bind mount - /dev/log as well. At this stage the way services log is in line with the best - practice w.r.t "dedicated log directory to avoid conflicts". Pacemaker - bundles isolate the containerized resources' logs on the host into - `/var/log/pacemaker/bundles/{resource}`. - -Future steps TBD. - -Alternatives ------------- - -Those below come for future steps only. - -Alternatively to hostpath mounted volumes, create a directory structure such -that each container has a namespace for its logs somewhere under `/var/log`. -So, a container named 12345 would have *all its logs* in the -`/var/log/container-12345` directory structure (requires clarification). -This also alters the assumption that in general there is only one main log -per a container, which is the case for highly available containerized -statefull services bundled with pacemaker remote, with multiple logs to -capture, like `/var/log/pacemaker.log`, logs for cluster bootstrapping -events, control plane agents, helper tools like rsyncd, and the statefull -service itself. - -When we have control over the logging API (e.g. via oslo.log), we can forsake -hostpath mounted volumes and configure containerized services to output to -syslog (via bind mount `/dev/log`) so that the host collects the logs via -journald). Or configure services to log only to stdout, so that docker daemon -collects logs and ships them to the journald. - -.. note:: The "winning" trend is switching all (including openstack - services) to syslog and log nothing to the /var/log/, e.g. just bind-mount - ``-v /dev/null:/var/log`` for containers. 
- -Or use a specialized log driver like the oslo.log fluentd logging driver -(instead of the default journald or json-file) to output to a fluentd log agent -running on the host or containerized as well, which would then aggregate logs -from all containers, annotate with node metadata, and use the fluentd -`secure_forward` protocol to send the logs to a remote fluentd agent like -common logging. - -These are not doable for Pike as requiring too many changes impacting upgrade -UX as well. Although, this is the only recommended best practice and end goal for -future releases and future steps coming after Pike. - -Security Impact ---------------- - -As the spec transitions from Pike, the section is split into the Pike and -Queens parts. - -UID collisions may happen for users in containers to occasionally match another -user IDs on the host. And to allow those to access logs of foreign services. -This should be mitigated with SELinux policies. - -Future steps impact TBD. - -Other End User Impact ---------------------- - -As the spec transitions from Pike, the section is split into the Pike and -Queens parts. - -Containerized and host services will be logging under different paths. The former -to the `/var/log/containers/foo` and `/var/log/pacemaker/bundles/*`, the latter -to the `/var/log/foo`. This impacts logs collecting tools like -`sosreport `_ et al. - -Future steps impact TBD. - -Performance Impact ------------------- - -As the spec transitions from Pike, the section is split into the Pike and -Queens parts. - -Hostpath mounted volumes bring no performance overhead for containerized -services' logs. Host services are not affected by the proposed change. - -Future steps impact is that handling of the byte stream of stdout can -have a significant impact on performance. - -Other Deployer Impact ---------------------- - -As the spec transitions from Pike, the section is split into the Pike and -Queens parts. 
- -When upgrading from Ocata to Pike, containerized services will change its -logging destination directory as described in the end user impact section. -This also impacts logs collecting tools like sosreport et al. - -Logrotate scripts must be adjusted for the `/var/log/containers` and -`/var/log/pacemaker/bundles/*` as well. - -Future steps impact TBD. - -Developer Impact ----------------- - -As the spec transitions from Pike, the section is split into the Pike and -Queens parts. - -Developers will have to keep in mind the recommended intermediate best -practices, when designing heat templates for TripleO hybrid deployments. - -Developers will have to understand Kolla and Docker runtime internals, although -that's already the case once we have containerized services onboard. - -Future steps impact (to be finished): - -* The notion of Tracebacks in the events is difficult to handle as a byte - stream, because it becomes the responsibility of the apps to ensure output - of new-line separated text is not interleaved. That notion of Tracebacks - needs to be implemented apps side. - -* Oslo.log is really emitting a stream of event points, or trace points, with - rich metadata to describe those events. Capturing that metadata via a byte - stream later needs to be implemented. - -* Event streams of child processes, forked even temporarily, should or may need - to be captured by the parent events stream as well. - -Implementation -============== - -Assignee(s) ------------ - -Primary assignee: - bogdando - -Other contributors: - michele - flaper87 - larsks - dciabrin - -Work Items ----------- - -As the spec transitions from Pike, the work items are split into the Pike and -Queens parts: - -* Implement an intermediate logging solution for tripleo-heat-templates for - containerized services that log under `/var/log` (flaper87, bogdando). Done - for Pike. -* Come up with an intermediate logging solution for containerized services that - log to syslog only (larsks). 
Done for Pike. -* Come up with a solution for HA containerized services managed by Pacemaker - (michele). Done for Pike. -* Make sure that sosreport collects `/var/log/containers/*` and - `/var/log/pacemaker/bundles/*` (no assignee). Pending for Pike. -* Adjust logrotate scripts for the `/var/log/containers` and - `/var/log/pacemaker/bundles/*` paths (no assignee). Pending for Pike. -* Verify if the namespaced `/var/log/` for containers works and fits the case - (no assignee). -* Address the current state of OpenStack infrastructure apps as they are, and - gently move them towards these guidelines referred as "future steps" (no - assignee). - -Dependencies -============ - -None. - -Testing -======= - -Existing CI coverage fully fits the proposed change needs. - -Documentation Impact -==================== - -The given best practices and intermediate solutions built from those do not -involve changes visible for end users but those given in the end users impact -section. The same is true for developers and dev docs. - -References -========== - -* `Sosreport tool `_. -* `Pacemaker container bundles `_. -* `User namespaces in docker `_. -* `Docker logging drivers `_. -* `Engineering blog posts `_. diff --git a/specs/pike/deployment-plan-management.rst b/specs/pike/deployment-plan-management.rst deleted file mode 100644 index 409c6955..00000000 --- a/specs/pike/deployment-plan-management.rst +++ /dev/null @@ -1,230 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -================================== -Deployment Plan Management changes -================================== - -https://blueprints.launchpad.net/tripleo/+spec/deployment-plan-management-refactor - -The goal of this work is to improve GUI and CLI interoperability by changing the way -deployment configuration is stored, making it more compact and simplify plan import -and export. 
- -Problem Description -=================== - -The problem is broadly described in mailing list discussion [1]. This spec is a result -of agreement achieved in that discussion. - -TripleO-Common library currently operates on Mistral environment for storing plan -configuration although not all data are stored there since there are additional files -which define plan configuration (roles_data.yaml, network_data.yaml, capabilities-map.yaml) -which are currently used by CLI to drive certain parts of deployment configuration. -This imposes a problem of synchronization of content of those files with Mistral -environment when plan is imported or exported. - -TripleO-Common needs to be able to provide means for roles and networks management. - -Proposed Change -=============== - -Overview --------- - -TripleO plan configuration data should be stored in single place rather than in multiple -(mistral environment + plan meta files stored in Swift container). - -TripleO-Common should move from using mistral environment to storing the information -in file (plan-environment.yaml) in Swift container so all plan configuration data -are stored in 'meta' files in Swift and tripleo-common provides API to perform operations -on this data. 
- -Plan meta files: capabilities-map.yaml, roles_data.yaml, network_data.yaml [3], -plan-environment.yaml - -Proposed plan-environment.yaml file structure:: - - version: 1.0 - - name: A name of a plan which this file describes - description: > - A description of a plan, it's usage and potential summary of features it provides - template: overcloud.yaml - environments: - - path: overcloud-resource-registry-puppet.yaml - parameter_defaults: - ControllerCount: 1 - passwords: - TrovePassword: "vEPKFbdpTeesCWRmtjgH4s7M8" - PankoPassword: "qJJj3gTg8bTCkbtYtYVPtzcyz" - KeystoneCredential0: "Yeh1wPLUWz0kiugxifYU19qaf5FADDZU31dnno4gJns=" - - -This solution makes whole plan configuration stored in Swift container together with -rest of plan files, simplifies plan import/export functionality as no synchronization -is necessary between the Swift files and mistral environment. Plan configuration is -more straightforward and CLI/GUI interoperability is improved. - -Initially the plan configuration is going to be split into multiple 'meta' files -(plan-environment.yaml, capabilities-map.yaml, roles_data.yaml, network_data.yaml) -all stored in Swift container. -As a next step we can evaluate a solution which merges them all into plan-environment.yaml - -Using CLI workflow user works with local files. Plan, Networks and Roles are configured by -making changes directly in relevant files (plan-management.yaml, roles_data.yaml, ...). -Plan is created and templates are generated on deploy command. - -TripleO Common library will implement CRUD actions for Roles and Networks -management. This will allow clients to manage Roles and Networks and generate relevant -templates (see work items). - -TripleO UI and other clients use tripleo-common library which operates on plan stored in -Swift container. 
- - -Alternatives ------------- - -Alternative approach is treating Swift 'meta' files as an input during plan creation -and synchronize them to Mistral environment when plan is imported which is described -initially in [1] and is used in current plan import/export implementation [2] - -This solution needs to deal with multiple race conditions, makes plan import/export -much more complicated and overall solution is not simple to understand. Using this -solution should be considered if using mistral environment as a plan configuration -storage has some marginal benefits over using file in Swift. Which is not the case -according to the discussion [1] - -As a subsequent step to proposed solution, it is possible to join all existing -'meta' files into a single one. - -Security Impact ---------------- - -None. - -Other End User Impact ---------------------- - -CLI/GUI interoperability is improved - -Performance Impact ------------------- - -None. - -Other Deployer Impact ---------------------- - -None. - -Developer Impact ----------------- - -This change makes Deployment Plan import/export functionality much simpler as well as -makes the tripleo-common operate on the same set of files as CLI does. It is much -easier to understand the CLI users how tripleo-common works as it does not do any -swift files -> mistral environment synchronization on the background. - -TripleO-Common can introduce functionality manage Roles and Networks which perfectly -matches to how CLI workflow does it. - -Implementation -============== - -Assignee(s) ------------ - -Primary assignee: - akrivoka - -Other contributors: - * d0ugal - * rbrady - * jtomasek - -Work Items ----------- - -* [tripleo-heat-templates] Update plan-environment.yaml to match new specification. - - blueprint: https://blueprints.launchpad.net/tripleo/+spec/update-plan-environment-yaml - -* [tripleo-common] Update relevant actions to store data in plan-environment.yaml in - Swift instead of using mistral-environment. 
Migrate any existing data away from Mistral. - - blueprint: https://blueprints.launchpad.net/tripleo/+spec/stop-using-mistral-env - -* [tripleo-common] On plan creation/update tripleo-common validates the plan and checks - that roles_data.yaml and network_data.yaml exist as well as validates its format. - On success, plan creation/update templates are generated/regenerated. - - blueprint: https://blueprints.launchpad.net/tripleo/+spec/validate-roles-networks - -* [tripleo-common] Provide a GetRoles action to list current roles in json format by reading - roles_data.yaml. - - blueprint: https://blueprints.launchpad.net/tripleo/+spec/get-roles-action - -* [tripleo-common] Provide a GetNetworks action to list current networks in json format - by reading network_data.yaml. - - blueprint: https://blueprints.launchpad.net/tripleo/+spec/get-networks-action - -* [tripleo-common] Provide an UpdateRoles action to update Roles. It takes data in - json format, validates its contents and persists them in roles_data.yaml, after - successful update, templates are regenerated. - - blueprint: https://blueprints.launchpad.net/tripleo/+spec/update-roles-action - -* [tripleo-common] Provide an UpdateNetworks action to update Networks. It takes data in - json format, validates its contents and persists them in network_data.yaml. - - blueprint: https://blueprints.launchpad.net/tripleo/+spec/update-networks-action - -* [tripleo-ui] Provide a way to create/list/update/delete Roles by calling tripleo-common - actions. - - blueprint: https://blueprints.launchpad.net/tripleo/+spec/roles-crud-ui - -* [tripleo-ui] Provide a way to create/list/update/delete Networks by calling tripleo-common - actions. - - blueprint: https://blueprints.launchpad.net/tripleo/+spec/networks-crud-ui - -* [tripleo-ui] Provide a way to assign Networks to Roles.
- - blueprint: https://blueprints.launchpad.net/tripleo/+spec/networks-roles-assignment-ui - -* [python-tripleoclient] Update CLI to use tripleo-common actions for operations - that currently modify mistral environment - - related bug: https://bugs.launchpad.net/tripleo/+bug/1635409 - -Dependencies -============ - -None. - -Testing -======= - -Feature will be tested as part of TripleO CI - -Documentation Impact -==================== - -Documentation should be updated to reflect the new capabilities of GUI (Roles/Networks management), -a way to use plan-environment.yaml via CLI workflow and CLI/GUI interoperability using plan import -and export features. - -References -========== - -[1] http://lists.openstack.org/pipermail/openstack-dev/2017-February/111433.html -[2] https://specs.openstack.org/openstack/tripleo-specs/specs/ocata/gui-plan-import-export.html -[3] https://review.openstack.org/#/c/409921/ diff --git a/specs/pike/environment-generator.rst b/specs/pike/environment-generator.rst deleted file mode 100644 index a6db9e47..00000000 --- a/specs/pike/environment-generator.rst +++ /dev/null @@ -1,167 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -============================ -Sample Environment Generator -============================ - -A common tool to generate sample Heat environment files would be beneficial -in two main ways: - -* Consistent formatting and details. Every environment file would include - parameter descriptions, types, defaults, etc. - -* Ease of updating. The parameters can be dynamically read from the templates - which allows the sample environments to be updated automatically when - parameters are added or changed. - -Problem Description -=================== - -Currently our sample environments are hand written, with no consistency in -terms of what is included. 
Most do not include a description of what all -the parameters do, and almost none include the types of the parameters or the -default values for them. - -In addition, the environment files often get out of date because developers -have to remember to manually update them any time they make a change to the -parameters for a given feature or service. This is tedious and error-prone. - -The lack of consistency in environment files is also a problem for the UI, -which wants to use details from environments to improve the user experience. -When environments are created manually, these details are likely to be missed. - -Proposed Change -=============== - -Overview --------- - -A new tool, similar to the oslo.config generator, will allow us to eliminate -these problems. It will take some basic information about the environment and -use the parameter definitions in the templates to generate the sample -environment file. - -The resulting environments should contain the following information: - -* Human-readable Title -* Description -* parameter_defaults describing all the available parameters for the - environment -* Optional resource_registry with any necessary entries - -Initially the title and description will simply be comments, but eventually we -would like to get support for those fields into Heat itself so they can be -top-level keys. - -Ideally the tool would be able to update the capabilities map automatically as -well. At some point there may be some refactoring done there to eliminate the -overlap, but during the transition period this will be useful. - -This is also a good opportunity to impose some organization on the environments -directory of tripleo-heat-templates. Currently it is mostly a flat directory -that contains all of the possible environments. It would be good to add -subdirectories that group related environments so they are easier to find. 
- -The non-generated environments will either be replaced by generated ones, -when that makes sense, or deprecated in favor of a generated environment. -In the latter case the old environments will be left for a cycle to allow -users transition time to the new environments. - -Alternatives ------------- - -We could add more checks to the yaml-validate tool to ensure environment files -contain the required information, but this still requires more developer -time and doesn't solve the maintenance problems as parameters change. - -Security Impact ---------------- - -None - -Other End User Impact ---------------------- - -Users should get an improved deployment experience through more complete and -better documented sample environments. Existing users who are referencing -the existing sample environments may need to switch to the new generated -environments. - -Performance Impact ------------------- - -No runtime performance impact. Initial testing suggests that it may take a -non-trivial amount of time to generate all of the environments, but it's not -something developers should have to do often. - -Other Deployer Impact ---------------------- - -See End User Impact - -Developer Impact ----------------- - -Developers will need to write an entry in the input file for the tool rather -than directly writing sample environments. The input format of the tool will -be documented, so this should not be too difficult. - -When an existing environment is deprecated in favor of a generated one, a -release note should be written by the developer making the change in order to -communicate it to users. 
- - -Implementation -============== - -Assignee(s) ------------ - -Primary assignee: - bnemec - -Other contributors: - jtomasek - -Work Items ----------- - -* Update the proposed tool to reflect the latest design decisions -* Convert existing environments to be generated - - -Dependencies -============ - -No immediate dependencies, but in the long run we would like to have some -added functionality from Heat to allow these environments to be more easily -consumed by the UI. However, it was agreed at the PTG that we would proceed -with this work and make the Heat changes in parallel so we can get some of -the benefits of the change as soon as possible. - - -Testing -======= - -Any environments used in CI should be generated with the tool. We will want -to add a job that exercises the tool as well, probably a job that ensures any -changes in the patch under test are reflected in the environment files. - - -Documentation Impact -==================== - -We will need to document the format of the input file. - - -References -========== - -`Initial proposed version of the tool -`_ - -https://etherpad.openstack.org/p/tripleo-environment-generator diff --git a/specs/pike/gui-logging.rst b/specs/pike/gui-logging.rst deleted file mode 100644 index 575dcd06..00000000 --- a/specs/pike/gui-logging.rst +++ /dev/null @@ -1,121 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -=========== -GUI logging -=========== - -The TripleO GUI currently has no way to persist logging information. - -Problem Description -=================== - -The TripleO GUI is a web application without its own dedicated backend. As -such, any and all client-side errors are lost when the End User reloads the page -or navigates away from the application. When things go wrong, the End User is -unable to retrieve client-side logs because this information is not persisted. 
- -Proposed Change -=============== - -Overview --------- - -I propose that we use Zaqar as a persistence backend for client-side logging. -At present, the web application is already communicating with Zaqar using -websockets. We can use this connection to publish new messages to a dedicated -logging queue. - -Zaqar messages have a TTL of one hour. So once every thirty minutes, Mistral -will query Zaqar using crontrigger, and retrieve all messages from the -``tripleo-ui-logging`` queue. Mistral will then look for a file called -``tripleo-ui-log`` in Swift. If this file exists, Mistral will check its size. -If the size exceeds a predetermined size (e.g. 10MB), Mistral will rename it to -``tripleo-ui-log-``, and create a new file in its place. The file -will then receive the messages from Zaqar, one per line. Once we reach, let's -say, a hundred archives (about 1GB) we can start removing dropping data in order -to prevent unnecessary data accumulation. - -To view the logging data, we can ask Swift for 10 latest messages with a prefix -of ``tripleo-ui-log``. These files can be presented in the GUI for download. -Should the user require, we can present a "View more" link that will display the -rest of the collected files. - -Alternatives ------------- - -None at this time - -Security Impact ---------------- - -There is a chance of logging sensitive data. I propose that we apply some -common scrubbing mechanism to the messages before they are stored in Swift. - -Other End User Impact ---------------------- - -Performance Impact ------------------- - -Sending additional messages over an existing websocket connection should have -a negligible performance impact on the web application. Likewise, running -hourly cron tasks in Mistral shouldn't impose a significant burden on the -undercloud machine. 
- -Other Deployer Impact ---------------------- - -Developer Impact ----------------- - -Developers should also benefit from having a centralized logging system in -place as a means of improving productivity when debugging. - -Implementation -============== - -Assignee(s) ------------ - -Primary assignee: - hpokorny - -Work Items ----------- - -* Introduce a central logging system (already in progress, see `blueprint`_) -* Introduce a global error handler -* Convert all logging messages to JSON using a standard format -* Configuration: the name for the Zaqar queue to carry the logging data -* Introduce a Mistral workflow to drain a Zaqar queue and publish the acquired - data to a file in Swift -* Introduce GUI elements to download the log files - -Dependencies -============ - -Testing -======= - -We can write unit tests for the code that handles sending messages over the -websocket connection. We might be able to write an integration smoke test that -will ensure that a message is received by the undercloud. We can also add some -testing code to tripleo-common to cover the logic that drains the queue, and -publishes the log data to Swift. - -Documentation Impact -==================== - -We need to document the default name of the Zaqar queue, the maximum size of -each log file, and how many log files can be stored at most. On the End User -side, we should document the fact that a GUI-oriented log is available, and the -way to get it. - -References -========== - -.. _blueprint: https://blueprints.launchpad.net/tripleo/+spec/websocket-logging diff --git a/specs/pike/send-mail-tool.rst b/specs/pike/send-mail-tool.rst deleted file mode 100644 index fa0a4a5a..00000000 --- a/specs/pike/send-mail-tool.rst +++ /dev/null @@ -1,129 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. 
- - http://creativecommons.org/licenses/by/3.0/legalcode - -============================================ -Tool send email with tripleo tempest results -============================================ - -https://blueprints.launchpad.net/tripleo/+spec/send-mail-tool - -To speed up the troubleshooting, debugging and reproducing TripleO tempest -results, we should have a list of people responsible to receive email status -about tempest failures, containing a list of all the failures and failures -that are known issues and are being covered by some opened bug in launchpad. - -Problem Description -=================== - -Currently there is periodic TripleO jobs running tempest, and these results -are not being verified whether is failing or passing. -Even if there is someone responsible to verify these runs, still is a manual -job go to logs web site, check what's the latest job, go to the logs, verify -if tempest ran, list the number of failures, check against a list if these -failures are known failures or new ones, and only after all these steps, -start to work to identify the root cause of the problem. - -Proposed Change -=============== - -Overview --------- - -TripleO should provide a unified method for send email for a list of -users who would be responsible to take action when something goes wrong with -tempest results. -The method should run at the end of every run, in the validate-tempest role, -and read the log file, either by the output generated by tempest, or by the -logs uploaded to the logs website, identifying failures on tempest and report -it by mail, or save the mail content in a file to be verified later. The mail -should contain information such list of failures, list of known -failures, date, link to the logs of the run, and any other information that -might be relevant. - -Alternatives ------------- - -One of the alternatives would be openstack-health, where the user can -subscribe into the rss feed of one of the jobs using a third party application. 
-Right now, openstack-health doesn't support user subscription or send emails. - -Security Impact ---------------- - -None, since it will use an API running in some cloud service to send the email, -so the username and password remain secure. - -Other End User Impact ---------------------- - -None. - -Performance Impact ------------------- - -None. - -Other Deployer Impact ---------------------- - -None. - -Developer Impact ----------------- - -Developers in different teams will be more involved in TripleO CI debugging. - -Implementation -============== - -Assignee(s) ------------ - -Primary assignee: - arxcruz - - -Work Items ----------- - -* The script should be written in Python -* Should be part of validate-tempest role in tripleo-quickstart-extras -* Should be able to read the logs in any run in http://logs.openstack.org -* Once it reads the log, collect information about the failures, - passing and known failures or taking tempest output and parsing it directly. -* Be able to work with Jinja2 template to send email, so it's - possible to have different templates for different types of job -* Read the list of addresses that the report should be sent to - * The list is a dictionary mapping the email address to a list of tests - and/or jobs where the users are interested. -* Render the template with the proper data -* Send the report - - -Dependencies -============ - -None. - -Testing -======= - -As part of CI testing, the new tool should be used to send a -report to a list of interested people - -Documentation Impact -==================== - -Documentation should be updated to reflect the standard ways -to send the report and call the script at the end of every -periodic run.
- -References -========== - -Sagi mail tempest: -https://github.com/sshnaidm/various/blob/master/check_tests.py - diff --git a/specs/pike/tripleo-ceph-ansible-integration.rst b/specs/pike/tripleo-ceph-ansible-integration.rst deleted file mode 100644 index 7af09620..00000000 --- a/specs/pike/tripleo-ceph-ansible-integration.rst +++ /dev/null @@ -1,571 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -=============================================== - Enable TripleO to Deploy Ceph via Ceph Ansible -=============================================== - -https://blueprints.launchpad.net/tripleo/+spec/tripleo-ceph-ansible - -Enable TripleO to deploy Ceph via Ceph Ansible using a new Mistral -workflow. This will make the Ceph installation less tightly coupled -with TripleO but the existing operator interfaces to deploy Ceph with -TripleO will still be supported until the end of the Queens release. - -Problem Description -=================== - -The Ceph community maintains ceph-ansible to deploy and manage Ceph. -Members of the TripleO community maintain similar tools too. This is -a proposal to have TripleO trigger the Ceph community's tools via -Mistral as an alternative method to deploy and manage Ceph. - -Benefits of using another project to deploy and manage Ceph -=========================================================== - -Avoid duplication of effort ---------------------------- - -If there is a feature or bug fix in the Ceph community's tools not in -the tools used by TripleO, then members of the TripleO community could -allow deployers to use those features directly instead of writing -their own implementation. If this proposal is successful, then it -might result in not maintaining two code bases, (along with the bug -fixes and testing included) in the future. 
For example, if -ceph-ansible fixed a bug to correctly handle alternative system paths -to block devices, e.g. /dev/disk/by-path/ in lieu of /dev/sdb, then -the same bug would not need to be fixed in puppet-ceph. This detail -would also be nicely abstracted from a deployer because this spec -proposes maintaining parity with TripleO Heat Templates. Thus, the -deployer would not need to change the `ceph::profile::params::osds` -parameter as the same list of OSDs would work. - -In taking this approach it's possible for there to be cases where -TripleO's deployment architecture may have unique features that don't -exist within ceph-ansible. In these cases, efforts may need to be -taken to ensure such features remain in parity with this approach. -In no way does this proposal enable a TripleO deployer to bypass -TripleO and use ceph-ansible directly. Also, because Ceph is not an -OpenStack service itself but a service that TripleO uses, this -approach remains consistent with the TripleO mission. - - -Consistency between OpenStack and non-OpenStack Ceph deployments ----------------------------------------------------------------- - -A deployer may seek assistance from the Ceph community with a Ceph -deployment and this process will be simplified if both deployments -were done using the same tool. - -Enable Decoupling of Ceph management from TripleO -------------------------------------------------- - -The complexity of Ceph management can be moved to a different tool -and abstracted, where appropriate, from TripleO making the Ceph -management aspect of TripleO less complex. Combining this with -containerized Ceph would offer flexible deployment options. This -is a deployer benefit that is difficult to deliver today.
- -Features in the Ceph community's tools not in TripleO's tools -------------------------------------------------------------- - -The Ceph community tool, ceph-ansible [1]_, offers benefits to -OpenStack users not found in TripleO's tool chain, including playbooks -to deploy Ceph in containers and migrate a non-containerized -deployment to a containerized deployment without downtime. Also, -making the Ceph deployment in TripleO less tightly coupled, by moving -it into a new Mistral workflow, would make it easier in a future -release to add a business logic layer through a tool like Tendrl [2]_, -to offer additional Ceph policy based configurations and possibly a -graphical tool to see the status of the Ceph cluster. However, the -scope of this proposal for Pike does not include Tendrl and instead -takes the first step towards deploying Ceph via a Mistral workflow by -triggering ceph-ansible directly. After the Pike cycle is complete -triggering Mistral may be considered in a future spec. - -Proposed Change -=============== - -Overview --------- - -The ceph-ansible [1]_ project provides a set of playbooks to deploy -and manage Ceph. A proof of concept [3]_ has been written which uses -two custom Mistral actions from the experimental -mistral-ansible-actions project [4]_ to have a Mistral workflow on the -undercloud trigger ceph-ansible to produce a working hyperconverged -overcloud. - -The deployer experience to stand up Ceph with TripleO at the end of -this cycle should be the following: - -#. The deployer chooses to deploy a role containing any of the - Ceph server services: CephMon, CephOSD, CephRbdMirror, CephRgw, - or CephMds. -#. The deployer provides the same Ceph parameters they provide today - in a Heat env file, e.g. a list of OSDs. -#. The deployer starts the deploy and gets an overcloud with Ceph - -Thus, the deployment experience remains the same for the deployer but -behind the scenes a Mistral workflow is started which triggers -ceph-ansible. 
The details of the Mistral workflow to accomplish this -follows. - -TripleO Ceph Deployment via Mistral ------------------------------------ - -TripleO's workflow to deploy a Ceph cluster would be changed so that -there are two ways to deploy a Ceph cluster; the way currently -supported by TripleO and the way described in this proposal. - -The workflow described here assumes the following: - -#. A deployer chooses to deploy Ceph server services from the - following list of five services found in THT's roles_data.yaml: - CephMon, CephOSD, CephRbdMirror, CephRgw, or CephMds. -#. The deployer chooses to include new Heat environment files which - will be in THT when this spec is implemented. The new Heat - environment file will change the implementation of any of the five - services from the previous step. Using storage-environment.yaml, - which defaults to Ceph deployed by puppet-ceph, will still trigger - the Ceph deployment by puppet-ceph. However, if the new Heat - environment files are included instead of storage-environment.yaml, - then the implementation of the service will be done by ceph-ansible - instead; which already configures these services for hosts under - the following roles in the Ansible inventory: mons, osds, mdss, - rgws, or rbdmirrors. -#. The undercloud has a directory called /usr/share/ceph-ansible - which contains the ceph-ansible playbooks described in this spec. - It will be present because its install will contain the - installation of the ceph-ansible package. -#. The Mistral on the Undercloud will contain to custom actions called - `ansible` and `ansible-playbook` (or similar) and will also contain - the workflow for each task below and can be observed by running - `openstack workflow list`. Assume this is the case because the - tripleo-common package will be modified to ship these actions and - they will be available after undercloud installation. -#. 
Heat will ship a new CustomResource type like - OS::Mistral::WorflowExecution [6]_, which will execute custom - Mistral workflows. - -The standard TripleO workflow, as executed by a deployer, will create -a custom Heat resource which starts an independent Mistral workflow to -interact with ceph-ansible. An example of such a Heat resource would be -OS::Mistral::WorflowExecution [6]_. - -Each independent Mistral workflow may be implemented directly in -tripleo-common/workbooks. A separate Mistral workbook will be created -for each goal described below: - -* Initial deployment of OpenStack and Ceph -* Adding additional Ceph OSDs to existing OpenStack and Ceph clusters - -The initial goal for the Pike cycle will be to maintain feature parity -with what is possible today in TripleO and puppet-ceph but with -containerized Ceph. Additional Mistral workflows may be written, time -permitting or in a future cycle to add new features to TripleO's Ceph -deployment which leverage ceph-ansible playbooks to shrink the Ceph -Cluster and safely remove an OSD or to perform maintenance on the -cluster by using Ceph's 'noout' flag so that the maintenance does not -result in more data migration than necessary. - -Initial deployment of OpenStack and Ceph ----------------------------------------- - -The sequence of events for this new Mistral workflow and Ceph-Ansible -to be triggered during initial deployment with TripleO follows: - -#. Define the Overcloud on the Undercloud in Heat. This includes the - Heat parameters that are related to storage which will later be - passed to ceph-ansible via a Mistral workflow. -#. Run `openstack overcloud deploy` with standard Ceph options but - including a new Heat environment file to make the implementation - of the service deployment use ceph-ansible. -#. The undercloud assembles and uploads the deployment plan to the - undercloud Swift and Mistral environment. -#. 
Mistral starts the workflow to deploy the Overcloud and interfaces - with Heat accordingly. -#. A point in the deployment is reached where the Overcloud nodes are - imaged, booted, and networked. At that point the undercloud has - access to the provisioning or management IPs of the Overcloud - nodes. -#. A new Heat Resource is created which starts a Mistral workflow to - Deploy Ceph on the systems with the any of the five Ceph server - services, including CephMon, CephOSD, CephRbdMirror, CephRgw, or - CephMds [6]_. -#. The servers which host Ceph services have their relevant firewall - ports opened according to the needs of their service, e.g. the Ceph - monitor firewalls are configured to accept connections on TCP - port 6789. [7]_. -#. The Heat resource is passed the same parameters normally found in - the tripleo-heat-templates environments/storage-environment.yaml - but instead through a new Heat environment file. Additional files - may be passed to include overrides, e.g. the list of OSD disks. -#. The Heat resource passes its parameters to the Mistral workflow as - parameters. This will include information about which hosts should - have which of the five Ceph server services. -#. The Mistral workflow translates these parameters so that they match - the parameters that ceph-ansible expects, e.g. - ceph::profile::params::osds would become devices though they'd have - the same content, which would be a list of block devices. The - translation entails building an argument list that may be passed - to the playbook by calling `ansible-playbook --extra-vars`. - Typically ceph-ansible uses modified files in the group_vars - directory but in this case, no files are modified and instead the - parameters are passed programmatically. Thus, the playbooks in - /usr/share/ceph-ansible may be run unaltered and that will be the - default directory. However, it will be possible to pass an - alternative location for the /usr/share/ceph-ansible playbook as - an argument. 
No playbooks are run yet at this stage. -#. The Mistral environment is updated to generate a new SSH key-pair - for ceph-ansible and the Overcloud nodes using the same process - that is used to create the SSH keys for TripleO validations and - install the public key on Overcloud nodes. After this environment - update it will be possible to run `mistral environment-get - ssh_keys_ceph` on the undercloud and see the public and private - keys in JSON. -#. The Mistral Action Plugin `ansible-playbook` is called and passed - the list of parameters as described earlier. The dynamic ansible - inventory used by tripleo-validations is used with the `-i` - option. In order for ceph-ansible to work as usual there must be a - group called `[mons]` and `[osds]` in the inventory. In addition to - optional groups for `[mdss]`, `[rgws]`, or `[rbdmirrors]`. - Modifications to the tripleo-validations project's - tripleo-ansible-inventory script may be made to support this, or a - derivative work of the same as shipped by TripleO common. The SSH - private key for the heat-admin user and the provisioning or - management IPs of the Overcloud nodes are what Ansible will use. -#. The mistral workflow computes the number of forks in Ansible - according to the number of machines that are going to be - bootstrapped and will pass this number with `ansible-playbook - --forks`. -#. Mistral verifies that the Ansible ping module can execute `ansible - $group -m ping` for any group in mons, osds, mdss, rgws, or - rbdmirrors, that was requested by the deployer. For example, if the - deployer only specified the CephMon and CephOSD service, then - Mistral will only run `ansible mons -m ping` and `ansible osds -m - ping`. The Ansible ping module will SSH into each host as the - heat-admin user with key which was generated as described - previously. If this fails, then the deployment fails. -#. Mistral starts the Ceph install using the `ansible-playbook` - action. -#. 
The Mistral workflow creates a Zaqar queue to send progress - information back to the client (CLI or web UI). -#. The workflow posts messages to the "tripleo" Zaqar queue or the - queue name provided to the original deploy workflow. -#. If there is a problem during the status of the deploy may be seen - by `openstack workflow execution list | grep ceph` and in the logs - at /var/log/mistral/{engine.log,executor.log}. Running `openstack - stack resource list` would show the custom Heat resource that - started the Mistral workflow, but `openstack workflow execution - list` and `openstack workflow task list` would contain more details - about what steps completed within the Mistral workflow. -#. The Ceph deployment is done in containers in a way which must - prevent any configuration file conflict for any composed service, - e.g. if a Nova compute container (as deployed by TripleO) and a - Ceph OSD container are on the same node, then they must have - different ceph.conf files, even if those files have the same - content. Though, ceph-ansible will manage ceph.conf for Ceph - services and puppet-ceph will still manage ceph.conf for OpenStack - services, neither tool will both try to manage the same ceph.conf - because it will be in a different location on the container host - and bind mounted to /etc/ceph/ceph.conf within different - containers. -#. After the Mistral workflow is completed successfully, the custom - Heat resource is considered successfully created. If the Mistral - workflow does not complete successfully, then the Heat resource - is not considered successfully created. TripleO should handle this - the same way that it handles any Heat resource that failed to be - created. 
For example, because the workflow is idempotent, if the - resource creation fails because the wrong parameter was passed or - because of a temporary network issue, the deployer could simply run - a stack-update the Mistral worklow would run again and if the - issues which caused the first run to fail were resolved, the - deployment should succeed. Similarly if a user updates a parameter, - e.g. a new disk is added to `ceph::profile::params::osds`, then the - workflow will run again without breaking the state of the running - Ceph cluster but it will configure the new disk. -#. After the dependency of the previous step is satisfied, the TripleO - Ceph external Heat resource is created to configure the appropriate - Overcloud nodes as Ceph clients. -#. For the CephRGW service, hieradata will be emitted so that it may - be used for the haproxy listener setup and keystone users setup. -#. The Overcloud deployment continues as if it was using an external - Ceph cluster. - -Adding additional Ceph OSD Nodes to existing OpenStack and Ceph clusters ------------------------------------------------------------------------- - -The process to add an additional Ceph OSD node is similar to the -process to deploy the OSDs along with the Overcloud: - -#. Introspect the new hardware to host the OSDs. -#. In the Heat environment file containing the node counts, increment - the CephStorageCount. -#. Run `openstack overcloud deploy` with standard Ceph options and the - environment file which specifies the implementation of the Ceph - deployment via ceph-ansible. -#. The undercloud updates the deployment plan. -#. Mistral starts the workflow to update the Overcloud and interfaces - with Heat accordingly. -#. A point in the deployment is reached where the new Overcloud nodes - are imaged, booted, and networked. At that point the undercloud has - access to the provisioning or management IPs of the Overcloud - nodes. -#. 
A new Heat Resource is created which starts a Mistral workflow to - add new Ceph OSDs. -#. TCP ports 6800:7300 are opened on the OSD host [7]_. -#. The Mistral environment already has an SSH key-pair as described in - the initial deployment scenario. The same process that is used to - install the public SSH key on Overcloud nodes for TripleO - validations is used to install the SSH keys for ceph-ansible. -#. If necessary, the Mistral workflow updates the number of forks in - Ansible according to the new number of machines that are going to - be bootstrapped. -#. The dynamic Ansible inventory will contain the new node. -#. Mistral confirms that Ansible can execute `ansible osds -m ping`. - This causes Ansible to SSH as the heat-admin user into all of the - CephOsdAnsible nodes, including the new nodes. If this fails, then - the update fails. -#. Mistral uses the Ceph variables found in Heat as described in the - initial deployment scenario. -#. Mistral runs the osd-configure.yaml playbook from ceph-ansible to - add the extra Ceph OSD server. -#. The OSDs on the server are each deployed in their own containers - and `docker ps` will list each OSD container. -#. After the Mistral workflow is completed, the Custom Heat resource - is considered to be updated. -#. No changes are necessary for the TripleO Ceph external Heat - resource since the Overcloud Ceph clients only need information - about new OSDs from the Ceph monitors. -#. The Overcloud deployment continues as if it was using an external - Ceph cluster. - -Containerization of configuration files ---------------------------------------- - -As described in the Containerize TripleO spec, configuration files -for the containerized service will be generated by Puppet and then -passed to the containerized service using a configuration volume [8]_. -A similar containerization feature is already supported by -ceph-ansible, which uses the following sequence to generate the -ceph.conf configuration file. 
- -* Ansible generates a ceph.conf on a monitor node -* Ansible runs the monitor container and bindmount /etc/ceph -* No modification is being done in the ceph.conf -* Ansible copies the ceph.conf to the Ansible server -* Ansible copies the ceph.conf and keys to the appropriate machine -* Ansible runs the OSD container and bindmount /etc/ceph -* No modification is being done in the ceph.conf - -These similar processes are compatible, even in the case of container -hosts which run more than one OpenStack service but which each need -their own copy of the configuration file per container. For example, -consider a containerzation node which hosts both Nova compute and Ceph -OSD services. In this scenario, the Nova compute service would be a -Ceph client and puppet-ceph would generate its ceph.conf and the Ceph -OSD service would be a Ceph server and ceph-ansible would generate its -ceph.conf. It is necessary for Puppet to configure the Ceph client -because Puppet configures the other OpenStack related configuration -files as is already provided by TripleO. Both generated ceph.conf -files would need to be stored in a separate directory on the -containerization hosts to avoid conflicts and the directories could be -mapped to specific containers. For example, host0 could have the -following versions of foo.conf for two different containers:: - - host0:/container1/etc/foo.conf <--- generated by conf tool 1 - host0:/container2/etc/foo.conf <--- generated by conf tool 2 - -When each container is started on the host, the different -configuration files could then be mapped to the different containers:: - - docker run containter1 ... /container1/etc/foo.conf:/etc/foo.conf - docker run containter2 ... /container2/etc/foo.conf:/etc/foo.conf - -In the above scenario, it is necessary for both configuration files -to be generated from the same parameters. I.e. 
both Puppet and Ansible -will use the same values from the Heat environment file, but will -generate the configuration files differently. After the configuration -programs have run it won't matter that Puppet idempotently updated -lines of the ceph.conf and that Ansible used a Jinja2 template. What -will matter is that both configuration files have the same value, -e.g. the same FSID. - -Configuration files generated as described in the Containerize TripleO -spec will not store those configuration files on the container -host's /etc directory before passing it to the container guest with a -bind mount. By default, ceph-ansible generates the initial ceph.conf -on the container host's /etc directory before it uses a bind mount to -pass it through to the container. In order to be consistent with the -Containerize TripleO spec, ceph-ansible will get a new feature for -deploying Ceph in containers so that it will not generate the -ceph.conf on the container host's /etc directory. The same option will -need to apply when generating Ceph key rings, which will be stored in -/etc/ceph in the container, but not on the container host. - -Because Mistral on the undercloud runs the ansible playbooks, the -user "mistral" on the undercloud will be the one that SSH's into the -overcloud nodes to run ansible playbooks. Care will need to be taken -to ensure that user doesn't make changes which are out of scope. - -Alternatives ------------- - -From a high level, this proposal is an alternative to the current -method of deploying Ceph with TripleO and offers the benefits listed -in the problem description. - -From a lower level, how this proposal is implemented as described in -the Workflow section should be considered. - -#. In a split-stack scenario, after the hardware has been provisioned - by the first Heat stack and before the configuration Heat stack is - created, a Mistral workflow like the one in the POC [3]_ could be - run to configure Ceph on the Ceph nodes. 
This scenario would be - more similar to the one where TripleO is deployed using the TripleO - Heat Templates environment file puppet-ceph-external.yaml. This - could be an alternative to a new OS::Mistral::WorflowExecution Heat - resource [6]_. -#. Trigger the ceph-ansible deployment before the OpenStack deployment - In the initial workflow section, it is proposed that "A new - Heat Resource is created which starts a Mistral workflow to Deploy - Ceph". This may be difficult because, in general, composable services - currently define snippets of puppet data which is then later combined - to define the deployment steps, and there is not yet a way to support - running an arbitrary Mistral workflow at a given step of a deployment. - Thus, the Mistral workflow could be started first and then it could - wait for what is described in step 6 of the overview section. - -Security Impact ---------------- - -* A new SSH key pair will be created on the undercloud and will be - accessible in the Mistral environment via a command like - `mistral environment-get ssh_keys_ceph`. The public key of this - pair will be installed in the heat-admin user's authorized_keys - file on all Overcloud nodes which will be Ceph Monitors or OSDs. - This process will follow the same pattern used to create the SSH - keys used for TripleO validations so nothing new would happen in - that respect; just another instance on the same type of process. -* An additional tool would do configuration on the Overcloud, though - the impact of this should be isolated via Containers. -* Regardless of how Ceph services are configured, they require changes - to the firewall. This spec will implement parity in fire-walling for - Ceph services [7]_. - -Other End User Impact ---------------------- - -None. 
- -Performance Impact ------------------- - -The following applies to the undercloud: - -* Mistral will need to run an additional workflow -* Heat's role in deploying Ceph would be lessened so the Heat stack - would be smaller. - -Other Deployer Impact ---------------------- - -Ceph will be deployed using a method that is proven but whose -integration is new to TripleO. - -Developer Impact ----------------- - -None. - -Implementation -============== - -Assignee(s) ------------ - -Primary assignee: - fultonj - -Other contributors: - gfidente - leseb - colonwq - d0ugal (to review Mistral workflows/actions) - -Work Items ----------- - -* Prototype a Mistral workflow to independently install Ceph on - Overcloud nodes [3]_. [done] -* Prototype a Heat Resource to start an independent Mistral Workflow - [6]_. [done] -* Expand mistral-ansible-actions with necessary options (fultonj) -* Parameterize mistral workflow (fultonj) -* Update and have merged Heat CustomResource [6]_ (gfidente) -* Have ceph-ansible create openstack pools and keys for containerized - deployments: https://github.com/ceph/ceph-ansible/issues/1321 (leseb) -* get ceph-ansible packaged in ceph.com and push to centos cbs - (fultonj / leseb) -* Make undercloud install produce /usr/share/ceph-ansible by modifying - RDO's instack RPM's spec file to add a dependency (fultonj) -* Submit mistral workflow and ansible-mistral-actions to - tripleo-common (fultonj) -* Prototype new service plugin interface that defines per-service - workflows (gfidente / shardy / fultonj) -* Submit new services into tht/roles_data.yaml so users can use it. - This should include a change to the tripleo-heat-templates - ci/environments/scenario001-multinode.yaml to include the new - service, e.g. CephMonAnsible so that CI is tested. This may not - work unless it all co-exists in a single overcloud deploy. - If it works, we use it to get started. The initial plan is for - scenario004 to keep using puppet-ceph. 
-* Implement the deleting the Ceph Cluster scenario -* Implement the adding additional Ceph OSDs to existing OpenStack and - Ceph clusters scenario -* Implement the removing Ceph OSD nodes scenario -* Implement the performing maintenance on Ceph OSD nodes (optional) - -Dependencies -============ - -Containerization of the Ceph services provided by ceph-ansible is -used to ensure the configuration tools aren't competing. This -will need to be compatible with the Containerize TripleO spec -[9]_. - -Testing -======= - -A change to tripleo-heat-templates' scenario001-multinode.yaml will be -submitted which includes deployment of the new services CephMonAnsible -and CephOsdAnsible (note that these role names will be changed when -fully working). This testing scenario may not work unless all of the -services may co-exist; however, preliminary testing indicates that -this will work. Initially scenario004 will not be modified and will be -kept using puppet-ceph. We may start by changing ovb-nonha scenario -first as we believe this may be faster. When the CI move to -tripleo-quickstart happens and there is a containers only scenario we -will want to add a hyperconverged containerized deployment too. - -Documentation Impact -==================== - -A new TripleO Backend Configuration document "Deploying Ceph with -ceph-ansible" would be required. - -References -========== - -.. [1] `ceph-ansible `_ -.. [2] `Tendrl `_ -.. [3] `POC tripleo-ceph-ansible `_ -.. [4] `Experimental mistral-ansible-actions project `_ -.. [6] `Proposed new Heat resource OS::Mistral::WorflowExecution `_ -.. [7] `These firewall changes must be managed in a way that does not conflict with TripleO's mechanism for managing host firewall rules and should be done before the Ceph servers are deployed. We are working on a solution to this problem.` -.. [8] `Configuration files generated by Puppet and passed to a containerized service via a config volume `_ -.. 
[9] `Spec to Containerize TripleO `_ diff --git a/specs/pike/tripleo-derive-parameters.rst b/specs/pike/tripleo-derive-parameters.rst deleted file mode 100644 index ab42a540..00000000 --- a/specs/pike/tripleo-derive-parameters.rst +++ /dev/null @@ -1,440 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -=========================== -Deriving TripleO Parameters -=========================== - -https://blueprints.launchpad.net/tripleo/+spec/tripleo-derive-parameters - -This specification proposes a generic interface for automatically -populating environment files with parameters which were derived from -formulas; where the formula's input came from introspected hardware -data, workload type, and deployment type. It also provides specific -examples of how this interface may be used to improve deployment of -overclouds to be used in DPDK or HCI usecases. Finally, it proposes -how this generic interface may be shared and extended by operators -who optionally chose to have certain parameters prescribed so that -future systems tuning expertise may be integrated into TripleO. - -Problem Description -=================== - -Operators must populate parameters for a deployment which may be -specific to hardware and deployment type. The hardware information -of a node is available to the operator once the introspection of the -node is completed. However, the current process requires that the -operator manually read the introspected data, make decisions based on -that data and then update the parameters in an environment file. This -makes deployment preparation unnecessarily complex. - -For example, when deploying for DPDK, the operator must provide the -list of CPUs which should be assigned to the DPDK Poll Mode Driver -(PMD) and the CPUs should be provided from the same NUMA node on which -the DPDK interface is present. 
In order to provide the correct -parameters, the operator must cross check all of these details. - -Another example is the deployment of HCI overclouds, which run both -Nova compute and Ceph OSD services on the same nodes. In order to -prevent contention between compute and storage services, the operator -may manually apply formulas, provided by performance tuning experts, -which take into account available hardware, type of workload, and type -of deployment, and then after computing the appropriate parameters -based on those formulas, manually store them in environment files. - -In addition to the complexity of the DPDK or HCI usecase, knowing the -process to assign CPUs to the DPDK Poll Mode Driver or isolate compute -and storage resources for HCI is, in itself, another problem. Rather -than document the process and expect operators to follow it, the -process should be captured in a high level language with a generic -interface so that performance tuning experts may easily share new -similar processes for other use cases with operators. - -Proposed Change -=============== - -This spec aims to make three changes to TripleO outlined below. - -Mistral Workflows to Derive Parameters --------------------------------------- - -A group of Mistral workflows will be added for the features which are -complex to determine the deployment parameters. Features like DPDK, -SR-IOV and HCI require, input from the introspection data to be -analyzed to compute the deployment parameters. This derive parameters -workflow will provide a default set of computational formulas by -analyzing the introspected data. Thus, there will be a hard dependency -with node introspection for this workflow to be successful. - -During the first iterations, all the roles in a deployment will be -analyzed to find a service associated with the role, which requires -parameter derivation. 
Various options of using this and the final -choice for the current iteration is discussed in the section below, -`Workflow Association with Services`_. - -This workflow assumes that all the nodes in a role have a homogeneous -hardware specification and introspection data of the first node will -be used for processing the parameters for the entire role. This will -be reexamined in later iterations, based on the need for node specific -derivations. The workflow will consider the flavor-profile association -and nova placement scheduler to identify the nodes associated with a -role. - -Role-specific parameters are an important requirement for this workflow. -If there are multiple roles with the same service (feature) enabled, -the parameters which are derived from this workflow will be applied -only on the corresponding role. - -The input sources for these workflows are the ironic database and ironic -introspection data stored in Swift, in addition to the Deployment plan stored -in Swift. Computations done to derive the parameters within the Mistral -workflow will be implemented in YAQL. These computations will be a separate -workflow on per feature basis so that the formulas can be customizable. If an -operator has to modify the default formulas, he or she has to update only this -workflow with customized formula. - - -Applying Derived Parameters to the Overcloud --------------------------------------------- - -In order for the resulting parameters to be applied to the overcloud, -the deployment plan, which is stored in Swift on the undercloud, -will be modified with the Mistral `tripleo.parameters.update` action -or similar. - -The methods for providing input for derivation and the update of -parameters which are derivation output should be consistent with the -Deployment Plan Management specification [1]_. The implementation of -this spec with respect to the interfaces to set and get parameters may -change as it is updated. 
However, the basic workflow should remain the -same. - -Trigger Mistral Workflows with TripleO --------------------------------------- - -Assuming that workflows are in place to derive parameters and update the -deployment plan as described in the previous two sections, an operator may -take advantage of this optional feature by enabling it via ``plan- -environment.yaml``. A new section ``workflow_parameters`` will be added to -the ``plan-environments.yaml`` file to accommodate the additional parameters -required for executing workflows. With this additional section, we can ensure -that the workflow specific parameters are provided only to the workflow, -without polluting the heat environments. It will also be possible to provide -multiple plan environment files which will be merged in the CLI before plan -creation. - -These additional parameters will be read by the derive params workflow -directly from the merged ``plan-environment.yaml`` file stored in Swift. - -It is possible to modify the created plan or modify the profile-node -association, after the derive parameters workflow execution. As of -now, we assume that there are no such alterations done, but it will be -extended after the initial iteration, to fail the deployment with -some validations. - -An operator should be able to derive and view parameters without doing a -deployment; e.g. "generate deployment plan". If the calculation is done as -part of the plan creation, it would be possible to preview the calculated -values. Alternatively the workflow could be run independently of the overcloud -deployment, but how that will fit with the UI workflow needs to be determined. - -Usecase 1: Derivation of DPDK Parameters -======================================== - -A part of the Mistral workflow which uses YAQL to derive DPDK -parameters based on introspection data, including NUMA [2]_, exists -and may be seen on GitHub [3]_. 
- -Usecase 2: Derivation Profiles for HCI -====================================== - -This usecase uses HCI, running Ceph OSD and Nova Compute on the same node. HCI -derive parameters workflow works with a default set of configs to categorize -the type of the workload that the role will host. An option will be provide to -override the default configs with deployment specific configs via ``plan- -environment.yaml``. - -In case of HCI deployment, the additional plan environment used for the -deployment will look like:: - - workflow_parameters: - tripleo.workflows.v1.derive_parameters: - # HCI Derive Parameters - HciProfile: nfv-default - HciProfileConfig: - default: - average_guest_memory_size_in_mb: 2048 - average_guest_CPU_utilization_percentage: 50 - many_small_vms: - average_guest_memory_size_in_mb: 1024 - average_guest_CPU_utilization_percentage: 20 - few_large_vms: - average_guest_memory_size_in_mb: 4096 - average_guest_CPU_utilization_percentage: 80 - nfv_default: - average_guest_memory_size_in_mb: 8192 - average_guest_CPU_utilization_percentage: 90 - -In the above example, the section ``workflow_parameters`` is used to provide -input parameters for the workflow in order to isolate Nova and Ceph -resources while maximizing performance for different types of guest -workloads. An example of the derivation done with these inputs is -provided in nova_mem_cpu_calc.py on GitHub [4]_. - - -Other Integration of Parameter Derivation with TripleO -====================================================== - -Users may still override parameters ------------------------------------ - -If a workflow derives a parameter, e.g. cpu_allocation_ratio, but the -operator specified a cpu_allocation_ratio in their overcloud deploy, -then the operator provided value is given priority over the derived -value. This may be useful in a case where an operator wants all of the -values that were derived but just wants to override a subset of those -parameters. 
- -Handling Cross Dependency Resources ------------------------------------ - -It is possible that multiple workflows will end up deriving parameters based -on the same resource (like CPUs). When this happens, it is important to have a -specific order for the workflows to be run considering the priority. - -For example, let us consider the resource CPUs and how it should be used -between DPDK and HCI. DPDK requires a set of dedicated CPUs for Poll Mode -Drivers (NeutronDpdkCoreList), which should not be used for host process -(ComputeHostCpusList) and guest VM's (NovaVcpuPinSet). HCI requires the CPU -allocation ratio to be derived based on the number of CPUs that are available -for guest VMs (NovaVcpuPinSet). Priority is given to DPDK, followed by HOST -parameters and then HCI parameters. In this case, the workflow execution -starts with a pool of CPUs, then: - -* DPDK: Allocate NeutronDpdkCoreList -* HOST: Allocate ComputeHostCpusList -* HOST: Allocate NovaVcpuPinSet -* HCI: Fix the cpu allocation ratio based on NovaVcpuPinSet - -Derived parameters for specific services or roles -------------------------------------------------- - -If an operator only wants to configure Enhanced Placement Awareness (EPA) -features like CPU pinning or huge pages, which are not associated with any -feature like DPDK or HCI, then it should be associated with just the compute -service. - -Workflow Association with Services ----------------------------------- - -The optimal way to associate the derived parameter workflows with -services is to get the list of the enabled services on a given role, -by previewing Heat stack. With the current limitations in Heat, it is -not possible to fetch the enabled services list on a role. Thus, a new -parameter will be introduced on the service which is associated with a -derive parameters workflow. If this parameter is referenced in the -heat resource tree, on a specific role, then the corresponding derive -parameter workflow will be invoked. 
For example, the DPDK service will -have a new parameter "EnableDpdkDerivation" to enable the DPDK -specific workflows. - -Future integration with TripleO UI ----------------------------------- - -If this spec were implemented and merged, then the TripleO UI could -have a menu item for a deployment, e.g. HCI, in which the deployer may -choose a derivation profile and then deploy an overcloud with that -derivation profile. - -The UI could better integrate with this feature by allowing a deployer -to use a graphical slider to vary an existing derivation profile and -then save that derivation profile with a new name. The following -cycle could be used by the deployer to tune the overcloud. - -* Choose a deployment, e.g. HCI -* Choose an HCI profile, e.g. many_small_vms -* Run the deployment -* Benchmark the planned workload on the deployed overcloud -* Use the sliders to change aspects of the derivation profile -* Update the deployment and re-run the benchmark -* Repeat as needed -* Save the new derivation profile as the one to be deployed in the field - -The implementation of this spec would enable the TripleO UI to support -the above. - -Alternatives ------------- - -The simplest alternative is for operators to determine what tunings -are appropriate by testing or reading documentation and then implement -those tunings in the appropriate Heat environment files. For example, -in an HCI scenario, an operator could run nova_mem_cpu_calc.py [4]_ -and then create a Heat environment file like the following with its -output and then deploy the overcloud and directly reference this -file:: - - parameter_defaults: - ExtraConfig: - nova::compute::reserved_host_memory: 75000 - nova::cpu_allocation_ratio: 8.2 - -This could translate into a variety of overrides which would require -initiative on the operator's part. - -Another alternative is to write separate tools which generate the -desired Heat templates but don't integrate them with TripleO. 
For -example, nova_mem_cpu_calc.py and similar would produce a set of Heat -environment files as output which the operator would then include -instead of output containing the following: - -* nova.conf reserved_host_memory_mb = 75000 MB -* nova.conf cpu_allocation_ratio = 8.214286 - -When evaluating the above, keep in mind that only two parameters for -CPU allocation and memory are being provided as an example, but that -a tuned deployment may contain more. - -Security Impact ---------------- - -There is no security impact from this change as it sits at a higher -level to automate, via Mistral and Heat, features that already exist. - -Other End User Impact ---------------------- - -Operators need not manually derive the deployment parameters based on the -introspection or hardware specification data, as it is automatically derived -with pre-defined formulas. - -Performance Impact ------------------- - -The deployment and update of an overcloud may take slightly longer if -an operator uses this feature because an additional Mistral workflow -needs to run to perform some analytics before applying configuration -updates. However, the performance of the overcloud would be improved -because this proposal aims to make it easier to tune the overcloud for -performance. - -Other Deployer Impact ---------------------- - -A new configuration option is being added, but it has to be explicitly -enabled, and thus it would not take immediate effect after it is merged. -Though, if a deployer chooses to use it and there is a bug in it, then -it could affect the overcloud deployment. If a deployer uses this new -option, and had a deploy in which they set a parameter directly, -e.g. the Nova cpu_allocation_ratio, then that parameter may be -overridden by a particular tuning profile. So that is something a -deployer should be aware of when using this proposed feature. - -The config options being added will ship with a variety of defaults -based on deployments put under load in a lab. 
The main idea is to make -different sets of defaults, which were produced under these -conditions, available. The example discussed in this proposal and to -be made available on completion could be extended. - -Developer Impact ----------------- - -This spec proposes modifying the deployment plan which, if there was a -bug, could introduce problems into a deployment. However, because the -new feature is completely optional, a developer could easily disable -it. - -Implementation -============== - -Assignee(s) ------------ - -Primary assignees: - skramaja - fultonj - -Other contributors: - jpalanis - abishop - shardy - gfidente - -Work Items ----------- - -* Derive Params start workflow to find list of roles -* Workflow run for each role to fetch the introspection data and trigger - individual features workflow -* Workflow to identify if a service associated with a features workflow is - enabled in a role -* DPDK Workflow: Analysis and concluding the format of the input data (jpalanis) -* DPDK Workflow: Parameter deriving workflow (jpalanis) -* HCI Workflow: Run a workflow that calculates the parameters (abishop) -* SR-IOV Workflow -* EPA Features Workflow -* Run the derive params workflow from CLI -* Add CI scenario testing if workflow with produced expected output - -Dependencies -============ - -* NUMA Topology in introspection data (ironic-python-agent) [5]_ - -Testing -======= - -Create a new scenario in the TripleO CI in which a deployment is done -using all of the available options within a derivation profile called -all-derivation-options. A CI test would need to be added that would -test this new feature by doing the following: - -* A deployment would be done with the all-derivation-options profile -* The deployment would be checked that all of the configurations had been made -* If the configuration changes are in place, then the test passed -* Else the test failed - -Relating the above to the HCI usecase, the test could verify one of -two options: - -1. 
A Heat environment file created with the following syntactically - valid Heat:: - - parameter_defaults: - ExtraConfig: - nova::compute::reserved_host_memory: 75000 - nova::cpu_allocation_ratio: 8.2 - -2. The compute node was deployed such that the commands below return - something like the following:: - - [root@overcloud-osd-compute-0 ~]# grep reserved_host_memory /etc/nova/nova.conf - reserved_host_memory_mb=75000 - [root@overcloud-osd-compute-0 ~]# grep cpu_allocation_ratio /etc/nova/nova.conf - cpu_allocation_ratio=8.2 - [root@overcloud-osd-compute-0 ~]# - -Option 1 would put less load on the CI infrastructure and produce a -faster test but Option 2 tests the full scenario. - -If a new derived parameter option is added, then the all-derivation-options -profile would need to be updated and the test would need to be updated -to verify that the new options were set. - -Documentation Impact -==================== - -A new chapter would be added to the TripleO document on deploying with -derivation profiles. - -References -========== - -.. [1] `Deployment Plan Management specification `_ -.. [2] `Spec for Ironic to retrieve NUMA node info `_ -.. [3] ``_ -.. [4] `nova_mem_cpu_calc.py `_ -.. [5] `NUMA Topology in introspection data (ironic-python-agent) `_ diff --git a/specs/pike/tripleo-realtime.rst b/specs/pike/tripleo-realtime.rst deleted file mode 100644 index 31c35aaf..00000000 --- a/specs/pike/tripleo-realtime.rst +++ /dev/null @@ -1,235 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -====================================== -Add real-time compute nodes to TripleO -====================================== - -https://blueprints.launchpad.net/tripleo/+spec/tripleo-realtime - -Real-time guest VMs require compute nodes with a specific configuration to -control the sources of latency spikes. 
- -Problem Description -=================== - -Manual configuration of compute nodes to support real-time guests is possible. -However this is complex and time consuming where there is large number of -compute nodes to configure. - -On a real-time compute node a subset of the available physical CPUs (pCPUs) are -isolated and dedicated to real-time tasks. The remaining pCPUs are dedicated to -general housekeeping tasks. This requires a real-time Linux Kernel and real-time -KVM that allow their housekeeping tasks to be isolated. The real-time and -housekeeping pCPUs typically reside on different NUMA nodes. - -Huge pages are also reserved for guest VMs to prevent page faults, either via -the kernel command line or via sysfs. Sysfs is preferable as it allows the -reservation on each individual NUMA node to be set. - -A real-time Linux guest VM is partitioned in a similar manner, having one or -more real-time virtual CPUs (vCPUs) and one or more general vCPUs to handle -the non real-time housekeeping tasks. - -A real-time vCPU is pinned to a real-time pCPU while a housekeeping vCPU is -pinned to a housekeeping pCPUS. - -It is expected that operators would require both real-time and non real-time -compute nodes on the same overcloud. - -Use Cases ---------- - -The primary use-case is NFV appliances deployed by the telco community which -require strict latency guarantees. Other latency sensitive applications should -also benefit. - -Proposed Change -=============== - -This spec proposes changes to automate the deployment of real-time capable -compute nodes using TripleO. - -* a custom overcloud image for the real-time compute nodes, which shall include: - - * real-time Linux Kernel - * real-time KVM - * real-time tuned profiles - -* a new real-time compute role that is a variant of the existing compute role - - * huge pages shall be enabled on the real-time compute nodes. - * huge pages shall be reserved for the real-time guests. 
- * CPU pinning shall be used to isolate kernel housekeeping tasks from the -   real-time tasks by configuring tuned. - * CPU pinning shall be used to isolate virtualization housekeeping tasks from -   the real-time tasks by configuring nova. - -Alternatives ------------- - -None - -Security Impact ---------------- - -None - -Other End User Impact ---------------------- - -None - -Performance Impact ------------------- - -Worse-case latency in real-time guest VMs should be significantly reduced. -However a real-time configuration potentially reduces the overall throughput of -a compute node. - -Other Deployer Impact ---------------------- - -The operator will remain responsible for: - -* appropriate BIOS settings on compute node. -* setting appropriate parameters for the real-time role in an environment file -* post-deployment configuration - - * creating/modifying overcloud flavors to enable CPU pinning, hugepages, - dedicated CPUs, real-time policy - * creating host aggregates for real-time and non real-time compute nodes - - - -Developer Impact ----------------- - -None - -Implementation -============== - -Real-time ``overcloud-full`` image creation: - -* create a disk-image-builder element to include the real-time packages -* add support for multiple overcloud images in python-tripleoclient CLIs:: - - openstack overcloud image build - openstack overcloud image upload - -Real-time compute role: - -* create a ``ComputeRealtime`` role - - * variant of the ``Compute`` role that can be configured and scaled - independently - * allows a different image and flavor to be used for real-time nodes - * includes any additional parameters/resources that apply to real-time nodes - -* create a ``NovaRealtime`` service - - * contains a nested ``NovaCompute`` service - * allows parameters to be overridden for the real-time role only - -Nova configuration: - -* Nova ``vcpu_pin_set`` support is already implemented. 
See NovaVcpuPinSet in - :ref:`references` - -Kernel/system configuration: - -* hugepages support - - * set default hugepage size (kernel cmdline) - * number of hugepages of each size to reserve at boot (kernel cmdline) - * number of hugepages of each size to reserve post boot on each NUMA node - (sysfs) - -* Kernel CPU pinning - - * isolcpu option (kernel cmdline) - -Ideally this can be implemented outside of TripleO in the Tuned profiles, where -it is possible to set the kernel command line and manage sysfs. TripleO would -then manage the Tuned profile config files. -Alternatively the grub and systemd config files can be managed directly. - -.. note:: - - This requirement is shared with OVS-DPDK. The development should be - coordinated to ensure a single implementation is implemented for - both use-cases. - Managing the grub config via a UserData script is the current approach used - for OVS-DPDK. See OVS-DPDK documentation in :ref:`references`. - -Assignee(s) ------------ - -Primary assignee: - owalsh - -Other contributors: - ansiwen - -Work Items ----------- - -As outlined in the proposed changes. - -Dependencies -============ - -* Libvirt real time instances - https://blueprints.launchpad.net/nova/+spec/libvirt-real-time -* Hugepages enabled in the Compute nodes. - https://bugs.launchpad.net/tripleo/+bug/1589929 -* CPU isolation of real-time and non real-time tasks. - https://bugs.launchpad.net/tripleo/+bug/1589930 -* Tuned - https://fedorahosted.org/tuned/ - -Testing -======= - -Genuine real-time guests are unlikely to be testable in CI: - -* specific BIOS settings are required. -* images with real-time Kernel and KVM modules are required - -However the workflow to deploy these guest should be testable in CI. - -Documentation Impact -==================== - -Manual steps performed by the operator shall be documented: - -* BIOS settings for low latency -* Real-time overcloud image creation - - .. note:: - - CentOS repos do not include RT packages. 
The CERN CentOS RT repository is an - alternative. -* Flavor and profile creation -* Parameters required in a TripleO environment file -* Post-deployment configuration - -.. _references: - -References -========== - -Nova blueprint `"Libvirt real time instances" -`_ - -The requirements are similar to :doc:`../newton/tripleo-ovs-dpdk` - -CERN CentOS 7 RT repo http://linuxsoft.cern.ch/cern/centos/7/rt/ - -NovaVcpuPinSet parameter added: https://review.openstack.org/#/c/343770/ - -OVS-DPDK documentation (work-in-progress): https://review.openstack.org/#/c/395431/ diff --git a/specs/pike/tripleo-routed-networks-ironic-inspector.rst b/specs/pike/tripleo-routed-networks-ironic-inspector.rst deleted file mode 100644 index cc730abb..00000000 --- a/specs/pike/tripleo-routed-networks-ironic-inspector.rst +++ /dev/null @@ -1,386 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -========================================================== -Modify TripleO Ironic Inspector to PXE Boot Via DHCP Relay -========================================================== - -https://blueprints.launchpad.net/tripleo/+spec/tripleo-routed-networks-ironic-inspector - -This blueprint is part of the series tripleo-routed-networks-deployment [0]_. - -This spec describes adding features to the Undercloud to support Ironic -Inspector performing PXE boot services for multiple routed subnets (with -DHCP relay on the routers forwarding the requests). The changes required -to support this will be in the format of ``undercloud.conf`` and in the Puppet -script that writes the ``dnsmasq.conf`` configuration for Ironic Inspector. - -TripleO uses Ironic Inspector to perform baremetal inspection of overcloud -nodes prior to deployment. Today, the ``dnsmasq.conf`` that is used by Ironic -Inspector is generated by Puppet scripts that run when the Undercloud is -configured. 
A single subnet and IP allocation range is entered in -``undercloud.conf`` in the parameter ``inspection_iprange``. This spec would -implement support for multiple subnets in one provisioning network. - -Background Context -================== - -For a detailed description of the desired topology and problems being -addressed, please reference the parent blueprint -tripleo-routed-networks-deployment [0]_. - -Problem Descriptions -==================== - -Ironic Inspector DHCP doesn't yet support DHCP relay. This makes it -difficult to do introspection when the hosts are not on the same L2 domain -as the controllers. The dnsmasq process will actually function across a DHCP -relay, but the configuration must be edited by hand. - -Possible Solutions, Ideas, or Approaches: - -1. Add support for DHCP scopes and support for DHCP relays. -2. Use remote DHCP/PXE boot but provide L3 routes back to the introspection server -3. Use Neutron DHCP agent to PXE boot nodes for introspection (the Neutron - dhcp-agent already supports multiple subnets, and can be modified to support - DHCP relay). Note that there has been discussion about moving to Neutron for - Ironic Introspection on this bug [3]_. This is currently infeasible due to - Neutron not being able to issue IPs for unknown MACs. The related patch has - been abandoned [5]_. - - -Solution Implementation - -The Ironic Inspector DHCP server uses dnsmasq, but only configures one subnet. -We need to modify the Ironic Inspector DHCP configuration so that we can -configure DHCP for multiple Neutron subnets and allocation pools. Then we -should be able to use DHCP relay to send DHCP requests to the Ironic -Inspector DHCP server. In the long term, we can likely leverage the Routed -Networks work being done in Neutron to represent the subnets and allocation -pools that would be used for the DHCP range sets below. 
This spec only covers -the minimum needed for TripleO, so the work can be achieved simply by modifying -the Undercloud Puppet scripts. The following has been tested and shown -to result in successful introspection across two subnets, one local and one -across a router configured with DHCP relay:: - - Current dnsmasq.conf representing one network (172.20.0.0/24), which is - configured in the "inspection_iprange" in undercloud.conf: - port=0 - interface=br-ctlplane - bind-interfaces - dhcp-range=172.21.0.100,172.21.0.120,29 - dhcp-sequential-ip - dhcp-match=ipxe,175 - # Client is running iPXE; move to next stage of chainloading - dhcp-boot=tag:ipxe,http://172.20.0.1:8088/inspector.ipxe - dhcp-boot=undionly.kpxe,localhost.localdomain,172.20.0.1 - - Multiple-subnet dnsmasq.conf representing multiple subnets: - port=0 - interface=br-ctlplane - bind-interfaces - # Ranges and options - dhcp-range=172.21.0.100,172.21.0.120,29 - dhcp-range=set:leaf1,172.20.0.100,172.20.0.120,255.255.255.0,29 - dhcp-option=tag:leaf1,option:router,172.20.0.254 - dhcp-range=set:leaf2,172.19.0.100,172.19.0.120,255.255.255.0,29 - dhcp-option=tag:leaf2,option:router,172.19.0.254 - - dhcp-sequential-ip - dhcp-match=ipxe,175 - # Client is running iPXE; move to next stage of chainloading - dhcp-boot=tag:ipxe,http://172.20.0.1:8088/inspector.ipxe - dhcp-boot=undionly.kpxe,localhost.localdomain,172.20.0.1 - -In the above configuration, a router is supplied for all subnets, including -the subnet to which the Undercloud is attached. Note that the router is not -required for nodes on the same subnet as the inspector host, but if it gets -automatically generated it won't hurt anything. - -This file is created by the Puppet file located in [1]_. That is where the -changes will have to be made. - -As discussed above, using a remote DHCP/PXE server is a possibility only if we -have support in the top-of-rack switches, or if there is a system or VM -listening on the remote subnet to relay DHCP requests. 
This configuration of -dnsmasq will allow it to send DHCP offers to the DHCP relay, which forwards the -offer on to the requesting host. After the offer is accepted, the host can -communicate directly with the Undercloud, since it has already received the -proper gateway address for packets to be forwarded. It will send a DHCP request -directly based on the offer, and the DHCP ACK will be sent directly from the -Undercloud to the client. Downloading of the PXE images is then done via TFTP -and HTTP, not through the DHCP relay. - -An additional problem is that Ironic Inspector blacklists nodes that have -already been introspected using iptables rules blocking traffic from -particular MAC addresses. Since packets relayed via DHCP relay will come -from the MAC address of the router (not the original NIC that sent the packet), -we will need to blacklist MACs based on the contents of the relayed DHCP -packet. If possible, this blacklisting would be done using dnsmasq, which -would provide the ability to decode the DHCP Discover packets and act on the -contents. In order to do blacklisting directly with ``dnsmasq`` instead of -using iptables, we need to be able to influence the ``dnsmasq`` configuration -file. - -Proposed Change -=============== -The proposed changes are discussed below. - -Overview --------- - -The Puppet modules will need to be refactored to output a multi-subnet -``dnsmasq.conf`` from a list of subnets in undercloud.conf. - -The blacklisting functionality will need to be updated. Filtering by MAC -address won't work for DHCP requests that are relayed by a router. In that -case, the source MAC address will be the router interface that sent the -relayed request. 
There are methods to blacklist MAC addresses within dnsmasq, -such as this configuration:: - - dhcp-mac=blacklist, - dhcp-ignore=blacklist - -Or this configuration:: - - # Never offer DHCP service to a machine whose Ethernet - # address is 11:22:33:44:55:66 - dhcp-host=11:22:33:44:55:66,ignore - -The configuration could be placed into the main ``dnsmasq.conf`` file, or into -a file in ``/etc/dnsmasq.d/``. Either way, dnsmasq will have to be restarted -in order to re-read the configuration files. This is due to a security feature -in dnsmasq to prevent foreign configuration being loaded as root. Since DHCP -has a built-in retry mechanism, the brief time it takes to restart dnsmasq -should not impact introspection, as long as we don't restart dnsmasq too -many times in any 60-second period. - -It does not appear that the dnsmasq DBus interface can be used to set the -"dhcp-ignore" option for individual MAC addresses [4]_ [6]_. - -Alternatives ------------- - -One alternative approach is to use DHCP servers to assign IP addresses on all -hosts on all interfaces. This would simplify configuration within the Heat -templates and environment files. Unfortunately, this was the original approach -of TripleO, and it was deemed insufficient by end-users, who wanted stability -of IP addresses, and didn't want to have an external dependency on DHCP. - -Another approach which was considered was simply trunking all networks back -to the Undercloud, so that dnsmasq could respond to DHCP requests directly, -rather than requiring a DHCP relay. Unfortunately, this has already been -identified as being unacceptable by some large operators, who have network -architectures that make heavy use of L2 segregation via routers. This also -won't work well in situations where there is geographical separation between -the VLANs, such as in split-site deployments. 
- -Another approach is to use the DHCP server functionality in the network switch -infrastructure in order to PXE boot systems, then assign static IP addresses -after the PXE boot is done via DHCP. This approach would require configuration -at the switch level that influenced where systems PXE boot, potentially opening -up a security hole that is not under the control of OpenStack. This approach -also doesn't lend itself to automation that accounts for things like changes -to the PXE image that is being served to hosts. - -It is not necessary to use hardware routers to forward DHCP packets. There -are DHCP relay and DHCP proxy packages available for Linux. It is possible -to place a system or a VM on both the Provisioning network and the remote -network in order to forward DHCP requests. This might be one method for -implementing CI testing. Another method might trunk all remote provisioning -networks back to the Undercloud, with DHCP relay running on the Undercloud -forwarding to the local br-ctlplane. - -Security Impact ---------------- - -One of the major differences between spine-and-leaf and standard isolated -networking is that the various subnets are connected by routers, rather than -being completely isolated. This means that without proper ACLs on the routers, -private networks may be opened up to outside traffic. - -This should be addressed in the documentation, and it should be stressed that -ACLs should be in place to prevent unwanted network traffic. For instance, the -Internal API network is sensitive in that the database and message queue -services run on that network. It is supposed to be isolated from outside -connections. 
This can be achieved fairly easily if *supernets* are used, so that -if all Internal API subnets are a part of the ``172.19.0.0/16`` supernet, an -ACL rule will allow only traffic between Internal API IPs (this is a simplified -example that could be applied on all Internal API router VLAN interfaces -or as a global ACL):: - - allow traffic from 172.19.0.0/16 to 172.19.0.0/16 - deny traffic from * to 172.19.0.0/16 - -In the case of Ironic Inspector, the TFTP server is a potential point of -vulnerability. TFTP is inherently unauthenticated and does not include an -access control model. The network(s) where Ironic Inspector is operating -should be secured from remote access. - -Other End User Impact ---------------------- - -Deploying with spine-and-leaf will require additional parameters to -provide the routing information and multiple subnets required. This will have -to be documented. Furthermore, the validation scripts may need to be updated -to ensure that the configuration is validated, and that there is proper -connectivity between overcloud hosts. - -Performance Impact ------------------- - -Much of the traffic that is today made over layer 2 will be traversing layer -3 routing borders in this design. That adds some minimal latency and overhead, -although in practice the difference may not be noticeable. One important -consideration is that the routers must not be too overcommitted on their -uplinks, and the routers must be monitored to ensure that they are not acting -as a bottleneck, especially if complex access control lists are used. - -The DHCP process is not likely to be affected, however delivery of system -images via TFTP may suffer a performance degradation. Since TFTP does not -deal well with packet loss, deployers will have to take care not to -oversaturate the links between routing switches. 
- -Other Deployer Impact ---------------------- - -A spine-and-leaf deployment will be more difficult to troubleshoot than a -deployment that simply uses a set of VLANs. The deployer may need to have -more network expertise, or a dedicated network engineer may be needed to -troubleshoot in some cases. - -Developer Impact ----------------- - -Spine-and-leaf is not easily tested in virt environments. This should be -possible, but due to the complexity of setting up libvirt bridges and -routes, we may want to provide a simulation of spine-and-leaf for use in -virtual environments. This may involve building multiple libvirt bridges -and routing between them on the Undercloud, or it may involve using a -DHCP relay on the virt-host as well as routing on the virt-host to simulate -a full routing switch. A plan for development and testing will need to be -formed, since not every developer can be expected to have a routed -environment to work in. It may take some time to develop a routed virtual -environment, so initial work will be done on bare metal. - -Implementation -============== - -Assignee(s) ------------ - -Primary assignee: - Dan Sneddon - -Final assignees to be determined. - -Approver(s) ------------ - -Primary approver: - Emilien Macchi - -Work Items ----------- - -1. Modify Ironic Inspector ``dnsmasq.conf`` generation to allow export of - multiple DHCP ranges. The patch enabling this has merged [7]_. -2. Modify the Ironic Inspector blacklisting mechanism so that it supports DHCP - relay, since the DHCP requests forwarded by the router will have the source - MAC address of the router, not the node being deployed. -3. Modify the documentation in ``tripleo-docs`` to cover the spine-and-leaf case. -4. Add an upstream CI job to test booting across subnets (although - hardware availability may make this a long-term goal). 
- -[*] Note that depending on the timeline for Neutron/Ironic integration, it might -make sense to implement support for multiple subnets via changes to the Puppet -modules which process ``undercloud.conf`` first, then follow up with a patch -to integrate Neutron networks into Ironic Inspector later on. - -Implementation Details ----------------------- - -Workflow for introspection and deployment: - -1. Network Administrator configures all provisioning VLANs with IP address of - Undercloud server on the ctlplane network as DHCP relay or "helper-address". -2. Operator configures IP address ranges and default gateways in - ``undercloud.conf``. Each subnet will require its own IP address range. -3. Operator imports baremetal instackenv.json. -4. When introspection or deployment is run, the DHCP server receives the DHCP - request from the baremetal host via DHCP relay. -5. If the node has not been introspected, reply with an IP address from the - introspection pool and the inspector PXE boot image. -6. Introspection is performed. LLDP collection [2]_ is performed to gather - information about attached network ports. -7. The node is blacklisted in ``dnsmasq.conf`` (or in ``/etc/dnsmasq.d``), - and dnsmasq is restarted. -8. On the next boot, if the MAC address is blacklisted and a port exists in - Neutron, then Neutron replies with the IP address from the Neutron port - and the overcloud-full deployment image. -9. The Heat templates are processed which generate os-net-config templates, and - os-net-config is run to assign static IPs from the correct subnets, as well - as routes to other subnets via the router gateway addresses. - -When using spine-and-leaf, the DHCP server will need to provide an introspection -IP address on the appropriate subnet, depending on the information contained in -the DHCP relay packet that is forwarded by the segment router. 
dnsmasq will -automatically match the gateway address (GIADDR) of the router that forwarded -the request to the subnet where the DHCP request was received, and will respond -with an IP and gateway appropriate for that subnet. - -The above workflow for the DHCP server should allow for provisioning IPs on -multiple subnets. - -Dependencies -============ - -There will be a dependency on routing switches that perform DHCP relay service -for production spine-and-leaf deployments. Since we will not have routing -switches in our virtual testing environment, a DHCP proxy may be set up as -described in the testing section below. - -Testing -======= - -In order to properly test this framework, we will need to establish at least -one CI test that deploys spine-and-leaf. As discussed in this spec, it isn't -necessary to have a full routed bare metal environment in order to test this -functionality, although there is some work required to get it working in virtual -environments such as OVB. - -For virtual testing, it is sufficient to trunk all VLANs back to the -Undercloud, then run DHCP proxy on the Undercloud to receive all the -requests and forward them to br-ctlplane, where dnsmasq listens. This -will provide a substitute for routers running DHCP relay. - -Documentation Impact -==================== - -The TripleO docs will need to be updated to include detailed instructions -for deploying in a spine-and-leaf environment, including the environment -setup. Covering specific vendor implementations of switch configurations -is outside this scope, but a specific overview of required configuration -options should be included, such as enabling DHCP relay (or "helper-address" -as it is also known) and setting the Undercloud as a server to receive -DHCP requests. - -The updates to TripleO docs will also have to include a detailed discussion -of choices to be made about IP addressing before a deployment. 
If supernets -are to be used for network isolation, then a good plan for IP addressing will -be required to ensure scalability in the future. - -References -========== - -.. [0] `Spec: Routed Networks for Neutron `_ -.. [1] `Source Code: inspector_dnsmasq_http.erb `_ -.. [2] `Review: Add LLDP processing hook and new CLI commands `_ -.. [3] `Bug: [RFE] Implement neutron routed networks support in Ironic `_ -.. [4] `Wikibooks: Python Programming: DBus `_ -.. [5] `Review: Enhanced Network/Subnet DHCP Options `_ -.. [6] `Documentation: DBus Interface for dnsmasq `_ -.. [7] `Review: Multiple DHCP Subnets for Ironic Inspector `_ diff --git a/specs/policy-template.rst b/specs/policy-template.rst deleted file mode 100644 index 442d35d5..00000000 --- a/specs/policy-template.rst +++ /dev/null @@ -1,126 +0,0 @@ -.. - This template should be in ReSTructured text. For help with syntax, - see http://sphinx-doc.org/rest.html - - To test out your formatting, build the docs using tox, or see: - http://rst.ninjs.org - - The filename in the git repository should match the launchpad URL, - for example a URL of - https://blueprints.launchpad.net/oslo?searchtext=awesome-thing should be - named awesome-thing.rst. - - For specs targeted at a single project, please prefix the first line - of your commit message with the name of the project. For example, - if you're submitting a new feature for oslo.config, your git commit - message should start something like: "config: My new feature". - - Wrap text at 79 columns. - - Do not delete any of the sections in this template. If you have - nothing to say for a whole section, just write: None - - If you would like to provide a diagram with your spec, ascii diagrams are - required. http://asciiflow.com/ is a very nice tool to assist with making - ascii diagrams. The reason for this is that the tool used to review specs is - based purely on plain text. 
Plain text will allow review to proceed without - having to look at additional files which can not be viewed in gerrit. It - will also allow inline feedback on the diagram itself. - -========================= - The title of the policy -========================= - -Introduction paragraph -- why are we doing anything? - -Problem Description -=================== - -A detailed description of the problem. - -Policy -====== - -Here is where you cover the change you propose to make in detail. How do you -propose to solve this problem? - -If the policy seeks to modify a process or workflow followed by the -team, explain how and why. - -If this is one part of a larger effort make it clear where this piece ends. In -other words, what's the scope of this policy? - -Alternatives & History -====================== - -What other ways could we do this thing? Why aren't we using those? This doesn't -have to be a full literature review, but it should demonstrate that thought has -been put into why the proposed solution is an appropriate one. - -If the policy changes over time, summarize the changes here. The exact -details are always available by looking at the git history, but -summarizing them will make it easier for anyone to follow the desired -policy and understand when and why it might have changed. - -Implementation -============== - -Author(s) ---------- - -Who is leading the writing of the policy? If more than one person is -working on it, please designate the primary author and contact. - -Primary author: - - -Other contributors: - - -Milestones ----------- - -When will the policy go into effect? - -If there is a built-in deprecation period for the policy, or criteria -that would trigger it no longer being in effect, describe them. - -Work Items ----------- - -List any concrete steps we need to take to implement the policy. - -References -========== - -Please add any useful references here. You are not required to have -any references. 
Moreover, this policy should still make sense when -your references are unavailable. Examples of what you could include -are: - -* Links to mailing list or IRC discussions - -* Links to notes from a summit session - -* Links to relevant research, if appropriate - -* Related policies as appropriate - -* Anything else you feel it is worthwhile to refer to - -Revision History -================ - -.. list-table:: Revisions - :header-rows: 1 - - * - Release Name - - Description - * - - - Introduced - -.. note:: - - This work is licensed under a Creative Commons Attribution 3.0 - Unported License. - http://creativecommons.org/licenses/by/3.0/legalcode diff --git a/specs/policy/adding-ci-jobs.rst b/specs/policy/adding-ci-jobs.rst deleted file mode 100644 index a6b23a10..00000000 --- a/specs/policy/adding-ci-jobs.rst +++ /dev/null @@ -1,146 +0,0 @@ -==================== - Adding New CI Jobs -==================== - -New CI jobs need to be added following a specific process in order to ensure -they don't block patches unnecessarily and that they aren't ignored by -developers. - -Problem Description -=================== - -We need to have a process for adding CI jobs that is not going to result -in a lot of spurious failures due to the new jobs. Bogus CI results force -additional rechecks and reduce developer/reviewer confidence in the results. - -In addition, maintaining CI jobs is a non-trivial task, and each one we add -increases the load on the team. Hopefully having a process that requires the -involvement of the new job's proposer makes it clear that the person/team -adding the job has a responsibility to help maintain it. CI is everyone's -problem. - -Policy -====== - -The following steps should be completed in the order listed when adding a new -job: - -#. Create an experimental job or hijack an existing job for a single Gerrit - change. See the references section for details on how to add a new job. - This job should be passing before moving on to the next step. 
- -#. Verify that the new job is providing a reasonable level of logging. Not - too much, not too little. Important logs, such as the OpenStack service - logs and basic system logs, are necessary to determine why jobs fail. - However, OpenStack Infra has to store the logs from an enormous number of - jobs, so it is also important to keep our log artifact sizes under control. - When in doubt, try to capture about the same amount of logs as the existing - jobs. - -#. Promote the job to check non-voting. While the job should have been - passing prior to this, it most likely has not been run a significant number - of times, so the overall stability is still unknown. - - "Stable" in this case would be defined as not having significantly more - spurious failures than the ovb-ha job. Due to the additional complexity of - an HA deployment, that job tends to fail for reasons unrelated to the patch - being tested more often than the other jobs. We do not want to add any - jobs that are less stable. Note that failures due to legitimate problems - being caught by the new job should not count against its stability. - - .. important:: Before adding OVB jobs to the check queue, even as - non-voting, please check with the CI admins to ensure there is enough - OVB capacity to run a large number of new jobs. As of this writing, - the OVB cloud capacity is significantly more constrained than regular - OpenStack Infra. - - A job should remain in this state until it has been proven stable over a - period of time. A good rule of thumb would be that after a week of - stability the job can and should move to the next step. - - .. important:: Jobs should not remain non-voting indefinitely. This causes - reviewers to ignore the results anyway, so the jobs become a waste of - resources. Once a job is believed to be stable, it should be made - voting as soon as possible. - -#. To assist with confirming the stability of a job, it should be added to the - `CI Status `_ page at this point. 
This - can actually be done at any time after the job is moved to the check queue, - but must be done before the job becomes voting. - - Additionally, contact Sagi Shnaidman (sshnaidm on IRC) to get the job - added to the `Extended CI Status `_ - page. - -#. Send an e-mail to openstack-dev, tagged with [tripleo], that explains the - purpose of the new job and notifies people that it is about to be made - voting. - -#. Make the job voting. At this point there should be sufficient confidence - in the job that reviewers can trust the results and should not merge - anything which does not pass it. - - In addition, be aware that voting multinode jobs are also gating. If the - job fails the patch cannot merge. This means a broken job can block all - TripleO changes from merging. - -#. Keep an eye on the `CI Status `_ page to - ensure the job keeps running smoothly. If it starts to fail an unusual - amount, please investigate. - -Alternatives & History -====================== - -Historically, a number of jobs have been added to the check queue when they -were completely broken. This is bad and reduces developer and reviewer -confidence in the CI results. It can also block TripleO changes from merging -if the broken job is gating. - -We also have a bad habit of leaving jobs in the non-voting state, which makes -them fairly worthless since reviewers will not respect the results. Per -this policy, we should clean up all of the non-voting jobs by either moving -them back to experimental, or stabilizing them and making them voting. - -Implementation -============== - -Author(s) ---------- - -Primary author: - bnemec - -Milestones ----------- - -This policy would go into effect immediately. - -Work Items ----------- - -This policy is mostly targeted at new jobs, but we do have a number of -non-voting jobs that should be brought into compliance with it. - -References -========== - -`OpenStack Infra Manual `_ - -`Adding a New Job `_ - -Revision History -================ - -.. 
list-table:: Revisions - :header-rows: 1 - - * - Release Name - - Description - * - Pike - - Introduced - -.. note:: - - This work is licensed under a Creative Commons Attribution 3.0 - Unported License. - http://creativecommons.org/licenses/by/3.0/legalcode diff --git a/specs/policy/bug-tagging.rst b/specs/policy/bug-tagging.rst deleted file mode 100644 index 6fce6f31..00000000 --- a/specs/policy/bug-tagging.rst +++ /dev/null @@ -1,150 +0,0 @@ -======== -Bug tags -======== - -The main TripleO bug tracker is used to keep track of bugs for multiple -projects that are all parts of TripleO. In order to reduce confusion, -we are using a list of approved tags to categorize them. - -Problem Description -=================== - -Given the heavily interconnected nature of the various TripleO -projects, there is a desire to track all the related bugs in a single -bug tracker. However when it is needed, it can be difficult to narrow -down the bugs related to a specific aspect of the project. Launchpad -bug tags can help us here. - -Policy -====== - -The Launchpad official tags list for TripleO contains the following -tags. Keeping them official in Launchpad means the tags will -auto-complete when users start writing them. A bug report can have any -combination of these tags, or none. - -Proposing new tags should be done via policy update (proposing a change -to this file). Once such a change is merged, a member of the driver -team will create/delete the tag in Launchpad. - -Tags ----- - -+-------------------------------+----------------------------------------------------------------------------+ -| Tag | Description | -+===============================+============================================================================+ -| alert | For critical bugs requiring immediate attention. 
Triggers IRC notification | -+-------------------------------+----------------------------------------------------------------------------+ -| ci | A bug affecting the Continuous Integration system | -+-------------------------------+----------------------------------------------------------------------------+ -| ci-reproducer | A bug affecting local recreation of Continuous Integration environments | -+-------------------------------+----------------------------------------------------------------------------+ -| config-agent | A bug affecting os-collect-config, os-refresh-config, os-apply-config | -+-------------------------------+----------------------------------------------------------------------------+ -| containers | A bug affecting container based deployments | -+-------------------------------+----------------------------------------------------------------------------+ -| depcheck | A bug affecting 3rd party dependencies, for example ceph-ansible, podman | -+-------------------------------+----------------------------------------------------------------------------+ -| deployment-time | A bug affecting deployment time | -+-------------------------------+----------------------------------------------------------------------------+ -| documentation | A bug that is specific to documentation issues | -+-------------------------------+----------------------------------------------------------------------------+ -| edge | A bug that correlates to EDGE computing cases by network/scale etc. 
areas | -+-------------------------------+----------------------------------------------------------------------------+ -| i18n | A bug related to internationalization issues | -+-------------------------------+----------------------------------------------------------------------------+ -| low-hanging-fruit | A good starter bug for newcomers | -+-------------------------------+----------------------------------------------------------------------------+ -| networking | A bug that is specific to networking issues | -+-------------------------------+----------------------------------------------------------------------------+ -| promotion-blocker | Bug that is blocking promotion job(s) | -+-------------------------------+----------------------------------------------------------------------------+ -| puppet | A bug affecting the TripleO Puppet templates | -+-------------------------------+----------------------------------------------------------------------------+ -| quickstart | A bug affecting tripleo-quickstart or tripleo-quickstart-extras | -+-------------------------------+----------------------------------------------------------------------------+ -| selinux | A bug related to SELinux | -+-------------------------------+----------------------------------------------------------------------------+ -| tech-debt | A bug related to TripleO tech debt | -+-------------------------------+----------------------------------------------------------------------------+ -| tempest | A bug related to tempest running on TripleO | -+-------------------------------+----------------------------------------------------------------------------+ -| tripleo-common | A bug affecting tripleo-common | -+-------------------------------+----------------------------------------------------------------------------+ -| tripleo-heat-templates | A bug affecting the TripleO Heat Templates | 
-+-------------------------------+----------------------------------------------------------------------------+ -| tripleoclient | A bug affecting python-tripleoclient | -+-------------------------------+----------------------------------------------------------------------------+ -| ui | A bug affecting the TripleO UI | -+-------------------------------+----------------------------------------------------------------------------+ -| upgrade | A bug affecting upgrades | -+-------------------------------+----------------------------------------------------------------------------+ -| ux | A bug affecting user experience | -+-------------------------------+----------------------------------------------------------------------------+ -| validations | A bug affecting the Validations | -+-------------------------------+----------------------------------------------------------------------------+ -| workflows | A bug affecting the Mistral workflows | -+-------------------------------+----------------------------------------------------------------------------+ -| xxx-backport-potential | Cherry-pick request for the stable team | -+-------------------------------+----------------------------------------------------------------------------+ - -Alternatives & History -====================== - -The current ad-hoc system is not working well, as people use -inconsistent subject tags and other markers. Likewise, with the list -not being official Launchpad tags do not autocomplete and quickly -become inconsistent, hence not as useful. - -We could use the wiki to keep track of the tags, but the future of the -wiki is in doubt. By making tags an official policy, changes to the -list can be reviewed. 
- -Implementation -============== - -Author(s) ---------- - -Primary author: - jpichon - -Milestones ----------- - -Newton-3 - -Work Items ----------- - -Once the policy has merged, someone with the appropriate Launchpad -permissions should create the tags and an email should be sent to -openstack-dev referring to this policy. - -References -========== - -Launchpad page to manage the tag list: -https://bugs.launchpad.net/tripleo/+manage-official-tags - -Thread that led to the creation of this policy: -http://lists.openstack.org/pipermail/openstack-dev/2016-July/099444.html - -Revision History -================ - -.. list-table:: Revisions - :header-rows: 1 - - * - Release Name - - Description - * - Newton - - Introduced - * - Queens - - tech-debt tag added - -.. note:: - - This work is licensed under a Creative Commons Attribution 3.0 - Unported License. - http://creativecommons.org/licenses/by/3.0/legalcode diff --git a/specs/policy/ci-team-structure.rst b/specs/policy/ci-team-structure.rst deleted file mode 100644 index b50bf14d..00000000 --- a/specs/policy/ci-team-structure.rst +++ /dev/null @@ -1,246 +0,0 @@ -CI Team Structure -================= - -Problem Description -------------------- -The soft analysis over the past one to two years is that landing major new -features and function in CI is difficult while being interrupted by a constant -stream of issues. Each individual is siloed in their own work, feature or -section of the production chain and there is very little time for thoughtful -peer review and collaborative development. - -Policy ------- - -Goals -^^^^^ - - * Increase developer focus, decrease distractions, interruptions, and time - slicing. - * Encourage collaborative team development. - * Better and faster code reviews - -Team Structure -^^^^^^^^^^^^^^ - * The Ruck - * The Rover - * The Sprint Team - -The Ruck -^^^^^^^^ -One person per week will be on the front lines reporting failures found in CI. 
-The Ruck & Rover switch roles in the second week of the sprint. - - * Primary focus is to watch CI, report bugs, improve debug documentation. - * Does not participate in the sprint - * Attends the meetings where the team needs to be represented - * Responds to pings on #oooq / #tripleo regarding CI - * Reviews and improves documentation - * Attends meetings for the group where possible - * For identification, use the irc nick $user|ruck - -The Rover -^^^^^^^^^ -The primary backup for the Ruck. The Ruck should be catching all the issues -in CI and passing the issues to the Rover for more in depth analysis or -resolution of the bug. - - * Back up for the Ruck - * Workload is driven from the tripleo-quickstart bug queue, the Rover is - not monitoring CI - * A secondary input for work is identified technical debt defined in the - Trello board. - * Attends the sprint meetings, but is not responsible for any sprint work - * Helps to triage incoming gerrit reviews - * Responds to pings on irc #oooq / #tripleo - * If the Ruck is overwhelmed with any of their responsibilities the - Rover is the primary backup. - * For identification, use the irc nick $user|rover - -The Sprint Team -^^^^^^^^^^^^^^^ -The team is defined at the beginning of the sprint based on availability. -Members on the team should be as focused on the sprint epic as possible. -A member of team should spend 80% of their time on sprint goals and 20% -on any other duties like code review or incoming high priority bugs that -the Rover can not manage alone. - - * hand off interruptions to the Ruck and Rover as much as possible - * focus as a team on the sprint epic - * collaborate with other members of the sprint team - * seek out peer review regarding sprint work - * keep the Trello board updated daily - * One can point to Trello cards in stand up meetings for status - -The Squads -^^^^^^^^^^ -The squads operate as a subunit of the sprint team. 
Each squad will operate -with the same process and procedures and are managed by the team catalyst. - - * Current Squads - * CI - * Responsible for the TripleO CI system ( non-infra ) and build - verification. - * Tempest - * Responsible for tempest development. - -Team Leaders ------------- - -The team catalyst (TC) -^^^^^^^^^^^^^^^^^^^^^^ -The member of the team responsible organizing the group. The team will elect or -appoint a team catalyst per release. - - * organize and plan sprint meetings - * collect status and send status emails - -The user advocate (UA) -^^^^^^^^^^^^^^^^^^^^^^ -The member of the team responsible for help to prioritize work. The team will -elect or appoint a user advocate per release. - - * organize and prioritize the Trello board for the sprint planning - * monitor the board during the sprint. - * ensure the right work is being done. - -The Squads -^^^^^^^^^^ -There are two squads on the CI team. - - * tripleo ci - * tempest development - -Each squad has a UA and they share a TC. Both contribute to Ruck and Rover rotations. - - -Current Leaders for Rocky -^^^^^^^^^^^^^^^^^^^^^^^^^^ - * team catalyst (ci, tempest) - Matt Young - * user advocate (ci) - Gabriele Cerami - * user advocate (tempest) - Chandan Kumar - -Sprint Structure -^^^^^^^^^^^^^^^^ -The goal of the sprint is to define a narrow and focused feature called an epic -to work on in a collaborative way. Work not completed in the sprint will be -added to the technical debt column of Trello. - -**Note:** Each sprint needs a clear definition of done that is documented in -the epic used for the sprint. 
- -Sprint Start ( Day 1 ) - 2.5 hours -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - - * Sprints are three weeks in length - * A planning meeting is attended by the entire team including the Ruck and - Rover - * Review PTO - * Review any meetings that need to be covered by the Ruck/Rover - * The UA will present options for the sprint epic - * Discuss the epic, lightly breaking each one down - * Vote on an epic - * The vote can be done using a doodle form - * Break down the sprint epic into cards - * Review each card - * Each card must have a clear definition of done - * As a group include as much detail in the card as to provide enough - information for an engineer with little to no background with the task. - - -Sprint End ( Day 15 ) - 2.5 hours -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - * Retrospective - * team members, ruck and rover only - * Document any technical debt left over from the sprint - * Ruck / Rover hand off - * Assign Ruck and Rover positions - * Sprint demo - when available - * Office hours on irc - -Scrum meetings - 30 Min -^^^^^^^^^^^^^^^^^^^^^^^ - * Planning meeting, video conference - * Sprint End, video and irc #oooq on freenode - * 2 live video conference meetings per week - * sprint stand up - * Other days, post status to the team's Trello board and/or cards - - -TripleoO CI Community meeting -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - * A community meeting should be held once a week. - * The meeting should ideally be conveniently scheduled immediately after - the TripleO community meeting on #tripleo (OFTC) - * The CI meeting should be announced as part of the TripleO community meeting - to encourage participation. - -Alternatives & History ----------------------- - -In the past the CI team has worked as individuals or by pairing up for distinct -parts of the CI system and for certain features. Neither has been -overwhelmingly successful for delivering features on a regular cadence. 
- -Implementation --------------- - -Primary author: Wes Hayutin weshayutin at gmail - -Other contributors: - * Ronelle Landy rlandy at redhat - * Arx Cruz acruz at redhat - * Sagi Shnaidman at redhat - - -Milestones ----------- - -This document is likely to evolve from the feedback discussed in sprint -retrospectives. An in depth retrospective should be done at the end of each -upstream cycle. - - -References ----------- - -Trello -^^^^^^ -A Trello board will be used to organize work. The team is expected to keep the -board and their cards updated on a daily basis. - - * https://trello.com/b/U1ITy0cu/tripleo-ci-squad - -Dashboards -^^^^^^^^^^ -A number of dashboards are used to monitor the CI - - * http://cistatus.tripleo.org/ - * https://dashboards.rdoproject.org/rdo-dev - * http://zuul-status.tripleo.org/ - -Team Notes -^^^^^^^^^^ - - * https://etherpad.openstack.org/p/tripleo-ci-squad-meeting - -Bug Queue -^^^^^^^^^ - * http://tinyurl.com/yag6y9ne - - -Revision History ----------------- - -.. list-table:: Revisions - :header-rows: 1 - - * - Release Name - - Description - * - Rocky - - April 16 2018 - -.. note:: - - This work is licensed under a Creative Commons Attribution 3.0 - Unported License. http://creativecommons.org/licenses/by/3.0/legalcode diff --git a/specs/policy/expedited-approvals.rst b/specs/policy/expedited-approvals.rst deleted file mode 100644 index 00faeadc..00000000 --- a/specs/policy/expedited-approvals.rst +++ /dev/null @@ -1,122 +0,0 @@ -===================== - Expedited Approvals -===================== - -In general, TripleO follows the standard "2 +2" review standard, but there are -situations where we want to make an exception. This policy is intended to -document those exceptions. - -Problem Description -=================== - -Core reviewer time is precious, and there is never enough of it. In some -cases, requiring 2 +2's on a patch is a waste of that core time, so we need -to be reasonable about when to make exceptions. 
While core reviewers are -always free to use their judgment about when to merge or not merge a patch, -it can be helpful to list some specific situations where it is acceptable and -even expected to approve a patch with a single +2. - -Part of this information is already in the wiki, but the future of the wiki -is in doubt and it's better to put policies in a place that they can be -reviewed anyway. - -Policy -====== - -Single +2 Approvals -------------------- - -A core can and should approve patches without a second +2 under the following -circumstances: - -* The change has multiple +2's on previous patch sets, indicating an agreement - from the other cores that the overall design is good, and any alterations to - the patch since those +2's must be minor implementation details only - - trivial rebases, minor syntax changes, or comment/documentation changes. -* Backports proposed by another core reviewer. Backports should already have - been reviewed for design when they merged to master, so if two cores agree - that the backport is good (one by proposing, the other by reviewing), they - can be merged with a single +2 review. -* Requirements updates proposed by the bot. -* Translation updates proposed by the bot. (See also `reviewing - translation imports - `_.) - -Co-author +2 ------------- - -Co-authors on a patch are allowed to +2 that patch, but at least one +2 from a -core not listed as a co-author is required to merge the patch. For example, if -core A pushes a patch with cores B and C as a co-authors, core B and core C are -both allowed to +2 that patch, but another core is required to +2 before the -patch can be merged. - -Self-Approval -------------- - -It is acceptable for a core to self-approve a patch they submitted if it has the -requisite 2 +2's and a CI pass. However, this should not be done if there is any -dispute about the patch, such as on a change with 2 +2's and an unresolved -1. 
- -Note on CI ----------- - -This policy does not affect CI requirements. Patches must still pass CI before -merging. - -Alternatives & History -====================== - -This policy has been in effect for a while now, but not every TripleO core is -aware of it, so it is simply being written down in an official location for -reference. - -Implementation -============== - -Author(s) ---------- - -Primary author: - bnemec - -Milestones ----------- - -The policy is already in effect. - -Work Items ----------- - -Ensure all cores are aware of the policy. Once the policy has merged, an email -should be sent to openstack-dev referring to it. - -References -========== - -Existing wiki on review guidelines: -https://wiki.openstack.org/wiki/TripleO/ReviewGuidelines - -Previous spec that implemented some of this policy: -https://specs.openstack.org/openstack/tripleo-specs/specs/kilo/tripleo-review-standards.html - -Revision History -================ - -.. list-table:: Revisions - :header-rows: 1 - - * - Release Name - - Description - * - Newton - - Introduced - * - Newton - - Added co-author +2 policy - * - Ocata - - Added note on translation imports - -.. note:: - - This work is licensed under a Creative Commons Attribution 3.0 - Unported License. - https://creativecommons.org/licenses/by/3.0/legalcode diff --git a/specs/policy/first-principles.rst b/specs/policy/first-principles.rst deleted file mode 100644 index 332c66ea..00000000 --- a/specs/policy/first-principles.rst +++ /dev/null @@ -1,257 +0,0 @@ -.. - -======================== -TripleO First Principles -======================== - -The TripleO first principles are a set of principles that guide decision making -around future direction with TripleO. The principles are used to evaluate -choices around changes in direction and architecture. Every impactful decision -does not necessarily have to follow all the principles, but we use them to make -informed decisions about trade offs when necessary. 
- -Problem Description -=================== - -When evaluating technical direction within TripleO, a better and more -consistent method is needed to weigh pros and cons of choices. Defining the -principles is a step towards addressing that need. - - -Policy -====== - - -Definitions ------------ - -Framework - The functional implementation which exposes a set of standard enforcing - interfaces that can be consumed by a service to describe that service's - deployment and management. The framework includes all functional pieces that - implement such interfaces, such as CLI's, API's, or libraries. - - Example: tripleoclient/tripleo-common/tripleo-ansible/tripleo-heat-templates - -Service - The unit of deployment. A service will implement the necessary framework - interfaces in order to describe it's deployment. - - The framework does not enforce a particular service boundary, other than by - prescribing best practices. For example, a given service implementation could - deploy both a REST API and a database, when in reality the API and database - should more likely be deployed as their own services and expressed as - dependencies. - - Example: Keystone, MariaDB, RabbitMQ - -Third party integrations - Service implementations that are developed and maintained outside of the - TripleO project. These are often implemented by vendors aiming to add support - for their products within TripleO. - - Example: Cinder drivers, Neutron plugins - -First Principles ----------------- - -#. [UndercloudMigrate] No Undercloud Left Behind - - #. TripleO itself as the deployment tool can be upgraded. We do - not immediately propose what the upgrade will look like or the technology - stack, but we will offer an upgrade path or a migration path. - -#. [OvercloudMigrate] No Overcloud Left Behind - - #. An overcloud deployed with TripleO can be upgraded to the next major version - with either an in place upgrade or migration. - -#. 
[DefinedInterfaces] TripleO will have a defined interface specification. - - #. We will document clear boundaries between internal and external - (third party integrations) interfaces. - #. We will document the supported interfaces of the framework in the same - way that a code library or API would be documented. - #. Individual services of the framework can be deployed and tested in - isolation from other services. Service dependencies are expressed per - service, but do not preclude using the framework to deploy a service - isolated from its dependencies. Whether that is successful or not - depends on how the service responds to missing dependencies, and that is - a behavior of the service and not the framework. - #. The interface will offer update and upgrade tasks as first class citizens - #. The interface will offer validation tasks as first class citizens - -#. [OSProvisioningSeparation] Separation between operating system provisioning - and software configuration. - - #. Baremetal configuration, network configuration and base operating system - provisioning is decoupled from the software deployment. - #. The software deployment will have a defined set of minimal requirements - which are expected to be in-place before it begins the software deployment. - - #. Specific linux distributions - #. Specific linux distribution versions - #. Password-less access via ssh - #. Password-less sudo access - #. Pre-configured network bridges - -#. [PlatformAgnostic] Platform agnostic deployment tooling. - - #. TripleO is sufficiently isolated from the platform in a way that allows - for use in a variety of environments (baremetal/virtual/containerized/OS - version). - #. The developer experience is such that it can easily be run in - isolation on developer workstations - -#. [DeploymentToolingScope] The deployment tool has a defined scope - - #. Data collection tool. - - #. Responsible for collecting host and state information and posting to a - centralized repository. 
- #. Handles writes to central repository (e.g. read information from - repository, do aggregation, post to central repository) - - #. A configuration tool to configure software and services as part of the - deployment - - #. Manages Software Configuration - - #. Files - #. Directories - #. Service (containerized or non-containerized) state - #. Software packages - - #. Executes commands related to “configuration” of a service - Example: Configure OpenStack AZ's, Neutron Networks. - #. Isolated executions that are invoked independently by the orchestration tool - #. Single execution state management - - #. Input is configuration data/tasks/etc - #. A single execution produces the desired state or reports failure. - #. Idempotent - - #. Read-only communication with centralized data repository for configuration data - - #. The deployment process depends on an orchestration tool to handle various - task executions. - - #. Task graph manager - #. Task transport and execution tracker - #. Aware of hosts and work to be executed on the hosts - #. Ephemeral deployment tooling - #. Efficient execution - #. Scale and reliability/durability are first class citizens - -#. [CI/CDTooling] TripleO functionality should be considered within the context - of being directly invoked as part of a CI/CD pipeline. - -#. [DebuggableFramework] Diagnosis of deployment/configuration failures within - the framework should be quick and simple. Interfaces should be provided to - enable debuggability of service failures. - -#. [BaseOSBootstrap] TripleO can start from a base OS and go to full cloud - - #. It should be able to start at any point after base OS, but should be able - to handle the initial OS bootstrap - -#. [PerServiceManagement] TripleO can manage individual services in isolation, - and express and rely on dependencies and ordering between services. - -#. [Predictable/Reproducible/Idempotent] The deployment is predictable - - #. 
The operator can determine what changes will occur before actually applying - those changes. - #. The deployment is reproducible in that the operator can re-run the - deployment with the same set of inputs and achieve the same results across - different environments. - #. The deployment is idempotent in that the operator can re-run the - deployment with the same set of inputs and the deployment will not change other - than when it was first deployed. - #. In the case where a service needs to restart a process, the framework - will have an interface that the service can use to notify of the - needed restart. In this way, the restarts are predictable. - #. The interface for service restarts will allow for a service to describe - how it should be restarted in terms of dependencies on other services, - simultaneous restarts, or sequential restarts. - -Non-principles --------------- - -#. [ContainerImageManagement] The framework does not manage container images. - Other than using a given container image to start a container, the framework - does not encompass common container image management to include: - - #. Building container images - #. Patching container images - #. Serving or mirroring container images - #. Caching container images - - Specific tools for container image and runtime management and that need to - leverage the framework during deployment are expected to be implemented as - services. - -#. [SupportingTooling] Tools and software executed by the framework to deploy - services or tools required prior to service deployment by the framework are - not considered part of the framework itself. - - Examples: podman, TCIB, image-serve, nova-less/metalsmith - -Alternatives & History -====================== - -Many, if not all, the principles are already well agreed upon and understood as -core to TripleO. Writing them down as policy makes them more discoverable and -official. 
- -Historically, there have been instances when decisions have been guided by -desired technical implementation or outcomes. Recording the principles does not -necessarily mean those decisions would stop, but it does allow for a more -reasonable way to think about the trade offs. - -We do not need to adopt any principles, or record them. However, there is no -harm in doing so. - -Implementation -============== - -Author(s) ---------- - -Primary author: - James Slagle - -Other contributors: - - -Milestones ----------- - -None. - -Work Items ----------- - -None. - -References -========== - -None. - -Revision History -================ - -.. list-table:: Revisions - :header-rows: 1 - - * - Release Name - - Description - * - v0.0.1 - - Introduced - -.. note:: - - This work is licensed under a Creative Commons Attribution 3.0 - Unported License. - http://creativecommons.org/licenses/by/3.0/legalcode diff --git a/specs/policy/patch-abandonment.rst b/specs/policy/patch-abandonment.rst deleted file mode 100644 index 89d94150..00000000 --- a/specs/policy/patch-abandonment.rst +++ /dev/null @@ -1,109 +0,0 @@ -================= -Patch Abandonment -================= - -Goal -==== - -Provide basic policy that core reviewers can apply to outstanding reviews. As -always, it is up to the core reviewers discretion on whether a patch should or -should not be abandoned. This policy is just a baseline with some basic rules. - -Problem Description -=================== - -TripleO consists of many different projects in which many patches become stale -or simply forgotten. This can lead to problems when trying to review the -current patches for a given project. - -When to Abandon -=============== - -If a proposed patch has been marked -1 WIP by the author but has sat idle for -more than 180 days, a core reviewer should abandon the change with a reference -to this policy. 
- -If a proposed patch is submitted and given a -2 and the patch has sat idle for -90 days with no effort to address the -2, a core reviewer should abandon the -change with a reference to this policy. - -If a proposed patch becomes stale by ending up with a -1 from CI for 90 days -and no activity to resolve the issues, a core reviewer should abandon the -change with a reference to this policy. - -If a proposed patch with no activity for 90 days is in merge conflict, even -with a +1 from CI, a core reviewer should abandon the change with a reference -to this policy. - -When NOT to Abandon -=================== - -If a proposed patch has no feedback but is +1 from CI, a core reviewer should -not abandon such changes. - -If a proposed patch a given a -1 by a reviewer but the patch is +1 from CI and -not in merge conflict and the author becomes unresponsive for a few weeks, -reviewers can leave a reminder comment on the review to see if there is -still interest in the patch. If the issues are trivial then anyone should feel -welcome to checkout the change and resubmit it using the same change ID to -preserve original authorship. Core reviewers should not abandon such changes. - -Restoration -=========== - -Feel free to restore your own patches. If a change has been abandoned -by a core reviewer, anyone can request the restoration of the patch by -asking a core reviewer on IRC in #tripleo on OFTC or by sending a -request to the openstack-dev mailing list. Should the patch again -become stale it may be abandoned again. - -Alternative & History -===================== - -This topic was previously brought up on the openstack mailing list [1]_ along -with proposed code to use for automated abandonment [2]_. Similar policies are -used by the Puppet OpenStack group [3]_. 
- -Implementation -============== - -Author(s) ---------- - -Primary author: - aschultz - -Other contributors: - bnemec - -Milestones ----------- - -Pike-2 - -Work Items ----------- - -References -========== - -.. [1] http://lists.openstack.org/pipermail/openstack-dev/2015-October/076666.html -.. [2] https://github.com/cybertron/tripleo-auto-abandon -.. [3] https://docs.openstack.org/developer/puppet-openstack-guide/reviews.html#abandonment - -Revision History -================ - -.. list-table:: Revisions - :header-rows: 1 - - * - Release Name - - Description - * - Pike - - Introduced - -.. note:: - - This work is licensed under a Creative Commons Attribution 3.0 - Unported License. - http://creativecommons.org/licenses/by/3.0/legalcode diff --git a/specs/policy/spec-review.rst b/specs/policy/spec-review.rst deleted file mode 100644 index fc9c40df..00000000 --- a/specs/policy/spec-review.rst +++ /dev/null @@ -1,163 +0,0 @@ -========================= - Spec Review Process -========================= - -Document the existing process to help reviewers, especially newcomers, -understand how to review specs. This is migrating the existing wiki -documentation into a policy. - -Problem Description -=================== - -Care should be taken when approving specs. An approved spec, and an -associated blueprint, indicate that the proposed change has some -priority for the TripleO project. We don't want a bunch of approved -specs sitting out there that no community members are owning or working -on. We also want to make sure that our specs and blueprints are easy to -understand and have sufficient enough detail to effectively communicate -the intent of the change. The more effective the communication, the -more likely we are to elicit meaningful feedback from the wider -community. - -Policy -====== - -To this end, we should be cognizant of the following checklist when -reviewing and approving specs. - -* Broad feedback from interested parties. 
- - * We should do our best to elicit feedback from operators, - non-TripleO developers, end users, and the wider OpenStack - community in general. - * Mail the appropriate lists, such as openstack-operators and - openstack-dev to ask for feedback. Respond to feedback on the list, - but also encourage direct comments on the spec itself, as those - will be easier for other spec reviewers to find. - -* Overall consensus - - * Check for a general consensus in the spec. - * Do reviewers agree this change is meaningful for TripleO? - * If they don't have a vested interest in the change, are they at - least not objecting to the change? - -* Review older patchsets to make sure everything has been addressed - - * Have any reviewers raised objections in previous patchsets that - were not addressed? - * Have any potential pitfalls been pointed out that have not been - addressed? - -* Impact/Security - - * Ensure that the various Impact (end user, deployer, etc) and - Security sections in the spec have some content. - * These aren't sections to just gloss over after understanding - the implementation and proposed change. They are actually the most - important sections. - * It would be nice if that content had elicited some feedback. If it - didn't, that's probably a good sign that the author and/or - reviewers have not yet thought about these sections carefully. - -* Ease of understandability - - * The spec should be easy to understand for those reviewers who are - familiar with the project. While the implementation may contain - technical details that not everyone will grasp, the overall - proposed change should be able to be understood by folks generally - familiar with TripleO. Someone who is generally familiar with - TripleO is likely someone who has run through the undercloud - install, perhaps contributed some code, or participated in reviews. - * To aid in comprehension, grammar nits should generally be corrected - when they have been pointed out. 
Be aware though that even nits can - cause disagreements, as folks pointing out nits may be wrong - themselves. Do not bikeshed over solving disagreements on nits. - -* Implementation - - * Does the implementation make sense? - * Are there alternative implementations, perhaps easier ones, and if - so, have those been listed in the Alternatives section? - * Are reasons for discounting the Alternatives listed in the spec? - -* Ownership - - * Is the spec author the primary assignee? - * If not, has the primary assignee reviewed the spec, or at least - commented that they agree that they are the primary assignee? - -* Reviewer workload - - * Specs turn into patches to codebases. - * A +2 on a spec means that the core reviewer intends to review the - patches associated with that spec in addition to their other core - commitments for reviewer workload. - * A +1 on a spec from a core reviewer indicates that the core - reviewer is not necessarily committing to review that spec's - patches. - * It's fine to +2 even if the spec also relates to other repositories - and areas of expertise, in addition to the reviewer's own. We - probably would not want to merge any spec that spanned multiple - specialties without a representative from each group adding their - +2. - * Have any additional (perhaps non-core) reviewers volunteered to - review patches that implement the spec? - * There should be a sufficient number of core reviewers who have - volunteered to go above and beyond their typical reviewer workload - (indicated by their +2) to review the relevant patches. A - "sufficient number" is dependent on the individual spec and the - scope of the change. - * If reviewers have said they'll be reviewing a spec's patches - instead of patches they'd review otherwise, that doesn't help much - and is actually harmful to the overall project. - -Alternatives & History -====================== - -This is migrating the already agreed upon policy from the wiki. 
- -Implementation -============== - -Author(s) ---------- - -Primary author: - james-slagle (from the wiki history) - -Other contributors: - jpichon - -Milestones ----------- - -None - -Work Items ----------- - -Once the policy has merged, an email should be sent to openstack-dev -referring to this document. - -References -========== - -* Original documentation: https://wiki.openstack.org/wiki/TripleO/SpecReviews - -Revision History -================ - -.. list-table:: Revisions - :header-rows: 1 - - * - Release Name - - Description - * - Ocata - - Migrated from wiki - -.. note:: - - This work is licensed under a Creative Commons Attribution 3.0 - Unported License. - http://creativecommons.org/licenses/by/3.0/legalcode diff --git a/specs/policy/squads.rst b/specs/policy/squads.rst deleted file mode 100644 index 8f2636a0..00000000 --- a/specs/policy/squads.rst +++ /dev/null @@ -1,141 +0,0 @@ -============== -TripleO Squads -============== - -Scaling-up a team is a common challenge in OpenStack. -We always increase the number of projects, with more contributors -and it often implies some changes in the organization. -This policy is intended to document how we will address this challenge in -the TripleO project. - -Problem Description -=================== - -Projects usually start from a single git repository and very often grow to -dozen of repositories, doing different things. As long as a project gets -some maturity, people who work together on a same topic needs some space -to collaborate the open way. -Currently, TripleO is acting as a single team where everyone meets -on IRC once a week to talk about bugs, CI status, release management. -Also, it happens very often that new contributors have hard time to find -an area of where they could quickly start to contribute. -Time is precious for our developers and we need to find a way to allow -them to keep all focus on their area of work. 
- -Policy -====== - -The idea of this policy is to create squads of people who work on the -same topic and allow them to keep focus with low amount of external -distractions. - -* Anyone would be free to join and leave a squad at will. - Right now, there is no size limit for a squad as this is something - experimental. If we realize a squad is too big (more than 10 people), - we might re-consider the focus of area of the squad. -* Anyone can join one or multiple squads at the same time. Squads will be - documented in a place anyone can contribute. -* Squads are free to organize themselves a weekly meeting. -* #tripleo remains the official IRC channel. We won't add more channels. -* Squads will have to choose a representative, who would be a squad liaison - with TripleO PTL. -* TripleO weekly meeting will still exist, anyone is encouraged to join, - but topics would stay high level. Some examples of topics: release - management; horizontal discussion between squads, CI status, etc. - The meeting would be a TripleO cross-projects meeting. - -We might need to test the idea for at least 1 or 2 months and invest some -time to reflect what is working and what could be improved. - -Benefits --------- - -* More collaboration is expected between people working on a same topic. - It will reflect officially what we have nearly done over the last cycles. -* People working on the same area of TripleO would have the possibility - to do public and open meetings, where anyone would be free to join. -* Newcomers would more easily understand what TripleO project delivers - since squads would provide a good overview of the work we do. Also - it would be an opportunity for people who want to learn on a specific - area of TripleO to join a new squad and learn from others. -* Open more possibilities like setting up mentoring program for each squad, - or specific docs to get involved more quickly. 
- -Challenges ----------- - -* We need to avoid creating silos and keep horizontal collaboration. - Working on a squad doesn't meen you need to ignore other squads. - -Squads ------- - -The list tends to be dynamic over the cycles, depending on which topics -the team is working on. The list below is subject to change as squads change. - -+-------------------------------+----------------------------------------------------------------------------+ -| Squad | Description | -+===============================+============================================================================+ -| ci | Group of people focusing on Continuous Integration tooling and system | -+-------------------------------+----------------------------------------------------------------------------+ -| upgrade | Group of people focusing on TripleO upgrades | -+-------------------------------+----------------------------------------------------------------------------+ -| validations | Group of people focusing on TripleO validations tooling | -+-------------------------------+----------------------------------------------------------------------------+ -| networking | Group of people focusing on networking bits in TripleO | -+-------------------------------+----------------------------------------------------------------------------+ -| integration | Group of people focusing on configuration management (eg: services) | -+-------------------------------+----------------------------------------------------------------------------+ -| security | Group of people focusing on security | -+-------------------------------+----------------------------------------------------------------------------+ -| edge | Group of people focusing on Edge/multi-site/multi-cloud | -| | https://etherpad.openstack.org/p/tripleo-edge-squad-status | -+-------------------------------+----------------------------------------------------------------------------+ -| transformation | Group of people focusing on converting heat 
 templates / puppet to Ansible | -| | within the tripleo-ansible framework | -+-------------------------------+----------------------------------------------------------------------------+ - -.. note:: - - Note about CI: the squad is about working together on the tooling used - by OpenStack Infra to test TripleO, though every squad is in charge of - maintaining the good shape of their tests. - - -Alternatives & History -====================== - -One alternative would be to continue that way and keep a single horizontal -team. As long as we try to welcome in the team and add more projects, we'll -increase the problem severity of scaling-up TripleO project. -The number of people involved and the variety of topics makes it really difficult to work on everything. - -Implementation -============== - -Author(s) ---------- - -Primary author: - emacchi - -Milestones ----------- - -Ongoing - -Work Items ----------- - -* Work with TripleO developers to document the area of work for every squad. -* Document the output. -* Document squad members. -* Setup Squad meetings if needed. -* For each squad, find a liaison or a squad leader. - - -.. note:: - - This work is licensed under a Creative Commons Attribution 3.0 - Unported License. - http://creativecommons.org/licenses/by/3.0/legalcode diff --git a/specs/policy/tech-debt-tracking.rst b/specs/policy/tech-debt-tracking.rst deleted file mode 100644 index ec3e7446..00000000 --- a/specs/policy/tech-debt-tracking.rst +++ /dev/null @@ -1,113 +0,0 @@ -================== -Tech Debt Tracking -================== - -Goal -==== - -Provide a basic policy for tracking and being able to reference tech debt -related changes in TripleO. - -Problem Description -=================== - -During the development of TripleO, sometimes tech debt is acquired due to time -or resource constraints that may exist. 
Without a solid way of tracking when -we intentionally add tech debt, it is hard to quantify how much tech debt is -being self inflicted. Additionally tech debt gets lost in the code and without -a way to remember where we left it, it is almost impossible to remember when -and where we need to go back to fix some known issues. - -Proposed Change -=============== - -Tracking Code Tech Debt with Bugs ---------------------------------- - -Intentionally created tech debt items should have a bug [1]_ created with the -`tech-debt` tag added to it. Additionally the commit message of the change -should reference this `tech-debt` bug and if possible a comment should be added -into the code referencing who put it in there. - -Example Commit Message:: - - Always exit 0 because foo is currently broken - - We need to always exit 0 because the foo process erroneously returns - 42. A bug has been reported upstream but we are not sure when it - will be addressed. - - Related-Bug: #1234567 - -Example Comment:: - - # TODO(aschultz): We need this because the world is falling apart LP#1234567 - foo || exit 0 - -Triaging Bugs as Tech Debt --------------------------- - -If an end user reports a bug that we know is a tech debt item, the person -triaging the bug should add the `tech-debt` tag to the bug. - -Reporting Tech Debt -------------------- - -With the `tech-debt` tag on bugs, we should be able to keep a running track -of the bugs we have labeled and should report on this every release milestone -to see trends around how much is being added and when. As part of our triaging -of bugs, we should strive to add net-zero tech-debt bugs each major release if -possible. - - -Alternatives ------------- - -We continue to not track any of these things and continue to rely on developers -to remember when they add code and circle back around to fix it themselves or -when other developers find the issue and remove it. 
- -Implementation -============== - -Core reviewers should request that any tech debt be appropriately tracked and -feel free to -1 any patches that are adding tech debt without proper -attribution. - -Author(s) ---------- - -Primary author: - aschultz - -Milestones ----------- - -Queens-1 - -Work Items ----------- - -* aschultz to create tech-debt tag in Launchpad. - -References -========== - -.. [1] https://docs.openstack.org/tripleo-docs/latest/contributor/contributions.html#reporting-bugs - -Revision History -================ - -.. list-table:: Revisions - :header-rows: 1 - - * - Release Name - - Description - * - Queens - - Introduced - -.. note:: - - This work is licensed under a Creative Commons Attribution 3.0 - Unported License. - http://creativecommons.org/licenses/by/3.0/legalcode diff --git a/specs/queens/fast-forward-upgrades.rst b/specs/queens/fast-forward-upgrades.rst deleted file mode 100644 index 26861876..00000000 --- a/specs/queens/fast-forward-upgrades.rst +++ /dev/null @@ -1,351 +0,0 @@ -. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -===================== -Fast-forward upgrades -===================== - -https://blueprints.launchpad.net/tripleo/+spec/fast-forward-upgrades - -Fast-forward upgrades are upgrades that move an environment from release `N` to -`N+X` in a single step, where `X` is greater than `1` and for fast-forward -upgrades is typically `3`. This spec outlines how such upgrades can be -orchestrated by TripleO between the Newton and Queens OpenStack releases. - -Problem Description -=================== - -OpenStack upgrades are often seen by operators as problematic [1]_ [2]_. -Whilst TripleO upgrades have improved greatly over recent cycles many operators -are still reluctant to upgrade with each new release. - -This often leads to a situation where environments remain on the release used -when first deployed. 
Eventually this release will come to the end of its -supported life (EOL), forcing operators to upgrade to the next supported -release. There can also be restrictions imposed on an environment that simply -do not allow upgrades to be performed ahead of the EOL of a given release, -forcing operators to again wait until the release hits EOL. - -While it is possible to then linearly upgrade to a supported release with the -cadence of upstream releases, downstream distributions providing long-term -support (LTS) releases may not be able to provide the same path once the -initially installed release reaches EOL. Operators in such a situation may also -want to avoid running multiple lengthy linear upgrades to reach their desired -release. - -Proposed Change -=============== - -Overview --------- - -TripleO support for fast-forward upgrades will first target `N` to `N+3` -upgrades between the Newton and Queens releases: - -.. code-block:: bash - - Newton Ocata Pike Queens - +-----+ +-----+ +-----+ +-----+ - | | | N+1 | | N+2 | | | - | N | ---------------------> | N+3 | - | | | | | | | | - +-----+ +-----+ +-----+ +-----+ - - -This will give the impression of the Ocata and Pike releases being skipped with -the fast-forward upgrade moving the environment from Newton to Queens. In -reality as OpenStack projects with the `supports-upgrade` tag are only required -to support `N` to `N+1` upgrades [3]_ the upgrade will still need to move -through each release, completing database migrations and a limited set of other -tasks. - -Caveats -------- - -Before outlining the suggested changes to TripleO it is worth highlighting the -following caveats for fast-forward upgrades: - -* The control plane is inaccessible for the duration of the upgrade -* The data plane and active workloads must remain available for the duration of - the upgrade. 
- -Prerequisites -------------- - -Prior to the overcloud fast-forward upgrade starting the following prerequisite -tasks must be completed: - -* Rolling minor update of the overcloud on `N` - -This is a normal TripleO overcloud update [4]_ and should bring each node in -the environment up to the latest supported version of the underlying OS and -pulling in the latest packages. Operators can then reboot the nodes as -required. The reboot ensuring that the latest kernel, openvswitch, QEMU and any -other reboot dependant package is reloaded before proceeding with the upgrade. -This can happen well in advance of the overcloud fast-forward upgrade and -should remove the need for additional reboots during the upgrade. - -* Upgrade undercloud from `N` to `N+3` - -The undercloud also needs to be upgraded to `N+3` ahead of any overcloud -upgrade. Again this can happen well in advance of the overcloud upgrade. For -the time being this is a traditional, linear upgrade between `N` and `N+1` -releases until we reach the target `N+3` Queens release. - -* Container images cached prior to the start of the upgrade - -With the introduction of containerised TripleO overclouds in Pike operators -will need to cache the required container images prior to the fast-forward -upgrade if they wish to end up with a containerised Queens overcloud. - -High level flow ---------------- - -At a high level the following actions will be carried out by the fast-forward -upgrade to move the overcloud from `N` to `N+3`: - -* Stop all OpenStack control and compute services across all roles - -This will bring down the OpenStack control plane, leaving infrastructure -services such as the databases running, while allowing any workloads to -continue running without interruption. For HA environments this will disable -the cluster, ensuring that OpenStack services are not restarted. 
- -* Upgrade a single host from `N` to `N+1` then `N+1` to `N+2` - -As alluded to earlier, OpenStack projects currently only support `N` to `N+1` -upgrades and so fast-forward upgrades still need to cycle through each release in -order to complete data migrations and any other tasks that are required before -these migrations can be completed. This part of the upgrade is limited to a -single host per role to ensure this is completed as quickly as possible. - -* Optional upgrade and deployment of single canary compute host to `N+3` - -As fast-forward upgrades aim to ensure workloads are online and accessible -during the upgrade we can optionally upgrade all control service hosting roles -_and_ a single canary compute to `N+3` to verify that workloads will remain -active and accessible during the upgrade. - -A canary compute node will be selected at the start of the upgrade and have -instances launched on it to validate that both it and the data plane remain -active during the upgrade. The upgrade will halt if either become inaccessible -with a recovery procedure being provided to move all hosts back to `N+1` -without further disruption to the active workloads on the untouched compute -hosts. - -* Upgrade and deployment of all roles to `N+3` - -If the above optional canary compute host upgrade is not used then the final -action in the fast-forward upgrade will be a traditional `N` to `N+1` migration -between `N+2` and `N+3` followed by the deployment of all roles on `N+3`. This -final action essentially being a redeployment of the overcloud to containers on -`N+3` (Queens) as previously seen when upgrading TripleO environments from -Ocata to Pike. - -A python-tripleoclient command and associated Mistral workflow will control if -this final step is applied to all roles in parallel (default), all hosts in a -given role or selected hosts in a given role. The latter being useful if a user -wants to control the order in which computes are moved from `N+1` to `N+3` etc. 
- -Implementation --------------- - -As with updates [5]_ and upgrades [6]_ specific fast-forward upgrade Ansible -tasks associated with the first two actions above will be introduced into the -`tripleo-heat-template` service templates for each service as `RoleConfig` -outputs. - -As with `upgrade_tasks` each task is associated with a particular step in the -process. For `fast_forward_upgrade_tasks` these steps are split between prep -tasks that apply to all hosts and bootstrap tasks that only apply to a single -host for a given role. - -Prep step tasks will map to the following actions: - -- Step=1: Disable the overall cluster -- Step=2: Stop OpenStack services -- Step=3: Update host repositories - -Bootstrap step tasks will map to the following actions: - -- Step=4: Take OpenStack DB backups -- Step=5: Pre package update commands -- Step=6: Update required packages -- Step=7: Post package update commands -- Step=8: OpenStack service DB sync -- Step=9: Validation - -As with `update_tasks` each task will use simple `when` conditionals to -identify which step and release(s) it is associated with, ensuring these tasks -are executed at the correct point in the upgrade. - -For example, a step 2 `fast_forward_upgrade_task` task on Ocata is listed below: - -.. code-block:: yaml - - fast_forward_upgrade_tasks: - - name: Example Ocata step 2 task - command: /bin/foo bar - when: - - step|int == 2 - - release == 'ocata' - - -These tasks will then be collated into role specific Ansible playbooks via the -RoleConfig output of the `overcloud` heat template, with step and release -variables being fed in to ensure tasks are executed in the correct order. - -As with `major upgrades` [8]_ a new mistral workflow and tripleoclient command -will be introduced to generate and execute the associated Ansible tasks. - -.. code-block:: bash - - openstack overcloud fast-forward-upgrade --templates [..path to latest THT..] \ - [..original environment arguments..] 
 \ - [..new container environment arguments..] - -Operators will also be able to generate [7]_ , download and review the -playbooks ahead of time using the latest version of `tripleo-heat-templates` -with the following commands: - -.. code-block:: bash - - openstack overcloud deploy --templates [..path to latest THT..] \ - [..original environment arguments..] \ - [..new container environment arguments..] \ - -e environments/fast-forward-upgrade.yaml \ - -e environments/noop-deploy-steps.yaml - openstack overcloud config download - - -Dev workflow ------------- - -The existing tripleo-upgrade Ansible role will be used to automate the -fast-forward upgrade process for use by developers and CI, including the -initial overcloud minor update, undercloud upgrade to `N+3` and fast-forward -upgrade itself. - -Developers working on fast_forward_upgrade_tasks will also be able to deploy -minimal overcloud deployments via `tripleo-quickstart` using release configs -also used by CI. - -Further, when developing tasks, developers will be able to manually render and -run `fast_forward_upgrade_tasks` as standalone Ansible playbooks. Allowing them -to run a subset of the tasks against specific nodes using -`tripleo-ansible-inventory`. Examples of how to do this will be documented -hopefully ensuring a smooth development experience for anyone looking to -contribute tasks for specific services. - -Alternatives ------------- - -* Continue to force operators to upgrade linearly through each major release -* Parallel cloud migrations. - -Security Impact ---------------- - -N/A - -Other End User Impact ---------------------- - -* The control plane will be down for the duration of the upgrade -* The data plane and workloads will remain up. 
- -Performance Impact ------------------- - -N/A - -Other Deployer Impact ---------------------- - -N/A - -Developer Impact ----------------- - -* Third party service template providers will need to provide - fast_forward_upgrade_steps in their THT service configurations. - -Implementation -============== - -Assignee(s) ------------ - -Primary assignees: - -* lbezdick -* marios -* chem - -Other contributors: - -* shardy -* lyarwood - -Work Items ----------- - -* Introduce fast_forward_upgrades_playbook.yaml to RoleConfig -* Introduce fast_forward_upgrade_tasks in each service template -* Introduce a python-tripleoclient command and associated Mistral workflow. - -Dependencies -============ - -* TripleO - Ansible upgrade Workflow with UI integration [9]_ - -The new major upgrade workflow being introduced for Pike to Queens upgrades -will obviously impact what fast-forward upgrades looks like to Queens. At -present the high level flow for fast-forward upgrades assumes that we can reuse -the current `upgrade_tasks` between N+2 and N+3 to disable and then potentially -remove baremetal services. This is likely to change as the major upgrade -workflow is introduced and so it is likely that these steps will need to be -encoded in `fast_forward_upgrade_tasks`. - -Testing -======= - -* Third party CI jobs will need to be created to test Newton to Queens using - RDO given the upstream EOL of stable/newton with the release of Pike. - -* These jobs should cover the initial undercloud upgrade, overcloud upgrade and - optional canary compute node checks. - -* An additional third party CI job will be required to verify that a Queens - undercloud can correctly manage a Newton overcloud, allowing the separation - of the undercloud upgrade and fast-forward upgrade discussed under - prerequisites. - -* Finally, minimal overcloud roles should be used to verify the upgrade for - certain services. 
For example, when changes are made to the - `fast_forward_upgrade_tasks` of Nova via changes to - `docker/services/nova-*.yaml` files then a basic overcloud deployment of - Keystone, Glance, Swift, Cinder, Neutron and Nova could be used to quickly - verify the changes in regards to fast-forward upgrades. - -Documentation Impact -==================== - -* This will require extensive developer and user documentation to be written, - most likely in a new section of the docs specifically detailing the - fast-forward upgrade flow. - -References -========== -.. [1] https://etherpad.openstack.org/p/MEX-ops-migrations-upgrades -.. [2] https://etherpad.openstack.org/p/BOS-forum-skip-level-upgrading -.. [3] https://governance.openstack.org/tc/reference/tags/assert_supports-upgrade.html -.. [4] http://tripleo.org/install/post_deployment/package_update.html -.. [5] https://github.com/openstack/tripleo-heat-templates/blob/master/puppet/services/README.rst#update-steps -.. [6] https://github.com/openstack/tripleo-heat-templates/blob/master/puppet/services/README.rst#upgrade-steps -.. [7] https://review.openstack.org/#/c/495658/ -.. [8] https://review.openstack.org/#/q/topic:major-upgrade+(status:open+OR+status:merged) -.. [9] https://specs.openstack.org/openstack/tripleo-specs/specs/queens/tripleo_ansible_upgrades_workflow.html diff --git a/specs/queens/instance-ha.rst b/specs/queens/instance-ha.rst deleted file mode 100644 index 0e0da811..00000000 --- a/specs/queens/instance-ha.rst +++ /dev/null @@ -1,145 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. 
- - http://creativecommons.org/licenses/by/3.0/legalcode - -========================== -Instance High Availability -========================== - -Include the URL of your launchpad blueprint: - -https://blueprints.launchpad.net/tripleo/+spec/instance-ha - -A very often requested feature by operators and customers is to be able to -automatically resurrect VMs that were running on a compute node that failed (either -due to hardware failures, networking issues or general server problems). -Currently we have a downstream-only procedure which consists of many manual -steps to configure Instance HA: -https://access.redhat.com/documentation/en/red-hat-openstack-platform/9/paged/high-availability-for-compute-instances/chapter-1-overview - -What we would like to implement here is basically an optional opt-in automatic -deployment of a cloud that has Instance HA support. - -Problem Description -=================== - -Currently if a compute node has a hardware failure or a kernel panic all the -instances that were running on the node, will be gone and manual intervention -is needed to resurrect these instances on another compute node. - -Proposed Change -=============== - -Overview --------- - -The proposed change would be to add a few additional puppet-tripleo profiles that would help -us configure the pacemaker resources needed for instance HA. Unlike in previous iterations -we won't need to move nova-compute resources under pacemaker's management. We managed to -achieve the same result without touching the compute nodes (except by setting -up pacemaker_remote on the computes, but that support exists already) - -Alternatives ------------- - -There are a few specs that are modeling host recovery: - -Host Recovery - https://review.openstack.org/#/c/386554/ -Instances auto evacuation - https://review.openstack.org/#/c/257809 - -The first spec uses pacemaker in a very similar way but is too new -and too high level to really be able to comment at this point in time. 
-The second one has been stalled for a long time and it looks like there -is no consensus yet on the approaches needed. The longterm goal is -to morph the Instance HA deployment into the spec that gets accepted. -We are actively working on both specs as well. In any case we have -discussed the long-term plan with SuSe and NTT and we agreed -on a long-term plan of which this spec is the first step for TripleO. - -Security Impact ---------------- - -No additional security impact. - -Other End User Impact ---------------------- - -End users are not impacted except for the fact that VMs can be resurrected -automatically on a non-failed compute node. - -Performance Impact ------------------- - -There are no performance related impacts as compared to a current deployment. - -Other Deployer Impact ---------------------- - -So this change does not affect the default deployments. What it does it adds a boolean -and some additional profiles so that a deployer can have a cloud configured with Instance -HA support out of the box. - -* One top-level parameter to enable the Instance HA deployment - -* Although fencing configuration is already currently supported by tripleo, we will need - to improve bits and pieces so that we won't need an extra command to generate the - fencing parameters. - -* Upgrades will be impacted by this change in the sense that we will need to make sure to test - them when Instance HA is enabled. - -Developer Impact ----------------- - -No developer impact is planned. 
- -Implementation -============== - -Assignee(s) ------------ - -Primary assignee: - michele - -Other contributors: - cmsj, abeekhof - -Work Items ----------- - -* Make the fencing configuration fully automated (this is mostly done already, we need oooq integration - and some optimization) - -* Add the logic and needed resources on the control-plane - -* Test the upgrade path when Instance HA is configured - - -Testing -======= - -Testing this manually is fairly simple: - -* Deploy with Instance HA configured and two compute nodes - -* Spawn a test VM - -* Crash the compute node where the VM is running - -* Observe the VM being resurrected on the other compute node - -Testing this in CI is doable but might be a bit more challenging due to resource constraints. - -Documentation Impact -==================== - -A section under advanced configuration is needed explaining the deployment of -a cloud that supports Instance HA. - -References -========== - -* https://access.redhat.com/documentation/en/red-hat-openstack-platform/9/paged/high-availability-for-compute-instances/ diff --git a/specs/queens/ipsec.rst b/specs/queens/ipsec.rst deleted file mode 100644 index e9e81eb3..00000000 --- a/specs/queens/ipsec.rst +++ /dev/null @@ -1,189 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -======================== -IPSEC encrypted networks -======================== - -https://blueprints.launchpad.net/tripleo/+spec/ipsec - -This proposes the usage of IPSEC tunnels for encrypting all communications in a -TripleO cloud. - -Problem Description -=================== - -Having everything in the network encrypted is a hard requirements for certain -use-cases. While TLS everywhere provides support for this, not everyone wants a -full-fledged CA. IPSEC provides an alternative which requires one component -less (the CA) while still fulfilling the security requirements. 
With the -downside that IPSEC tunnel configurations can get quite verbose. - - -Proposed Change -=============== - -Overview --------- - -As mentioned in the mailing list [1], for OSP10 we already worked on an ansible -role that runs on top of a TripleO deployment [2]. - -It does the following: - -* Installs IPSEC if it's not available in the system. - -* Sets up the firewall rules. - -* Based on a hard-coded set of networks, it discovers the IP addresses for each - of them. - -* Based on a hard-coded set of networks, it discovers the Virtual IP addresses - (including the Redis VIP). - -* It puts up an IPSEC tunnel for most IPs in each network. - - - Regular IPs are handled as a point-to-point IPSEC tunnel. - - - Virtual IPs are handled with road-warrior configurations. This means that - the VIP's tunnel listens for any connections. This enables easier - configuration of the tunnel, as the VIP-holder doesn't need to be aware nor - configure each tunnel. - - - Similarly to TLS everywhere, this focuses on service-to-service - communication, so we explicitly skip the tenant network. Or, - as it was in the original ansible role, compute-to-compute communication. - This significantly reduces the amount of tunnels we need to set up, but - leaves application security to the deployer. - - - Authentication for the tunnels is done via a Pre-Shared Key (PSK), which is - shared between all nodes. - -* Finally, it creates an OCF resource that tracks each VIP and puts up or down - its corresponding IPSEC tunnel depending on the VIP's location. - - - While this resource is still in the repository [3], it has now landed - upstream [4]. Once this resource is available in the packaged version of - the resource agents, the preferred version will be the packaged one. - - - This resource effectively handles VIP fail-overs, by detecting that a VIP - is no longer hosted by the node, it cleanly puts down the IPSEC tunnel and - enables it where the VIP is now hosted. 
- -All of this work is already part of the role, however, to have better -integration with the current state of TripleO, the following work is needed: - -* Support for composable networks. - - - Now that composable networks are a thing, we can no longer rely on the - hard-coded values we had in the role. - - - Fortunately, this is information we can get from the tripleo dynamic - inventory. So we would need to add information about the available networks - and the VIPs. - -* Configurable skipping of networks. - - - In order to address the tenant network skipping, we need to somehow make it - configurable. - -* Add the IPSEC package as part of the image. - -* Configure Firewall rules the TripleO way. - - - Currently the role handles the firewall rule setup. However, it should be - fairly simple to configure these rules the same way other services - configure theirs (Using the tripleo..firewall_rules entry). This - will require the usage of a composable service template. - -* As mentioned above, we will need to create a composable service template. - - - This could take into use the recently added `external_deploy_tasks` section - of the templates, which will work similarly to the Kubernetes configuration - and would rely on the config-download mechanism [5]. - -Alternatives ------------- - -While deployers can already use TLS everywhere. A few are already using the -aforementioned ansible role. So this would provide a seamless upgrade path for -them. - -Security Impact ---------------- - -This by itself is a security enhancement, as it enables encryption in the -network. - -The PSK being shared by all the nodes is not ideal and could be addressed by -per-network PSKs. However, this work could be done in further iterations. - -Other End User Impact ---------------------- - -Currently, the deployer needs to provide their PSK. However, this could be -automated as part of the tasks that TripleO does. 
 - -Performance Impact ------------------- - -Same as with TLS everywhere, adding encryption in the network will have a -performance impact. We currently don't have concrete data on what this impact -actually is. - -Other Deployer Impact ---------------------- - -This would be added as a composable service. So it would be something that the -deployer would need to enable via an environment file. - -Implementation -============== - -Assignee(s) ----------- - -Primary assignee: - jaosorior - -Work Items ---------- - -* Add libreswan (IPSEC's frontend) package to the overcloud-full image. - -* Add required information to the dynamic inventory (networks and VIPs) - -* Based on the inventory, create the IPSEC tunnels dynamically, and not based - on the hardcoded networks. - -* Add tripleo-ipsec ansible role as part of the TripleO umbrella. - -* Create composable service. - - -Dependencies -============ - -* This requires the tripleo-ipsec role to be available. For this, it will be - moved to the TripleO umbrella and packaged as such. - - -Testing -======= - -Given that this doesn't require an extra component, we could test this as part -of our upstream tests. The requirement being that the deployment has -network-isolation enabled. - - -References -========== - -[1] http://lists.openstack.org/pipermail/openstack-dev/2017-November/124615.html -[2] https://github.com/JAORMX/tripleo-ipsec -[3] https://github.com/JAORMX/tripleo-ipsec/blob/master/files/ipsec-resource-agent.sh -[4] https://github.com/ClusterLabs/resource-agents/blob/master/heartbeat/ipsec -[5] https://github.com/openstack/tripleo-heat-templates/blob/master/extraconfig/services/kubernetes-master.yaml#L58 diff --git a/specs/queens/network-configuration.rst b/specs/queens/network-configuration.rst deleted file mode 100644 index 76535cfd..00000000 --- a/specs/queens/network-configuration.rst +++ /dev/null @@ -1,115 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. 
- - http://creativecommons.org/licenses/by/3.0/legalcode - -===================== -Network configuration -===================== - -Network configuration for the TripleO GUI - -Problem Description -=================== - -Currently, it's not possible to make advanced network configurations using the -TripleO GUI. - -Proposed Change -=============== - -Overview --------- - -In the GUI, we will provide a wizard to guide the user through configuring the -networks of their deployment. The user will be able to assign networks to -roles, and configure additional network parameters. We will use the -``network_data.yaml`` in the `TripleO Heat Templates`_. The idea is to expose -the data in ``network_data.yaml`` via the web interface. - -In addition to the wizard, we will implement a dynamic network topology diagram -to visually present the configured networks. This will enable the Deployer to -quickly validate their work. The diagram will rely on ``network_data.yaml`` -and ``roles_data.yaml`` for the actual configuration. - -For details, please see the `wireframes`_. - -.. _wireframes: https://openstack.invisionapp.com/share/UM87J4NBQ#/screens -.. _TripleO Heat Templates: https://review.openstack.org/#/c/409921/ - -Alternatives ------------- - -As an alternative, heat templates can be edited manually to allow customization -before uploading. - -Security Impact ---------------- - -The Deployer could accidentally misconfigure the network topology, and thereby -cause data to be exposed. - -Other End User Impact ---------------------- - -Performance Impact ------------------- - -The addition of the configuration wizard and the network topology diagram should -have no performance impact on the amount of time needed to run a deployment. - -Other Deployer Impact ---------------------- - -Developer Impact ----------------- - -As with any new substantial feature, the impact on the developer is cognitive. 
-We will have to gain a detailed understanding of network configuration in -``network_data.yaml``. Also, testing will add overhead on our efforts. - -Implementation -============== - -We can proceed with implementation immediately. - -Assignee(s) ------------ - -Primary assignee: - hpokorny - -Work Items ----------- - -* Network configuration wizard - - Reading data from the backend - - Saving changes - - UI based on wireframes -* Network topology diagram - - Investigate suitable javascript libraries - - UI based on wireframes - -Dependencies -============ - -* The presence of ``roles_data.yaml`` and ``network_data.yaml`` in the plan -* A javascript library for drawing the diagram - -Testing -======= - -Testing shouldn't pose any real challenges with the exception of the network -topology diagram rendering. At best, this is currently unknown as it depends on -the chosen javascript library. Verifying that the correct diagram is displayed -using automated testing might be non-trivial. - -Documentation Impact -==================== - -We should document the new settings introduced by the wizard. The documentation -should be transferable between the heat template project, and TripleO UI. - -References -========== diff --git a/specs/queens/tripleo-messaging.rst b/specs/queens/tripleo-messaging.rst deleted file mode 100644 index c73a33e3..00000000 --- a/specs/queens/tripleo-messaging.rst +++ /dev/null @@ -1,316 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -============================================== -Tripleo RPC and Notification Messaging Support -============================================== - -https://blueprints.launchpad.net/tripleo - -This specification proposes changes to tripleo to enable the selection -and configuration of separate messaging backends for oslo.messaging -RPC and Notification communications. 
This proposal is a derivative of -the work associated with the original blueprint [1]_ and specification -[2]_ to enable dual backends for oslo.messaging in tripleo. - -Most of the groundwork to enable dual backends was implemented during -the pike release and the introduction of an alternative messaging -backend (qdrouterd) service was made. Presently, the deployment of this -alternative messaging backend is accomplished by aliasing the rabbitmq -service as the tripleo implementation does not model separate -messaging backends. - -Problem Description -=================== - -The oslo.messaging library supports the deployment of dual messaging -system backends for RPC and Notification communications. However, tripleo -currently deploys a single rabbitmq server (cluster) that serves as a -single messaging backend for both RPC and Notifications. - -:: - - +------------+ +----------+ - | RPC Caller | | Notifier | - +-----+------+ +----+-----+ - | | - +--+ +--+ - | | - v v - +-+---------------+-+ - | RabbitMQ Service | - | Messaging Backend | - | | - +-+---------------+-+ - ^ ^ - | | - +--+ +--+ - | | - v v - +------+-----+ +------+-------+ - | RPC | | Notification | - | Server | | Server | - +------------+ +--------------+ - - -To support two separate and distinct messaging backends, tripleo needs -to "duplicate" the set of parameters needed to specify each messaging -system. The oslo.messaging library in OpenStack provides the API to the -messaging services. It is proposed that the implementation model the -RPC and Notification messaging services in place of the backend -messaging server (e.g. rabbitmq). 
- -:: - - +------------+ +----------+ - | RPC Caller | | Notifier | - +-----+------+ +----+-----+ - | | - | | - v v - +-------------------+ +-------------------+ - | RPC | | Notification | - | Messaging Service | | Messaging Service | - | | | | - +--------+----------+ +--------+----------+ - | | - | | - v v - +------------+ +------+-------+ - | RPC | | Notification | - | Server | | Server | - +------------+ +--------------+ - - -Introducing the separate messaging services and associated parameters in place -of the rabbitmq server is not a major rework but special consideration -must be made to upgrade paths and capabilities to ensure that existing -configurations are not impacted. - -Having separate messaging backends for RPC and Notification -communications provides a number of benefits. These benefits include: - -* tuning the backend to the messaging patterns -* increased aggregate message capacity -* reduced applied load to messaging servers -* increased message throughput -* reduced message latency -* etc. - - -Proposed Change -=============== - -A number of issues need to be resolved in order to express RPC -and Notification messaging services on top of the backend messaging systems. - -Overview --------- - -The proposed change is similar to the concept of a service "backend" -that is configured by tripleo. A number of existing services support -such a backend (or plugin) model. 
The implementation of a messaging -service backend model should account for the following requirements: - -* deploy a single messaging backend for both RPC and Notifications -* deploy a messaging backend twice, once for RPC and once for - Notifications -* deploy a messaging backend for RPC and a different messaging backend - for Notifications -* deploy an external messaging backend for RPC -* deploy an external messaging backend for Notifications - -Generally, the parameters that were required for deployment of the -rabbitmq service should be duplicated and renamed to "RPC Messaging" -and "Notify Messaging" backend service definitions. Individual backend -files would exist for each possible backend type (e.g. rabbitmq, -qdrouterd, zeromq, kafka or external). The backend selected will -correspondingly define the messaging transport for the messaging -system. - -* transport specifier -* username -* password (and generation) -* host -* port -* virtual host(s) -* ssl (enabled) -* ssl configuration -* health checks - -Tripleo should continue to have a default configuration that deploys -RPC and Notifications messaging services on top of a single rabbitmq -backend server (cluster). Tripleo upgrades should map the legacy -rabbitmq service deployment onto the RPC and Notification messaging -services model. - - -Alternatives ------------- - -The configuration of separate messaging backends could be post -overcloud deployment (e.g. external to tripleo framework). This would -be problematic over the lifecycle of deployments e.g. during upgrades etc. - -Security Impact ---------------- - -The deployment of dual messaging backends for RPC and Notification -communications should be the same from a security standpoint. This -assumes the backends have parity from a security feature -perspective, e.g authentication and encryption. 
- -Other End User Impact ---------------------- - -Depending on the configuration of the messaging backend deployment, -there could be a number of end user impacts including the following: - -* monitoring of separated messaging backend services -* understanding differences in functionality/behaviors between different - messaging backends (e.g. broker versus router, etc.) -* handling exceptions (e.g. different places for logs, etc.) - -Performance Impact ------------------- - -Using separate messaging systems for RPC and Notifications should -have a positive impact on performance and scalability by: - -* separating RPC and Notification messaging loads -* increased parallelism in message processing -* increased aggregate message transfer capacity -* tuned backend configuration aligned to messaging patterns - -Other Deployer Impact ---------------------- - -The deployment of hybrid messaging will be new to OpenStack -operators. Operators will need to learn the architectural differences -as compared to a single backend deployment. This will include capacity -planning, monitoring, troubleshooting and maintenance best practices. - -Developer Impact ----------------- - -Discuss things that will affect other developers working on OpenStack. - - -Implementation -============== - -Assignee(s) ------------ - -Primary assignee: - -* Andy Smith - -* John Eckersberg - -Work Items ----------- - -tripleo-heat-templates: - -* Modify *puppet/services/base.yaml* to introduce separate RPC and - Notification Messaging parameters (e.g. replace 'rabbit' parameters) -* Support two ssl environments (e.g. 
 one for RPC and one for - Notification when separate backends are deployed) -* Consider example backend model such as the following: - -:: - - tripleo-heat-templates - | - +--+ /environments - | | - | +--+ /messaging - | | - | +--+ messaging-(rpc/notify)-rabbitmq.yaml - | +--+ messaging-(rpc/notify)-qdrouterd.yaml - | +--+ messaging-(rpc/notify)-zmq.yaml - | +--+ messaging-(rpc/notify)-kafka.yaml - +--+ /puppet - | | - | +--+ /services - | | - | +--+ messaging-(rpc/notify)-backend-rabbitmq.yaml - | +--+ messaging-(rpc/notify)-backend-qdrouterd.yaml - | +--+ messaging-(rpc/notify)-backend-zmq.yaml - | +--+ messaging-(rpc/notify)-backend-kafka.yaml - | - +--+ /roles - - -puppet_tripleo: - -* Replace rabbitmq_node_names with messaging_rpc_node_names and - messaging_notify_node_names or similar -* Add vhost support -* Consider example backend model such as the following: - -:: - - puppet-tripleo - | - +--+ /manifests - | - +--+ /profile - | - +--+ /base - | - +--+ /messaging - | - +--+ backend.pp - +--+ rpc.pp - +--+ notify.pp - | - +--+ /backend - | - +--+ rabbitmq.pp - +--+ qdrouterd.pp - +--+ zmq.pp - +--+ kafka.pp - - -tripleo_common: - -* Add user and password management for RPC and Messaging services -* Support distinct health checks for separated messaging backends - -pacemaker: - -* Determine what should happen when two separate rabbitmq clusters - are deployed. Does this result in two pacemaker services or one? - Some experimentation may be required. - -Dependencies -============ - -None. - -Testing -======= - -In order to test this in CI, an environment will be needed where separate -messaging system backends (e.g. rabbitMQ server and dispatch-router -server) are deployed. Any existing hardware configuration should be -appropriate for the dual backend deployment. - - -Documentation Impact -==================== - -The deployment documentation will need to be updated to cover the -configuration of the separate messaging (RPC and Notify) services. 
 - - -References -========== - -.. [1] https://blueprints.launchpad.net/tripleo/+spec/om-dual-backends -.. [2] https://review.openstack.org/#/c/396740/ diff --git a/specs/queens/tripleo-ptp.rst b/specs/queens/tripleo-ptp.rst deleted file mode 100644 index 30818cfd..00000000 --- a/specs/queens/tripleo-ptp.rst +++ /dev/null @@ -1,141 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -============================================= -TripleO PTP (Precision Time Protocol) Support -============================================= - -https://blueprints.launchpad.net/tripleo/+spec/tripleo-ptp - -This spec introduces support for a time synchronization method called PTP [0] -which provides better time accuracy than NTP in general. With hardware -timestamping support on the host, PTP can achieve clock accuracy in the -sub-microsecond range, making it suitable for measurement and control systems. - -Problem Description -=================== - -Currently tripleo deploys NTP services by default which provide millisecond -level time accuracy, but this is not enough for some cases: - -* Fault/Error events will include timestamps placed on the associated event - messages, retrieved by detectors with the purpose of accurately identifying - the time that the event occurred. Given that the target Fault Management - cycle timelines are in tens of milliseconds on most critical faults, events - ordering may reverse against actual time if precision and accuracy of clock - synchronization are in the same level of accuracy. - -* NFV C-RAN (Cloud Radio Access Network) is looking for better time - synchronization and distribution in micro-second level accuracy as alternative - for NTP, PTP has been evaluated as one of the technologies. 
- -This spec is not intended to cover all the possible ways of PTP usage, rather -to provide a basic deployment path for PTP in tripleo with default -configuration set to support PTP Ordinary Clock (slave mode); the master mode -ptp clock configuration is not in the scope of this spec, but shall be deployed -by user to provide the time source for the PTP Ordinary Clock. The full support -of PTP capability can be enhanced further based on this spec. - -User shall be aware of the fact that NTP and PTP can not be configured together -on the same node without a coordinator program like timemaster which is also -provided by linuxptp package. How to configure and use timemaster is not in the -scope of this spec. - -Proposed Change -=============== - -Overview --------- - -Provide the capability to configure PTP as time synchronization method: - -* Add PTP configuration file path in overcloud resource registry. - -* Add puppet-tripleo profile for PTP services. - -* Add tripleo-heat-templates composable service for PTP. - -Retain the current default behavior to deploy NTP as time synchronization -source: - -* The NTP services remain unchanged as the default time synchronization method. - -* The NTP services must be disabled on nodes where PTP are deployed. - -Alternatives ------------- - -The alternative is to continue to use NTP. - -Security Impact ---------------- - -Security issues originated from PTP will need to be considered. - -Other End User Impact ---------------------- - -Users will get more accurate time from PTP. - -Performance Impact ------------------- - -No impact with default deployment mode which uses NTP as time source. - -Other Deployer Impact ---------------------- - -The operator who wants to use PTP should identify and provide the PTP capable -network interface name and make sure NTP is not deployed on the nodes where PTP -will be deployed. 
The default PTP network interface name is set to 'nic1' where -user should change it according to real interface name. By default, PTP will -not be deployed unless explicitly configured. - -Developer Impact ---------------- - -None - -Implementation -============== - -Assignee(s) ----------- - -Primary assignee: - zshi - -Work Items ---------- - -* Puppet-tripleo profile for PTP services -* Tripleo-heat-templates composable service for PTP deployment - -Dependencies -============ - -* Puppet module for PTP services: ptp [1] -* The linuxptp RPM must be installed, and PTP capable NIC must be identified. -* Refer to linuxptp project page [2] for the list of drivers that support the - PHC (Physical Hardware Clock) subsystem. - -Testing -======= - -The deployment of PTP should be testable in CI. - -Documentation Impact -==================== - -The deployment documentation will need to be updated to cover the configuration of -PTP. - -References -========== - -* [0] https://standards.ieee.org/findstds/standard/1588-2008.html -* [1] https://github.com/redhat-nfvpe/ptp -* [2] http://linuxptp.sourceforge.net diff --git a/specs/queens/tripleo-routed-networks-deployment.rst b/specs/queens/tripleo-routed-networks-deployment.rst deleted file mode 100644 index c89ad8d8..00000000 --- a/specs/queens/tripleo-routed-networks-deployment.rst +++ /dev/null @@ -1,733 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -======================================================== -TripleO Routed Networks Deployment (Spine-and-Leaf Clos) -======================================================== - -https://blueprints.launchpad.net/tripleo/+spec/tripleo-routed-networks-deployment - -TripleO uses shared L2 networks today, so each node is attached to the -provisioning network, and any other networks are also shared. 
This -significantly reduces the complexity required to deploy on bare metal, -since DHCP and PXE booting are simply done over a shared broadcast domain. -This also makes the network switch configuration easy, since there is only -a need to configure VLANs and ports, but no added complexity from dynamic -routing between all switches. - -This design has limitations, however, and becomes unwieldy beyond a certain -scale. As the number of nodes increases, the background traffic from Broadcast, -Unicast, and Multicast (BUM) traffic also increases. This design also requires -all top-of-rack switches to trunk the VLANs back to the core switches, which -centralizes the layer 3 gateway, usually on a single core switch. That creates -a bottleneck which is not present in Clos architecture. - -This spec serves as a detailed description of the overall problem set, and -applies to the master blueprint. The sub-blueprints for the various -implementation items also have their own associated spec. - -Problem Description -=================== - -Where possible, modern high-performance datacenter networks typically use -routed networking to increase scalability and reduce failure domains. Using -routed networks makes it possible to optimize a Clos (also known as -"spine-and-leaf") architecture for scalability:: - - ,=========. ,=========. - | spine 1 |__ __| spine 2 | - '==|\=====\_ \__________________/ _/=====/|==' - | \_ \___ / \ ___/ _/ | ^ - | \___ / \ _______ / \ ___/ | |-- Dynamic routing (BGP, OSPF, - | / \ / \ / \ | v EIGRP) - ,------. ,------ ,------. ,------. - |leaf 1|....|leaf 2| |leaf 3|....|leaf 4| ======== Layer 2/3 boundary - '------' '------' '------' '------' - | | | | - | | | | - |-[serv-A1]=-| |-[serv-B1]=-| - |-[serv-A2]=-| |-[serv-B2]=-| - |-[serv-A3]=-| |-[serv-B3]=-| - Rack A Rack B - - - -In the above diagram, each server is connected via an Ethernet bond to both -top-of-rack leaf switches, which are clustered and configured as a virtual -switch chassis. 
Each leaf switch is attached to each spine switch. Within each -rack, all servers share a layer 2 domain. The subnets are local to the rack, -and the default gateway is the top-of-rack virtual switch pair. Dynamic routing -between the leaf switches and the spine switches permits East-West traffic -between the racks. - -This is just one example of a routed network architecture. The layer 3 routing -could also be done only on the spine switches, or there may even be distribution -level switches that sit in between the top-of-rack switches and the routed core. -The distinguishing feature that we are trying to enable is segregating local -systems within a layer 2 domain, with routing between domains. - -In a shared layer-2 architecture, the spine switches typically have to act in an -active/passive mode to act as the L3 gateway for the single shared VLAN. All -leaf switches must be attached to the active switch, and the limit on North-South -bandwidth is the connection to the active switch, so there is an upper bound on -the scalability. The Clos topology is favored because it provides horizontal -scalability. Additional spine switches can be added to increase East-West and -North-South bandwidth. Equal-cost multipath routing between switches ensures -that all links are utlized simultaneously. If all ports are full on the spine -switches, an additional tier can be added to connect additional spines, -each with their own set of leaf switches, providing hyperscale expandability. - -Each network device may be taken out of service for maintenance without the entire -network being offline. This topology also allows the switches to be configured -without physical loops or Spanning Tree, since the redundant links are either -delivered via bonding or via multiple layer 3 uplink paths with equal metrics. -Some advantages of using this architecture with separate subnets per rack are: - -* Reduced domain for broadcast, unknown unicast, and multicast (BUM) traffic. 
-* Reduced failure domain. -* Geographical separation. -* Association between IP address and rack location. -* Better cross-vendor support for multipath forwarding using equal-cost - multipath forwarding (ECMP) via L3 routing, instead of proprietary "fabric". - -This topology is significantly different from the shared-everything approach that -TripleO takes today. - -Problem Descriptions -==================== - -As this is a complex topic, it will be easier to break the problems down into -their constituent parts, based on which part of TripleO they affect: - -**Problem #1: TripleO uses DHCP/PXE on the Undercloud provisioning net (ctlplane).** - -Neutron on the undercloud does not yet support DHCP relays and multiple L2 -subnets, since it does DHCP/PXE directly on the provisioning network. - -Possible Solutions, Ideas, or Approaches: - -1. Modify Ironic and/or Neutron to support multiple DHCP ranges in the dnsmasq - configuration, use DHCP relay running on top-of-rack switches which - receives DHCP requests and forwards them to dnsmasq on the Undercloud. - There is a patch in progress to support that [11]_. -2. Modify Neutron to support DHCP relay. There is a patch in progress to - support that [10]_. - -Currently, if one adds a subnet to a network, Neutron DHCP agent will pick up -the changes and configure separate subnets correctly in ``dnsmasq``. 
For instance, -after adding a second subnet to the ``ctlplane`` network, here is the resulting -startup command for Neutron's instance of dnsmasq:: - - dnsmasq --no-hosts --no-resolv --strict-order --except-interface=lo \ - --pid-file=/var/lib/neutron/dhcp/aae53442-204e-4c8e-8a84-55baaeb496cf/pid \ - --dhcp-hostsfile=/var/lib/neutron/dhcp/aae53442-204e-4c8e-8a84-55baaeb496cf/host \ - --addn-hosts=/var/lib/neutron/dhcp/aae53442-204e-4c8e-8a84-55baaeb496cf/addn_hosts \ - --dhcp-optsfile=/var/lib/neutron/dhcp/aae53442-204e-4c8e-8a84-55baaeb496cf/opts \ - --dhcp-leasefile=/var/lib/neutron/dhcp/aae53442-204e-4c8e-8a84-55baaeb496cf/leases \ - --dhcp-match=set:ipxe,175 --bind-interfaces --interface=tap4ccef953-e0 \ - --dhcp-range=set:tag0,172.19.0.0,static,86400s \ - --dhcp-range=set:tag1,172.20.0.0,static,86400s \ - --dhcp-option-force=option:mtu,1500 --dhcp-lease-max=512 \ - --conf-file=/etc/dnsmasq-ironic.conf --domain=openstacklocal - -The router information gets put into the dhcp-optsfile, here are the contents -of /var/lib/neutron/dhcp/aae53442-204e-4c8e-8a84-55baaeb496cf/opts:: - - tag:tag0,option:classless-static-route,172.20.0.0/24,0.0.0.0,0.0.0.0/0,172.19.0.254 - tag:tag0,249,172.20.0.0/24,0.0.0.0,0.0.0.0/0,172.19.0.254 - tag:tag0,option:router,172.19.0.254 - tag:tag1,option:classless-static-route,169.254.169.254/32,172.20.0.1,172.19.0.0/24,0.0.0.0,0.0.0.0/0,172.20.0.254 - tag:tag1,249,169.254.169.254/32,172.20.0.1,172.19.0.0/24,0.0.0.0,0.0.0.0/0,172.20.0.254 - tag:tag1,option:router,172.20.0.254 - -The above options file will result in separate routers being handed out to -separate IP subnets. Furthermore, Neutron appears to "do the right thing" with -regard to routes for other subnets on the same network. We can see that the -option "classless-static-route" is given, with pointers to both the default -route and the other subnet(s) on the same Neutron network. 
- -In order to modify Ironic-Inspector to use multiple subnets, we will need to -extend instack-undercloud to support network segments. There is a patch in -review to support segments in instack undercloud [0]_. - -**Potential Workaround** - -One possibility is to use an alternate method to DHCP/PXE boot, such as using -DHCP configuration directly on the router, or to configure a host on the remote -network which provides DHCP and PXE URLs, then provides routes back to the -ironic-conductor and metadata server as part of the DHCP response. - -It is not always feasible for groups doing testing or development to configure -DHCP relay on the switches. For proof-of-concept implementations of -spine-and-leaf, we may want to configure all provisioning networks to be -trunked back to the Undercloud. This would allow the Undercloud to provide DHCP -for all networks without special switch configuration. In this case, the -Undercloud would act as a router between subnets/VLANs. This should be -considered a small-scale solution, as this is not as scalable as DHCP relay. -The configuration file for dnsmasq is the same whether all subnets are local or -remote, but dnsmasq may have to listen on multiple interfaces (today it only -listens on br-ctlplane). The dnsmasq process currently runs with -``--bind-interface=tap-XXX``, but the process will need to be run with either -binding to multiple interfaces, or with ``--except-interface=lo`` and multiple -interfaces bound to the namespace. - -For proof-of-concept deployments, as well as testing environments, it might -make sense to run a DHCP relay on the Undercloud, and trunk all provisioning -VLANs back to the Undercloud. This would allow dnsmasq to listen on the tap -interface, and DHCP requests would be forwarded to the tap interface. The -downside of this approach is that the Undercloud would need to have IP -addresses on each of the trunked interfaces. 
- -Another option is to configure dedicated hosts or VMs to be used as DHCP relay -and router for subnets on multiple VLANs, all of which would be trunked to the -relay/router host, thus acting exactly like routing switches. - ------------- - -**Problem #2: Neutron's model for a segmented network that spans multiple L2 -domains uses the segment object to allow multiple subnets to be assigned to -the same network. This functionality needs to be integrated into the -Undercloud.** - -Possible Solutions, Ideas, or Approaches: - -1. Implement Neutron segments on the undercloud. - -The spec for Neutron routed network segments [1]_ provides a schema that we can -use to model a routed network. By implementing support for network segments, we -can provide assign Ironic nodes to networks on routed subnets. This allows us -to continue to use Neutron for IP address management, as ports are assigned by -Neutron and tracked in the Neutron database on the Undercloud. See approach #1 -below. - -2. Multiple Neutron networks (1 set per rack), to model all L2 segments. - -By using a different set of networks in each rack, this provides us with -the flexibility to use different network architectures on a per-rack basis. -Each rack could have its own set of networks, and we would no longer have -to provide all networks in all racks. Additionally, a split-datacenter -architecture would naturally have a different set of networks in each -site, so this approach makes sense. This is detailed in approach #2 below. - -3. Multiple subnets per Neutron network. - -This is probably the best approach for provisioning, since Neutron is -already able to handle DHCP relay with multiple subnets as part of the -same network. Additionally, this allows a clean separation between local -subnets associated with provisioning, and networks which are used -in the overcloud, such as External networks in two different datacenters). -This is covered in more detail in approach #3 below. - -4. 
Use another system for IPAM, instead of Neutron. - -Although we could use a database, flat file, or some other method to keep -track of IP addresses, Neutron as an IPAM back-end provides many integration -benefits. Neutron integrates DHCP, hardware switch port configuration (through -the use of plugins), integration in Ironic, and other features such as -IPv6 support. This has been deemed to be infeasible due to the level of effort -required in replacing both Neutron and the Neutron DHCP server (dnsmasq). - -**Approaches to Problem #2:** - -Approach 1 (Implement Neutron segments on the Undercloud): - -The Neutron segments model provides a schema in Neutron that allows us to -model the routed network. Using multiple subnets provides the flexibility -we need without creating exponentially more resources. We would create the same -provisioning network that we do today, but use multiple segments associated -to different routed subnets. The disadvantage to this approach is that it makes -it impossible to represent network VLANs with more than one IP subnet (Neutron -technically supports more than one subnet per port). Currently TripleO only -supports a single subnet per isolated network, so this should not be an issue. - -Approach 2 (Multiple Neutron networks (1 set per rack), to model all L2 segments): - -We will be using multiple networks to represent isolated networks in multiple -L2 domains. One sticking point is that although Neutron will configure multiple -routes for multiple subnets within a given network, we need to be able to both -configure static IPs and routes, and be able to scale the network by adding -additional subnets after initial deployment. - -Since we control addresses and routes on the host nodes using a -combination of Heat templates and os-net-config, it is possible to use -static routes to supernets to provide L2 adjacency. 
This approach only -works for non-provisioning networks, since we rely on Neutron DHCP servers -providing routes to adjacent subnets for the provisioning network. - -Example: -Suppose 2 subnets are provided for the Internal API network: ``172.19.1.0/24`` -and ``172.19.2.0/24``. We want all Internal API traffic to traverse the Internal -API VLANs on both the controller and a remote compute node. The Internal API -network uses different VLANs for the two nodes, so we need the routes on the -hosts to point toward the Internal API gateway instead of the default gateway. -This can be provided by a supernet route to 172.19.x.x pointing to the local -gateway on each subnet (e.g. 172.19.1.1 and 172.19.2.1 on the respective -subnets). This could be represented in os-net-config with the following:: - - - - type: interface - name: nic3 - addresses: - - - ip_netmask: {get_param: InternalApiIpSubnet} - routes: - - - ip_netmask: {get_param: InternalApiSupernet} - next_hop: {get_param: InternalApiRouter} - -Where InternalApiIpSubnet is the IP address on the local subnet, -InternalApiSupernet is '172.19.0.0/16', and InternalApiRouter is either -172.19.1.1 or 172.19.2.1 depending on which local subnet the host belongs to. - -The end result of this is that each host has a set of IP addresses and routes -that isolate traffic by function. In order for the return traffic to also be -isolated by function, similar routes must exist on both hosts, pointing to the -local gateway on the local subnet for the larger supernet that contains all -Internal API subnets. - -The downside of this is that we must require proper supernetting, and this may -lead to larger blocks of IP addresses being used to provide ample space for -scaling growth. For instance, in the example above an entire /16 network is set -aside for up to 255 local subnets for the Internal API network. This could be -changed into a more reasonable space, such as /18, if the number of local -subnets will not exceed 64, etc. 
This will be less of an issue with native IPv6 -than with IPv4, where scarcity is much more likely. - -Approach 3 (Multiple subnets per Neutron network): - -The approach we will use for the provisioning network will be to use multiple -subnets per network, using Neutron segments. This will allow us to take -advantage of Neutron's ability to support multiple networks with DHCP relay. -The DHCP server will supply the necessary routes via DHCP until the nodes are -configured with a static IP post-deployment. - ---------- - -**Problem #3: Ironic introspection DHCP doesn't yet support DHCP relay** - -This makes it difficult to do introspection when the hosts are not on the same L2 -domain as the controllers. Patches are either merged or in review to support -DHCP relay. - -Possible Solutions, Ideas, or Approaches: - -1. A patch to support a dnsmasq PXE filter driver has been merged. This will - allow us to support selective DHCP when using DHCP relay (where the packet - is not coming from the MAC of the host but rather the MAC of the switch) - [12]_. - -2. A patch has been merged to puppet-ironic to support multiple DHCP subnets - for Ironic Inspector [13]_. - -3. A patch is in review to add support for multiple subnets for the - provisioning network in the instack-undercloud scripts [14]_. - -For more information about solutions, please refer to the -tripleo-routed-networks-ironic-inspector blueprint [5]_ and spec [6]_. - -------- - -**Problem #4: The IP addresses on the provisioning network need to be -static IPs for production.** - -Possible Solutions, Ideas, or Approaches: - -1. Dan Prince wrote a patch [9]_ in Newton to convert the ctlplane network - addresses to static addresses post-deployment. This will need to be - refactored to support multiple provisioning subnets across routers. - -Solution Implementation - -This work is done and merged for the legacy use case. 
During the -initial deployment, the nodes receive their IP address via DHCP, but during -Heat deployment the os-net-config script is called, which writes static -configuration files for the NICs with static IPs. - -This work will need to be refactored to support assigning IPs from the -appropriate subnet, but the work will be part of the TripleO Heat Template -refactoring listed in Problems #6, and #7 below. - -For the deployment model where the IPs are specified (ips-from-pool-all.yaml), -we need to develop a model where the Control Plane IP can be specified -on multiple deployment subnets. This may happen in a later cycle than the -initial work being done to enable routed networks in TripleO. For more -information, reference the tripleo-predictable-ctlplane-ips blueprint [7]_ -and spec [8]_. - ------- - -**Problem #5: Heat Support For Routed Networks** - -The Neutron routed networks extensions were only added in recent releases, and -there was a dependency on TripleO Heat Templates. - -Possible Solutions, Ideas or Approaches: - -1. Add the required objects to Heat. At minimum, we will probably have to - add ``OS::Neutron::Segment``, which represents layer 2 segments, the - ``OS::Neutron::Network`` will be updated to support the ``l2-adjacency`` - attribute, ``OS::Neutron::Subnet`` and ``OS::Neutron:port`` would be extended - to support the ``segment_id`` attribute. - -Solution Implementation: - -Heat now supports the OS::Neutron::Segment resource. For example:: - - heat_template_version: 2015-04-30 - ... - resources: - ... - the_resource: - type: OS::Neutron::Segment - properties: - description: String - name: String - network: String - network_type: String - physical_network: String - segmentation_id: Integer - -This work has been completed in Heat with this review [15]_. 
- ------- - -**Problem #6: Static IP assignment: Choosing static IPs from the correct -subnet** - -Some roles, such as Compute, can likely be placed in any subnet, but we will -need to keep certain roles co-located within the same set of L2 domains. For -instance, whatever role is providing Neutron services will need all controllers -in the same L2 domain for VRRP to work properly. - -The network interfaces will be configured using templates that create -configuration files for os-net-config. The IP addresses that are written to each -node's configuration will need to be on the correct subnet for each host. In -order for Heat to assign ports from the correct subnets, we will need to have a -host-to-subnets mapping. - -Possible Solutions, Ideas or Approaches: - -1. The simplest implementation of this would probably be a mapping of role/index - to a set of subnets, so that it is known to Heat that Controller-1 is in - subnet set X and Compute-3 is in subnet set Y. -2. We could associate particular subnets with roles, and then use one role - per L2 domain (such as per-rack). -3. The roles and templates should be refactored to allow for dynamic IP - assignment within subnets associated with the role. We may wish to evaluate - the possibility of storing the routed subnets in Neutron using the routed - networks extensions that are still under development. This would provide - additional flexibility, but is probably not required to implement separate - subnets in each rack. -4. A scalable long-term solution is to map which subnet the host is on - during introspection. If we can identify the correct subnet for each - interface, then we can correlate that with IP addresses from the correct - allocation pool. This would have the advantage of not requiring a static - mapping of role to node to subnet. 
In order to do this, additional - integration would be required between Ironic and Neutron (to make Ironic - aware of multiple subnets per network, and to add the ability to make - that association during introspection). - -Solution Impelementation: - -Solutions 1 and 2 above have been implemented in the "composable roles" series -of patches [16]_. The initial implementation uses separate Neutron networks -for different L2 domains. These templates are responsible for assigning the -isolated VLANs used for data plane and overcloud control planes, but does not -address the provisioning network. Future work may refactor the non-provisioning -networks to use segments, but for now non-provisioning networks must use -different networks for different roles. - -Ironic autodiscovery may allow us to determine the subnet where each node -is located without manual entry. More work is required to automate this -process. - ------- - -**Problem #7: Isolated Networking Requires Static Routes to Ensure Correct VLAN -is Used** - -In order to continue using the Isolated Networks model, routes will need to be -in place on each node, to steer traffic to the correct VLAN interfaces. The -routes are written when os-net-config first runs, but may change. We -can't just rely on the specific routes to other subnets, since the number of -subnets will increase or decrease as racks are added or taken away. Rather than -try to deal with constantly changing routes, we should use static routes that -will not need to change, to avoid disruption on a running system. - -Possible Solutions, Ideas or Approaches: - -1. Require that supernets are used for various network groups. For instance, - all the Internal API subnets would be part of a supernet, for instance - 172.17.0.0/16 could be used, and broken up into many smaller subnets, such - as /24. This would simplify the routes, since only a single route for - 172.17.0.0/16 would be required pointing to the local router on the - 172.17.x.0/24 network. 
-2. Modify os-net-config so that routes can be updated without bouncing - interfaces, and then run os-net-config on all nodes when scaling occurs. - A review for this functionality was considered and abandeded [3]_. - The patch was determined to have the potential to lead to instability. - -os-net-config configures static routes for each interface. If we can keep the -routing simple (one route per functional network), then we would be able to -isolate traffic onto functional VLANs like we do today. - -It would be a change to the existing workflow to have os-net-config run on -updates as well as deployment, but if this were a non-impacting event (the -interfaces didn't have to be bounced), that would probably be OK. - -At a later time, the possibility of using dynamic routing should be considered, -since it reduces the possibility of user error and is better suited to -centralized management. SDN solutions are one way to provide this, or other -approaches may be considered, such as setting up OVS tunnels. - -Proposed Change -=============== -The proposed changes are discussed below. - -Overview --------- - -In order to provide spine-and-leaf networking for deployments, several changes -will have to be made to TripleO: - -1. Support for DHCP relay in Ironic and Neutron DHCP servers. Implemented in - patch [15]_ and the patch series [17]_. -2. Refactoring of TripleO Heat Templates network isolation to support multiple - subnets per isolated network, as well as per-subnet and supernet routes. - The bulk of this work is done in the patch series [16]_ and in patch [10]_. -3. Changes to Infra CI to support testing. -4. Documentation updates. - -Alternatives ------------- - -The approach outlined here is very prescriptive, in that the networks must be -known ahead of time, and the IP addresses must be selected from the appropriate -pool. This is due to the reliance on static IP addresses provided by Heat. 
- -One alternative approach is to use DHCP servers to assign IP addresses on all -hosts on all interfaces. This would simplify configuration within the Heat -templates and environment files. Unfortunately, this was the original approach -of TripleO, and it was deemed insufficient by end-users, who wanted stability -of IP addresses, and didn't want to have an external dependency on DHCP. - -Another approach is to use the DHCP server functionality in the network switch -infrastructure in order to PXE boot systems, then assign static IP addresses -after the PXE boot is done via DHCP. This approach only solves for part of the -requirement: the net booting. It does not solve the desire to have static IP -addresses on each network. This could be achieved by having static IP addresses -in some sort of per-node map. However, this approach is not as scalable as -programatically determining the IPs, since it only applies to a fixed number of -hosts. We want to retain the ability of using Neutron as an IP address -management (IPAM) back-end, ideally. - -Another approach which was considered was simply trunking all networks back -to the Undercloud, so that dnsmasq could respond to DHCP requests directly, -rather than requiring a DHCP relay. Unfortunately, this has already been -identified as being unacceptable by some large operators, who have network -architectures that make heavy use of L2 segregation via routers. This also -won't work well in situations where there is geographical separation between -the VLANs, such as in split-site deployments. - -Security Impact ---------------- - -One of the major differences between spine-and-leaf and standard isolated -networking is that the various subnets are connected by routers, rather than -being completely isolated. This means that without proper ACLs on the routers, -networks which should be private may be opened up to outside traffic. 
- -This should be addressed in the documentation, and it should be stressed that -ACLs should be in place to prevent unwanted network traffic. For instance, the -Internal API network is sensitive in that the database and message queue -services run on that network. It is supposed to be isolated from outside -connections. This can be achieved fairly easily if *supernets* are used, so -that if all Internal API subnets are a part of the ``172.19.0.0/16`` supernet, -an ACL rule will allow only traffic between Internal API IPs (this is a -simplified example that could be applied to any Internal API VLAN, or as a -global ACL):: - - allow traffic from 172.19.0.0/16 to 172.19.0.0/16 - deny traffic from * to 172.19.0.0/16 - -Other End User Impact ---------------------- - -Deploying with spine-and-leaf will require additional parameters to -provide the routing information and multiple subnets required. This will have -to be documented. Furthermore, the validation scripts may need to be updated -to ensure that the configuration is validated, and that there is proper -connectivity between overcloud hosts. - -Performance Impact ------------------- - -Much of the traffic that is today made over layer 2 will be traversing layer -3 routing borders in this design. That adds some minimal latency and overhead, -although in practice the difference may not be noticeable. One important -consideration is that the routers must not be too overcommitted on their -uplinks, and the routers must be monitored to ensure that they are not acting -as a bottleneck, especially if complex access control lists are used. - -Other Deployer Impact ---------------------- - -A spine-and-leaf deployment will be more difficult to troubleshoot than a -deployment that simply uses a set of VLANs. The deployer may need to have -more network expertise, or a dedicated network engineer may be needed to -troubleshoot in some cases. 
- -Developer Impact ----------------- - -Spine-and-leaf is not easily tested in virt environments. This should be -possible, but due to the complexity of setting up libvirt bridges and -routes, we may want to provide a simulation of spine-and-leaf for use in -virtual environments. This may involve building multiple libvirt bridges -and routing between them on the Undercloud, or it may involve using a -DHCP relay on the virt-host as well as routing on the virt-host to simulate -a full routing switch. A plan for development and testing will need to be -developed, since not every developer can be expected to have a routed -environment to work in. It may take some time to develop a routed virtual -environment, so initial work will be done on bare metal. - -Implementation -============== - -Assignee(s) ------------ - -Primary assignee: - Dan Sneddon - -Approver(s) ------------ - -Primary approver: - Emilien Macchi - -Work Items ----------- - -1. Add static IP assignment to Control Plane [DONE] -2. Modify Ironic Inspector ``dnsmasq.conf`` generation to allow export of - multiple DHCP ranges, as described in Problem #1 and Problem #3. -3. Evaluate the Routed Networks work in Neutron, to determine if it is required - for spine-and-leaf, as described in Problem #2. -4. Add OS::Neutron::Segment and l2-adjacency support to Heat, as described - in Problem #5. This may or may not be a dependency for spine-and-leaf, based - on the results of work item #3. -5. Modify the Ironic-Inspector service to record the host-to-subnet mappings, - perhaps during introspection, to address Problem #6. -6. Add parameters to Isolated Networking model in Heat to support supernet - routes for individual subnets, as described in Problem #7. -7. Modify Isolated Networking model in Heat to support multiple subnets, as - described in Problem #8. -8. Add support for setting routes to supernets in os-net-config NIC templates, - as described in the proposed solution to Problem #2. -9. 
Implement support for iptables on the Controller, in order to mitigate - the APIs potentially being reachable via remote routes. Alternatively, - document the mitigation procedure using ACLs on the routers. -10. Document the testing procedures. -11. Modify the documentation in tripleo-docs to cover the spine-and-leaf case. - - -Implementation Details ----------------------- - -Workflow: - -1. Operator configures DHCP networks and IP address ranges -2. Operator imports baremetal instackenv.json -3. When introspection or deployment is run, the DHCP server receives the DHCP - request from the baremetal host via DHCP relay -4. If the node has not been introspected, reply with an IP address from the - introspection pool* and the inspector PXE boot image -5. If the node already has been introspected, then the server assumes this is - a deployment attempt, and replies with the Neutron port IP address and the - overcloud-full deployment image -6. The Heat templates are processed which generate os-net-config templates, and - os-net-config is run to assign static IPs from the correct subnets, as well - as routes to other subnets via the router gateway addresses. - -* The introspection pool will be different for each provisioning subnet. - -When using spine-and-leaf, the DHCP server will need to provide an introspection -IP address on the appropriate subnet, depending on the information contained in -the DHCP relay packet that is forwarded by the segment router. dnsmasq will -automatically match the gateway address (GIADDR) of the router that forwarded -the request to the subnet where the DHCP request was received, and will respond -with an IP and gateway appropriate for that subnet. - -The above workflow for the DHCP server should allow for provisioning IPs on -multiple subnets. - -Dependencies -============ - -There may be a dependency on the Neutron Routed Networks. 
This won't be clear -until a full evaluation is done on whether we can represent spine-and-leaf -using only multiple subnets per network. - -There will be a dependency on routing switches that perform DHCP relay service -for production spine-and-leaf deployments. - -Testing -======= - -In order to properly test this framework, we will need to establish at least -one CI test that deploys spine-and-leaf. As discussed in this spec, it isn't -necessary to have a full routed bare metal environment in order to test this -functionality, although there is some work to get it working in virtual -environments such as OVB. - -For bare metal testing, it is sufficient to trunk all VLANs back to the -Undercloud, then run DHCP proxy on the Undercloud to receive all the -requests and forward them to br-ctlplane, where dnsmasq listens. This -will provide a substitute for routers running DHCP relay. For Neutron -DHCP, some modifications to the iptables rule may be required to ensure -that all DHCP requests from the overcloud nodes are received by the -DHCP proxy and/or the Neutron dnsmasq process running in the dhcp-agent -namespace. - -Documentation Impact -==================== - -The procedure for setting up a dev environment will need to be documented, -and a work item mentions this requirement. - -The TripleO docs will need to be updated to include detailed instructions -for deploying in a spine-and-leaf environment, including the environment -setup. Covering specific vendor implementations of switch configurations -is outside this scope, but a specific overview of required configuration -options should be included, such as enabling DHCP relay (or "helper-address" -as it is also known) and setting the Undercloud as a server to receive -DHCP requests. - -The updates to TripleO docs will also have to include a detailed discussion -of choices to be made about IP addressing before a deployment. 
If supernets -are to be used for network isolation, then a good plan for IP addressing will -be required to ensure scalability in the future. - -References -========== - -.. [0] `Review: TripleO Heat Templates: Tripleo routed networks ironic inspector, and Undercloud `_ -.. [1] `Spec: Routed Networks for Neutron `_ -.. [3] `Review: Modify os-net-config to make changes without bouncing interface `_ -.. [5] `Blueprint: Modify TripleO Ironic Inspector to PXE Boot Via DHCP Relay `_ -.. [6] `Spec: Modify TripleO Ironic Inspector to PXE Boot Via DHCP Relay `_ -.. [7] `Blueprint: User-specifiable Control Plane IP on TripleO Routed Isolated Networks `_ -.. [8] `Spec: User-specifiable Control Plane IP on TripleO Routed Isolated Networks `_ -.. [9] `Review: Configure ctlplane network with a static IP `_ -.. [10] `Review: Neutron: Make "on-link" routes for subnets optional `_ -.. [11] `Review: Ironic Inspector: Make "on-link" routes for subnets optional `_ -.. [12] `Review: Ironic Inspector: Introducing a dnsmasq PXE filter driver `_ -.. [13] `Review: Multiple DHCP Subnets for Ironic Inspector `_ -.. [14] `Review: Instack Undercloud: Add support for multiple inspection subnets `_ -.. [15] `Review: DHCP Agent: Separate local from non-local subnets `_ -.. [16] `Review Series: topic:bp/composable-networks `_ -.. [17] `Review Series: project:openstack/networking-baremetal `_ diff --git a/specs/queens/tripleo_ansible_upgrades_workflow.rst b/specs/queens/tripleo_ansible_upgrades_workflow.rst deleted file mode 100644 index ca3839ee..00000000 --- a/specs/queens/tripleo_ansible_upgrades_workflow.rst +++ /dev/null @@ -1,190 +0,0 @@ - -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. 
- - http://creativecommons.org/licenses/by/3.0/legalcode - -========================================================== -TripleO - Ansible upgrade Worklow with UI integration -========================================================== - -Include the URL of your launchpad blueprint: - -https://blueprints.launchpad.net/tripleo/+spec/major-upgrade-workflow - -During the Pike cycle the minor update and some parts of the major upgrade -are significantly different to any previous cycle, in that they are *not* being -delivered onto nodes via Heat stack update. Rather, Heat stack update is used -to only collect, but not execute, the relevant ansible tasks defined in each -of the service manifests_ as upgrade_tasks_ or update_tasks_ accordingly. -These tasks are then written as stand-alone ansible playbooks in the stack -outputs_. - -These 'config' playbooks are then downloaded using the *openstack overcloud -config download* utility_ and finally executed to deliver the actual -upgrade or update. See bugs 1715557_ and 1708115_ for more information -(or pointers/reviews) about this mechanism as used during the P cycle. - -For Queens and as discussed at the Denver PTG_ we aim to extend this approach -to include the controlplane upgrade too. That is, instead of using HEAT -SoftwareConfig and Deployments_ to invoke_ ansible we should instead collect -the upgrade_tasks for the controlplane nodes into ansible playbooks that can -then be invoked to deliver the actual upgrade. - -Problem Description -=================== - -Whilst it has continually improved in each cycle, complexity and difficulty to -debug or understand what has been executed at any given point of the upgrade -is still one of the biggest complaints from operators about the TripleO -upgrades workflow. In the P cycle and as discussed above, the minor version -update and some part of the 'non-controller' upgrade have already moved to the -model being proposed here, i.e. 
generate ansible-playbooks with an initial heat -stack update and then execute them. - -If we are to use this approach for all parts of the upgrade, including for the -controlplane services then we will also need a mistral workbook that can handle -the download and execution of the ansible-playbook invocations. With this kind -of ansible driven workflow, executed by mistral action/workbook, we can for -the first time consider integration with the UI for upgrade/updates. This -aligns well with the effort_ by the UI team for feature parity in CLI/UI for -Queens. It should also be noted that there is already some work underway to -adding the required mistral actions, at least for the minor update for Pike -deployments in changes 487488_ and 487496_ - -Implementing a fully ansible-playbook delivered workflow for the entire major -upgrade workflow will offer a number of benefits: - - * very short initial heat stack update to generate the playbooks - * easier to follow and understand what is happening at a given step of the upgrade - * easier to debug and re-run any particular step of the upgrade - * implies full python-tripleoclient and mistral workbook support for the - ansible-playbook invocations. - * can consider integrating upgrades/updates into the UI, for the first time - -Proposed Change -=============== - -We will need an initial heat stack update to populate the -upgrade_tasks_playbook into the overcloud stack output (the cli is just a -suggestion): - - * openstack overcloud upgrade --init --init-commands [ "sudo curl -L -o /etc/yum.repos.d/delorean-pike.repo https://trunk.rdoproject.org/centos7-ocata/current/pike.repo", - "sudo yum install my_package", ... ] - -The first step of the upgrade will be used to deliver any required common -upgrade initialisation, such as switching repos to the target version, -installing any new packages required during the upgrade, and populating the upgrades playbooks. 
- -Then the operator will run the upgrade targeting specific nodes: - - * openstack overcloud upgrade --nodes [overcloud-novacompute-0, overcloud-novacompute-1] or - openstack overcloud upgrade --nodes "Compute" - -Download and execute the ansible playbooks on particular specified set of -nodes. Ideally we will make it possible to specify a role name with the -playbooks being invoked in a rolling fashion on each node. - -One of the required changes is to convert all the service templates to have -'when' conditionals instead of the current 'stepN'. For Pike we did this in -the client_ to avoid breaking the heat driven upgrade workflow still in use -for the controlplane during the Ocata to Pike upgrade. This will allow us to -use the 'ansible-native' loop_ control we are currently using in the generated -ansible playbooks. - - -Other End User Impact ---------------------- - -There will be significant changes to the workflow and cli the operator uses -for the major upgrade as documented above. - -Performance Impact ------------------- - -The initial Heat stack update will not deliver any of the puppet or docker -config to nodes since the DeploymentSteps will be disabled_ as we currently -do for Pike minor update. This will mean a much shorter heat stack update - -exact numbers TBD but 'minutes not hours'. - -Developer Impact ----------------- - -Should make it easier for developers to debug particular parts of the upgrades -workflow. 
- - -Implementation -============== - -Assignee(s) ------------ -Contributors: -Marios Andreou (marios) -Mathieu Bultel (matbu) -Sofer Athlang Guyot (chem) -Steve Hardy (shardy) -Carlos Ccamacho (ccamacho) -Jose Luis Franco Arza (jfrancoa) -Marius Cornea (mcornea) -Yurii Prokulevych (yprokule) -Lukas Bezdicka (social) -Raviv Bar-Tal (rbartal) -Amit Ugol (amitu) - -Work Items ----------- - - * Remove steps and add when for all the ansible upgrade tasks, minor - update tasks, deployment steps, post_upgrade_tasks - * Need mistral workflows that can invoke the required stages of the - workflow (--init and --nodes). There is some existing work in this - direction in 463765_. - * CLI/python-tripleoclient changes required. Related to the previous - item there is some work started on this in 463728_. - * UI work - we will need to collaborate with the UI team for the - integration. We have never had UI driven upgrade or updates. - * CI: Implement a simple job (one nodes, just controller, which does the - heat-setup-output and run ansible --nodes Controller) with keystone - only upgrade. Then iterate on this as we can add service upgrade_tasks. - * Docs! - -Testing -======= - -We will aim to land a 'keystone-only' job asap which will be updated as the various -changes required to deliver this spec are closer to merging. For example we -may deploy only a very small subset of services (e.g. first keystone) and then iterate as changes -related to this spec are proposed. - -Documentation Impact -==================== - -We should also track changes in the documented upgrades workflow since as -described here it is going to change significantly both internally as well as -the interface exposed to an operator. - -References -========== -Check the source_ for links - -.. _manifests: https://github.com/openstack/tripleo-heat-templates/tree/master/docker/services -.. 
_upgrade_tasks: https://github.com/openstack/tripleo-heat-templates/blob/211d7f32dc9cda261e96c3f5e0e1e12fc0afdbb5/docker/services/nova-compute.yaml#L147 -.. _update_tasks: https://github.com/openstack/tripleo-heat-templates/blob/60f3f10442f3b4cedb40def22cf7b6938a39b391/puppet/services/tripleo-packages.yaml#L59 -.. _outputs: https://github.com/openstack/tripleo-heat-templates/blob/3dcc9b30e9991087b9e898e25685985df6f94361/common/deploy-steps.j2#L324-L372 -.. _utility: https://github.com/openstack/python-tripleoclient/blob/27bba766daa737a56a8d884c47cca1c003f16e3f/tripleoclient/v1/overcloud_config.py#L26-L154 -.. _1715557: https://bugs.launchpad.net/tripleo/+bug/1715557 -.. _1708115: https://bugs.launchpad.net/tripleo/+bug/1708115 -.. _PTG: https://etherpad.openstack.org/p/tripleo-ptg-queens-upgrades -.. _Deployments: https://github.com/openstack/tripleo-heat-templates/blob/f4730632a51dec2b9be6867d58184fac3b8a11a5/common/major_upgrade_steps.j2.yaml#L132-L173 -.. _invoke: https://github.com/openstack/tripleo-heat-templates/blob/f4730632a51dec2b9be6867d58184fac3b8a11a5/puppet/upgrade_config.yaml#L21-L50 -.. _effort: http://lists.openstack.org/pipermail/openstack-dev/2017-September/122089.html -.. _487488: https://review.openstack.org/#/c/487488/ -.. _487496: https://review.openstack.org/#/c/487496/ -.. _client: https://github.com/openstack/python-tripleoclient/blob/4d342826d6c3db38ee88dccc92363b655b1161a5/tripleoclient/v1/overcloud_config.py#L63 -.. _loop: https://github.com/openstack/tripleo-heat-templates/blob/fe2acfc579295965b5f39c5ef7a34bea35f3d6bf/common/deploy-steps.j2#L364-L365 -.. _disabled: https://review.openstack.org/#/c/487496/21/tripleo_common/actions/package_update.py@63 -.. _source: https://raw.githubusercontent.com/openstack/tripleo-specs/master/specs/queens/tripleo_ansible_upgrades_workflow.rst -.. _463728: https://review.openstack.org/#/c/463728/ -.. 
_463765: https://review.openstack.org/#/c/463765/ diff --git a/specs/queens/triplo-ovs-hw-offload.rst b/specs/queens/triplo-ovs-hw-offload.rst deleted file mode 100644 index 03e4b8f6..00000000 --- a/specs/queens/triplo-ovs-hw-offload.rst +++ /dev/null @@ -1,141 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -========================================== -Adding OVS Hardware Offload to TripleO -========================================== - -https://blueprints.launchpad.net/tripleo/+spec/tripleo-ovs-hw-offload - -OVS Hardware Offload leverages SR-IOV technology to control the SR-IOV -VF using VF representor port. OVS 2.8.0 supports the hw-offload option which -allows to offload OVS datapath rule to hardware using linux traffic control -tool and the VF representor port. This feature accelerates the OVS -with a SR-IOV NIC which support switchdev mode. - -Problem Description -=================== - -Today the installation and configuration of OVS hardware offload feature is -done manually after overcloud deployment. It shall be automated via tripleo. - -Proposed Change -=============== - -Overview --------- - -* Configure the SR-IOV NIC to be in switchdev mode using the following - syntax :: for NeutronSriovNumVFs. - mode can be legacy or switchdev -* Configure the OVS with other_config:hw-offload. The options can - be added for the cluster without side effects, because if then NIC doesn't - support OVS will fall-back to kernel datapath. 
- -* Nova scheduler should be configured to use the PciPassthroughFilter - (same SR-IOV) -* Nova compute should be configured with passthrough_whitelist (same SR-IOV) - -Alternatives ------------- - -None - -Security Impact ---------------- - -None - -Other End User Impact ---------------------- - -None - -Performance Impact ------------------- - -* OVS Hardware Offload leverage the SR-IOV technology to provides near - native I/O performance for each virtual machine that managed by OpenVswitch. - -Other Deployer Impact ---------------------- - -* The operator shall ensure that the BIOS supports VT-d/IOMMU virtualization - technology on the compute nodes. - -* IOMMU needs to be enabled in the Compute+SR-IOV nodes. Boot parameters - (intel_iommu=on or amd_iommu=pt) shall be added in the grub.conf, using the - PreNetworkConfig. - -* Post deployment, operator shall - - * Create neutron ports prior to creating VM’s (nova boot) - openstack port create --vnic-type direct --binding-profile '{"capabilities": ["switchdev"]}' port1 - - * Create the VM with the required flavor and SR-IOV port id - openstack server create --image cirros-mellanox_sriov --port=port1 --flavor m1.tiny vm_a1 - -Developer Impact ----------------- - -None - -Implementation -============== - -Assignee(s) ------------ - -Primary assignee: - waleedm (waleedm@mellanox.com - -Other contributors: - moshele (moshele@mellanox.com) - -Work Items ----------- - -* Update tripleo::host::sriov::numvfs_persistence to allow configure SR-IOV - in switchdev mode. extending the vf_defs to - ::. Mode can be legacy which is default - SR-IOV or switchdev which is used for ovs hardware offload. -* Add a template parameter called NeutronOVSHwOffload to enable. -* provide environment YAML for OVS hardware offload in tripleo-heat-templates. - -Dependencies -============ - -None - - -Testing -======= - -* Since SR-IOV needs specific hardware support, this feature can be tested - under third party CI. 
We hope to provide Mellanox CI to SR-IOV and this - feature. - -Documentation Impact -==================== - -None - -References -========== - -* Introduction to SR-IOV - http://goo.gl/m7jP3 - -* SR-IOV OVS hardware offload netdevconf - http://netdevconf.org/1.2/papers/efraim-gerlitz-sriov-ovs-final.pdf - -* OVS hardware offload in OpenVswitch - https://mail.openvswitch.org/pipermail/ovs-dev/2017-April/330606.html - -* OpenStack OVS mechanism driver support in neutron/nova/os-vif - https://review.openstack.org/#/c/398265/ - https://review.openstack.org/#/c/275616/ - https://review.openstack.org/#/c/460278/ diff --git a/specs/rocky/custom-validations.rst b/specs/rocky/custom-validations.rst deleted file mode 100644 index 374fc979..00000000 --- a/specs/rocky/custom-validations.rst +++ /dev/null @@ -1,160 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -========================================== -Add Support for Custom TripleO Validations -========================================== - -https://blueprints.launchpad.net/tripleo/+spec/custom-validations - -All validations are currently stored in a single directory. This makes -it inconvenient to try and write new validations, update from a remote -repository or to add an entirely new (perhaps private) source. - - -Problem Description -=================== - -* The deployer wants to develop and test their own validations in a - personal checkout without risking changes to the default ones. - -* The deployer wants to use a stable release of TripleO but consume - the latest validations because they are non-disruptive and check for - more stuff. - -* A third party has developed validations specific to their product - that they don't want to or can't include in the tripleo-validations - repository. 
- - - -Proposed Change -=============== - -Overview --------- - -We will store a default set of TripleO validations in a Swift container called -``tripleo-validations``. These will be shared across all plans and are not -expected to be updated by the deployer. This container should be created on -initial undercloud deployment. - -We will provide a mechanism for deployers to add a custom set of validations -per deployment plan. These plan-specific validations will be stored in a -``custom-validations`` subdirectory in the plan's Swift container. Storing them -together with the plan makes sense as these validations can be specific to -particular deployment plan configuration, as well as makes the import/export -easier. - -Since custom validation will be stored as part of the plan, no additional -workflows/actions to perform CRUD operations for them will be necessary; we can -simply use the existing plan create/update for this purpose. - -The validation Mistral actions (e.g. ``list`` and ``run_validation``) -will need to be updated to take into account this new structure of -validations. They will need to look for validations in the -``tripleo-validations`` Swift container (for default validations) and the -plan's ``custom-validations`` subdirectory (for custom validations), instead of -sourcing them from a directory on disk, as they are doing now. - -If a validation with the same name is found both in default in custom -validations, we will always pick the one stored in custom validations. - -.. note:: As a further iteration, we can implement validations as per-service - tasks in standalone service Ansible roles. They can then be consumed - by tripleo-heat-templates service templates. - -Alternatives ------------- - -* Do nothing. The deployers can already bring in additional - validations, it's just less convenient and potentially error-prone. 
- -* We could provide a know directory structure conceptually similar to - ``run-parts`` where the deployers could add their own validation - directories. - - -Security Impact ---------------- - -None - -Other End User Impact ---------------------- - -In order to add their own validations, the deployer will need to -update the deployment plan by adding a ``custom-validations`` directory to it, -and making sure this directory contains the desired custom validations. The -plan update operation is already supported in the CLI and the UI. - -Performance Impact ------------------- - -Since the validation sources will now be Swift containers, downloading -validations will potentially be necessary on each run. We will have to keep an -eye on this an potentially introduce caching if this turns out to be a problem. - -Other Deployer Impact ---------------------- - -None - -Developer Impact ----------------- - -Testing and developing new validations in both development and -production environments will be easier with this change. - - -Implementation -============== - -Assignee(s) ------------ - -Primary assignees: - * akrivoka - -Other contributors: - * florianf - -Work Items ----------- -* Move to using Swift as default storage for tripleo-validations ([1]_). - -* Update ``load_validations`` and ``find_validation`` functions to - read validations from all the sources specified in this document. - -Dependencies -============ - -In order to be able to implement this new functionality, we first need to have -the validations use Swift as the default storage. In other words, this spec -depends on the blueprint [1]_. - -Testing -======= - -The changes will be unit-tested in all the tripleo repos that related -changes land in (tripleo-common, instack-undercloud, tripleo-heat-templates, -etc). - -We could also add a new CI scenario that would have a custom-validations -directory within a plan set up. 
- - -Documentation Impact -==================== - -We will need to document the format of the new custom-validations plan -subdirectory and the new behaviour this will introduce. - - -References -========== - -.. [1] https://blueprints.launchpad.net/tripleo/+spec/store-validations-in-swift diff --git a/specs/rocky/logging-stdout.rst b/specs/rocky/logging-stdout.rst deleted file mode 100644 index 1a8c6fd3..00000000 --- a/specs/rocky/logging-stdout.rst +++ /dev/null @@ -1,172 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -============================================= -Enable logging to stdout/journald for rsyslog -============================================= - -https://blueprints.launchpad.net/tripleo/+spec/logging-stdout-rsyslog - -We can optimize the current logging implementation to take advantage -of metadata that our default logging driver (journald) adds to the -logs. - -Problem Description -=================== - -Currently, we put all the logs of the containers into a directory in -the host (/var/log/containers/). While this approach works, it relies -on mounting directories from the host itself. This makes it harder for -logging forwarders, since we need to configure them to track all those -files. With every service that we add, we end up having to write -configuration for that service for those specific files. - -Furthermore, we lose important metadata with this approach. We can -figure out what service wrote what log, but we lose the container name and ID, -which is very useful. These we can easily get just by using the default -docker logging mechanism. - -Instead of relying on the host filesystem for our logs, we can adopt a -simpler solution that both preserves important metadata that is -discarded by the current implementation and that will support most -services without requiring per-service configuration. 
- -Proposed Change -=============== - -Overview --------- - -The proposal is to configure containerized services to log to -stdout/stderr as is common practice for containerized applications. -This allows the logs to get picked up by the docker logging driver, -and thus we can use "docker logs" to view the logs of a service as one -would usually expect. It will also help us decouple the -containers from the host, since we will no longer be relying on host -filesystem mounts for log collection. - -In the case of services where it's difficult or not possible to log to -stdout or stderr, we will place log files in a docker volume, and this -volume will be shared with a sidecar container that will output the -logs to stdout so they are consumable by the logging drvier. This will -also apply for containers that log only to syslog (such as HAProxy). -We will stop mounting ``/dev/log`` from the host, and instead add a -sidecar container that will output the logs instead. - -Additionally, since our default logging driver is journald, we will -get all the container logs accessible via ``journalctl`` and the -journald libraries. So one would be able to do ``journalctl -CONTAINER_NAME=`` to get the logs of a specific -container on the node. Furthermore, we would get extra metadata -information for each log entry [1]. We would benefit for -getting the container name (as the ``CONTAINER_NAME`` metadata item) -and the container ID (as the ``CONTAINER_ID`` and -``CONTAINER_ID_FULL`` metadata items) from each journald log entry -without requiring extra processing. Adding extra tags to the -containers is possible [2], and would get reflected via the -``CONTAINER_TAG`` metadata entry. These tags can optionally describe the -application that emitted the logs or describe the platform that it -comes from. - -This will also make it easier for us to forward logs, since there will -be a centralized service (journald) on each host from which we can -collect the logs. 
When we add a new service, it will be a matter of -following the same logging pattern, and we will automatically be able -to forward those logs without requiring specific configuration to -track a new set of log files. - -With this solution in place, we need to also provide tooling to -integrate with centralized logging solutions. This will then cover -integration to the Openshift Logging Stack [3] and ViaQ [4]. We are -proposing the use of rsyslog for message collection, manipulation, and -log forwarding. This will also be done in a containerized fashion, -where rsyslog will be a "system container" that reads from the host -journal. Rsyslog will perform metadata extraction from log messages -(such as extracting the user, project, and domain from standard oslo -format logs), and will then finally forward the logs to a central -collector. - -Pluggable implementation -~~~~~~~~~~~~~~~~~~~~~~~~ - -The implementation needs to be done in a pluggable manner. This is because -end-users have already created automation based on the assumption that logs -exist in the ``/var/log/`` / ``/var/log/containers/*`` directories -that we have been providing. For this reason, logging to stdout/stderr will be -optional, and we'll keep logging to files in the host as a default for now. -This will then be optionally enabled via an environment file. - -Example -~~~~~~~ - -nova-api container: - -In the proposed solution, the standard nova logs will go to the -nova_api container's stdout/stderr. However, since we are also -interested in the apache access logs, we will then create a docker -volume where the access logs will be hosted. A sidecar container will -mount this volume, create a FIFO (named pipe) and output whatever it -gets from that file. Note that this sidecar container will need to be -started before the actual nova_api container. - -For each log file generated in the main container, we will create a -sidecar container that outputs that log. 
This will make it easier to -associate log messages with the originating service. - -Alternatives ------------- - -Keep logging to files in the hosts' directory. - -We can still use the current solution; however, it is not ideal as it -violates container logging best practices, relies heavily on -directories on the host (which we want to avoid) and is inconsistent -in the way we can get logging from services (some in files, some in -syslog). - -Other End User Impact ---------------------- - -Since we're not getting rid of the previous logging solution, users won't be -impacted. They will, however, get another way of getting logs and interacting -with them in the host system, and further create automation from that if -needed. - -Performance Impact ------------------- - -* TODO: Any performance considerations on getting everything to journald? - -Implementation -============== - -Primary assignees: - jaosorior - jbadiapa - larsks - -Work Items ----------- - -* Allow services to log to stdout/stderr (if possible). - -* Implement pluggable logging for each service in t-h-t. - -* Add Rsyslog container. - -Testing -======= - -TODO: Evaluate how can we log to an EFK stack in upstream CI. Do we have one -available? - -References -========== - -[1] https://docs.docker.com/engine/admin/logging/journald/ -[2] https://docs.docker.com/engine/admin/logging/log_tags/ -[3] https://docs.openshift.com/container-platform/3.5/install_config/aggregate_logging.html -[4] https://github.com/ViaQ/Main/blob/master/README-install.md diff --git a/specs/rocky/split-controlplane.rst b/specs/rocky/split-controlplane.rst deleted file mode 100644 index d4f2b53d..00000000 --- a/specs/rocky/split-controlplane.rst +++ /dev/null @@ -1,248 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. 
- - http://creativecommons.org/licenses/by/3.0/legalcode - -======================================================== -TripleO Split Control Plane from Compute/Storage Support -======================================================== - -https://blueprints.launchpad.net/tripleo/+spec/split-controlplane - -This spec introduces support for a mode of deployment where the controlplane -nodes are deployed and then batches of compute/storage nodes can be added -independently. - -Problem Description -=================== - -Currently tripleo deploys all services, for all roles (groups of nodes) in -a single heat stack. This works quite well for small to medium size deployments -but for very large environments, there is considerable benefit to dividing the -batches of nodes, e.g when deploying many hundreds/thousands of compute nodes. - -* Scalability can be improved when deploying a fairly static controlplane then - adding batches of e.g compute nodes when demand requires scale out. The overhead - of updating all the nodes in every role for any scale out operation is non-trivial - and although this is somewhat mitigated by the split from heat deployed servers - to config download & ansible for configuration, making modular deployments easier - is of benefit when needing to scale deployments to very large environments. - -* Risk reduction - there are often requests to avoid any update to controlplane - nodes when adding capacity for e.g compute or storage, and modular deployments - makes this easier as no modification is required to the controalplane nodes to - e.g add compute nodes. - -This spec is not intended to cover all the possible ways achieving modular deployments, -but instead outline the requirements and give an overview of the interfaces we need to -consider to enable this flexibility. 
- -Proposed Change -=============== - -Overview --------- - -To enable incremental changes, I'm assuming we could still deploy the controlplane -nodes via the existing architecture, e.g Heat deploys the nodes/networks and we -then use config download to configure those nodes via ansible. - -To deploy compute nodes, we have several options: - -1. Deploy multiple "compute only" heat stacks, which would generate - ansible playbooks via config download, and consume some output data - from the controlplane stack. - -2. Deploy additional nodes via mistral, then configure them via - ansible (today this still requires heat to generate the - playbooks/inventory even if it's a transient stack). - -3. Deploy nodes via ansible, then configure them via ansible (again, - with the config download mechanism we have available today we'd - need heat to generate the configuration data). - -The above doesn't consider a "pure ansible" solution as we would have to first make ansible -role equivalents for all the composable service templates available, and that effort -is out of scope for this spec. - -Scope and Phases ----------------- - -The three items listed in the overview cover an incremental approach -and the first phase is to implement the first item. Though this item -adds an additional dependency on Heat, this is done only to allow the -desired functionality using what is available today. In future phases -any additional dependency on Heat will need to be addressed and any -changes done during the first phase should be minimal and focus on -parameter exposure between Heat stacks. Implementation of the other -items in the overview could span multiple OpenStack development cycles -and additional details may need to be addressed in future -specifications. 
- -If a deployer is able to do the following simple scenario, then this -specification is implemented as phase 1 of the larger feature: - -- Deploy a single undercloud with one control-plane network -- Create a Heat stack called overcloud-controllers with 0 compute nodes -- Create a Heat stack called overcloud-computes which may be used by the controllers -- Use the APIs of the controllers to boot an instance on the computes deployed from the overcloud-computes Heat stack - -In the above scenario the majority of the work involves exposing the -correct parameters between Heat stacks so that a controller node is -able to use a compute node as if it were an external service. This is -analogous to how TripleO provides a template where properties of an -external Ceph cluster may be used by TripleO to configure a service -like Cinder which uses the external Ceph cluster. - -The simple scenario above is possible without network isolation. In -the more complex workload site vs control site scenario, described -in the following section, network traffic will not be routed through -the controller. How the networking aspect of that deployment scenario -is managed will need to be addressed in a separate specification and -the overall effort will likely to span multiple OpenStack development -cycles. - -For the phase of implementation covered in this specification, the -compute nodes will be PXE booted by Ironic from the same provisioning -network as the controller nodes during deployment. Instances booted on -these compute nodes could connect to a provider network to which their -compute nodes have direct access. Alternatively these compute nodes -could be deployed with physical access to the network which hosts -the overlay networks. The resulting overcloud should look the same as -one in which the compute nodes were deployed as part of the overcloud -Heat stack. 
Thus, the controller and compute nodes will run the same -services they normally would regardless of if the deployment were -split between two undercloud Heat stacks. The services on the -controller and compute nodes could be composed to multiple servers -but determining the limits of composition is out of scope for the -first phase. - -Example Usecase Scenario: Workload vs Control Sites ---------------------------------------------------- - -One application of this feature includes the ability to deploy -separate workload and control sites. A control site provides -management and OpenStack API services, e.g. the Nova API and -Scheduler. A workload site provides resources needed only by the -workload, e.g. Nova compute resources with local storage in -availability zones which directly serve workload network traffic -without routing back to the control site. Though there would be -additional latency between the control site and workload site with -respect to managing instances, there would be no reason that the -workload itself could not perform adequately once running and each -workload site would have a smaller footprint. - -.. image:: ../../../../images/split-controlplane/ceph-details.png - :height: 445px - :width: 629px - :alt: Diagram of an example control site with multiple workload sites - :align: center - -This scenario is included in this specification as an example -application of the feature. This specification does not aim to address -all of the details of operating separate control and workload sites -but only to describe how the proposed feature, *deployment of -independent controlplane and compute nodes*, for TripleO could be -built upon to simplify deployment of such sites in future versions of -TripleO. 
For example the blueprint to make it possible to deploy -multiple Ceph clusters in the overcloud [1]_ could be applied to -provide a separate Ceph cluster per workload site, but its scope only -focuses on changes to roles in order to enable only that feature; it -is orthogonal to this proposal. - -Alternatives ------------- - -Alternatives to the incremental change outlined in the overview include reimplementing service -configuration in ansible, such that nodes can be configured via playbooks without dependency -on the existing heat+ansible architecture. Work is ongoing in this area e.g the ansible roles -to deploy services on k8s, but this spec is primarily concerned with finding an interim -solution that enables our current architecture to scale to very large deployments. - -Security Impact ---------------- - -Potentially sensitive data such as passwords will need to be shared between the controlplane -stack and the compute-only deployments. Given the admin-only nature of the undercloud I think -this is OK. - -Other End User Impact ---------------------- - -Users will have more flexibility and control with regard to how they -choose to scale their deployments. An example of this includes -separate control and workload sites as mentioned in the example use -case scenario. - -Performance Impact ------------------- - -Potentially better performance at scale, although the total time could be increased assuming -each scale out is serialized. - -Other Deployer Impact ---------------------- - -None - - -Developer Impact ----------------- - -It is already possible to deploy multiple overcloud Heat stacks from -one undercloud, but if there are parts of the TripleO tool-chain which -assume a single Heat stack, they made need to be updated. 
- -Implementation -============== - -Assignee(s) ------------ - -Primary assignee: - shardy - -Other assignees: - gfidente - fultonj - - -Work Items ----------- - -* Proof of concept showing how to deploy independent controlplane and compute nodes using already landed patches [2]_ and by overriding the EndpointMap -* If there are problems with overriding the EndpointMap, rework all-nodes-config to output the "all nodes" hieradata and vip details, such that they could span stacks -* Determine what data are missing in each stack and propose patches to expose the missing data to each stack that needs it -* Modify the proof of concept to support adding a separate and minimal ceph cluster (mon, mgr, osd) through a heat stack separate from the controller node's heat stack. -* Refine how the data is shared between each stack to improve the user experience -* Update the documentation to include an example of the new deployment method -* Retrospect and write a follow up specification covering details necessary for the next phase - - -Dependencies -============ - -None. - -Testing -======= - -Ideally scale testing will be performed to validate the scalability -aspects of this work. For the first phase, any changes done to enable -the simple scenario described under Scope and Phases will be tested -manually and the existing CI will ensure they do not break current -functionality. Changes implemented in the follow up phases could have -CI scenarios added. - -Documentation Impact -==================== - -The deployment documation will need to be updated to cover the configuration of -split controlplane environments. - -References -========== - -.. [1] `Make it possible to deploy multiple Ceph clusters in the overcloud `_ -.. 
[2] `Topic: topic:compute_only_stack2 `_ diff --git a/specs/rocky/tripleo-barometer-integration.rst b/specs/rocky/tripleo-barometer-integration.rst deleted file mode 100644 index b5c19f42..00000000 --- a/specs/rocky/tripleo-barometer-integration.rst +++ /dev/null @@ -1,112 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -==================================================================== -Support Barometer(Software Fastpath Service Quality Metrics) Service -==================================================================== - -Include the URL of your launchpad blueprint: - -https://blueprints.launchpad.net/tripleo/+spec/tripleo-barometer-integration - -The scope of the [Barometer]_ project is to provide interfaces to support -monitoring of the NFVI. The project has plugins for telemetry frameworks -to enable the collection of platform stats and events and relay gathered -information to fault management applications or the VIM. The scope is -limited to collecting/gathering the events and stats and relaying them -to a relevant endpoint. - -The consumption of performance and traffic-related information/events -provided by this project should be a logical extension of any existing -VNF/NFVI monitoring framework. - -Problem Description -=================== - -Integration of Barometer in TripleO is a benefit for building the OPNFV platform. -The Barometer project is complementary to the Doctor project to build the fault -management framework with [Apex_Installer]_ installer which is an OPNFV installation and -deployment tool based on TripleO. - -Proposed Change -=============== - -Overview --------- - -This spec proposes changes to automate the deployment of Barometer using TripleO. - -* Add puppet-barometer package to the overcloud-full image. - -* Define Barometer Service in THT. - -* Add how and when to deploy Barometer in puppet-tripleo. 
- -Alternatives ------------- - -None - -Security Impact ---------------- - -None - -Other End User Impact ---------------------- - -None - -Performance Impact ------------------- - -None - -Other Deployer Impact ---------------------- - -Barometer service is default disabled in a Deployment. Need to enable it -if deployer wants to use it. - -Developer Impact ----------------- - -None - -Implementation -============== - -Assignee(s) ------------ - -Primary assignee: - Akhila Kishore - -Work Items ----------- - -As outlined in the proposed changes. - -Dependencies -============ - -The Barometer RPM package must be in RDO repo. - -Testing -======= - -Add the test for CI scenarios. - -Documentation Impact -==================== - -The setup and configuration of the Barometer service should be documented. - -References -========== - -.. [Barometer] https://wiki.opnfv.org/display/fastpath/Barometer+Home -.. [Apex_Installer] https://wiki.opnfv.org/display/apex diff --git a/specs/rocky/tripleo-ha-utils.rst b/specs/rocky/tripleo-ha-utils.rst deleted file mode 100644 index 3ef95fb5..00000000 --- a/specs/rocky/tripleo-ha-utils.rst +++ /dev/null @@ -1,143 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -============================================= -TripleO tools for testing HA deployments -============================================= - -We need a way to verify a Highly Available TripleO deployment with proper tests -that check if the HA bits are behaving correctly. - -Problem Description -=================== - -Currently, we test HA behavior of TripleO deployments only by deploying -environments with three controllers and see if we're able to spawn an instance, -but this is not enough. - -There should be a way to verify the HA capabilities of deployments, and if the -behavior of the environment is still correct after induced failures, -simulated outages and so on.
- -This tool should be a standalone component to be included by the user if -necessary, without breaking any of the dynamics present in TripleO. - -Proposed Change -=============== - -Overview --------- - -The proposal is to create an Ansible based project named tripleo-ha-utils that -will be consumable by the various tools that we use to deploy TripleO -environments like tripleo-quickstart or infrared or by manual deployments. - -The project will initially cover three principal roles: - -* **stonith-config**: a playbook used to automate the creation of fencing - devices in the overcloud; -* **instance-ha**: a playbook that automates the seventeen manual steps needed - to configure instance HA in the overcloud, test them via rally and verify - that instance HA works appropriately; -* **validate-ha**: a playbook that runs a series of disruptive actions in the - overcloud and verifies it always behaves correctly by deploying a - heat-template that involves all the overcloud components; - -Today the project exists outside the TripleO umbrella, and it is named -tripleo-quickstart-utils [1] (see "Alternatives" for the historical reasons of -this name). It is used internally inside promotion pipelines, and has -also been tested with success in RDOCloud. - -Pluggable implementation -~~~~~~~~~~~~~~~~~~~~~~~~ - -The base principle of the project is to give people the ability to integrate -the first roles with whatever kind of test. For example, today we're using -a simple bash framework to interact with the cluster (so pcs commands and -other interactions), rally to test instance-ha and Ansible itself to simulate -full power outage scenarios. -The idea is to keep this pluggable approach leaving the final user the choice -about what to use. - -Retro compatibility -~~~~~~~~~~~~~~~~~~~ - -One of the aims of this project is to be retro-compatible with the previous -version of OpenStack. 
Starting from Liberty, we cover instance-ha and -stonith-config Ansible playbooks for all the releases. -The same happens while testing HA since all the tests are plugged in depending -on the release. - -Alternatives ------------- - -While evaluating alternatives, the first thing to consider is that this -project aims to be a TripleO-centric set of tools for HA, not a generic -OpenStack's one. -We want tools to help the user answer questions like "Is the Galera bundle -cluster resource able to tolerate a stop and a consecutive start without -affecting the environment capabilities?" or "Is the environment able to -evacuate instances after being configured for Instance HA?". And the answer we -want is YES or NO. - -* *tripleo-validations*: the most logical place to put this, at least - looking at the name, would be tripleo-validations. By talking with folks - working on it, it came out that the meaning of tripleo-validations project is - not doing disruptive tests. Integrating this stuff would be out of scope. - -* *tripleo-quickstart-extras*: apart from the fact that this is not - something meant just for quickstart (the project supports infrared and - "plain" environments as well) even if we initially started there, in the - end, it came out that nobody was looking at the patches since nobody was - able to verify them. The result was a series of reviews stuck forever. - So moving back to extras would be a step backward. - -Other End User Impact ---------------------- - -None. The good thing about this solution is that there's no impact for anyone -unless the solution gets loaded inside an existing project. Since this will be -an external project, it will not impact anything of the current stuff. - -Performance Impact ------------------- - -None. Unless the deployments, the CI runs or whatever include the roles there -will be no impact, and so the performances will not change. 
- -Implementation -============== - -Primary assignees: - -* rscarazz - -Work Items ----------- - -* Import the tripleo-quickstart-utils [1] as a new repository and start new - deployments from there. - -Testing -======= - -Due to the disruptive nature of these tests, the TripleO CI should not be -updated to include these tests, mostly because of timing issues. -This project should remain optionally usable by people when needed, or in -specific CI environments meant to support longer than usual jobs. - -Documentation Impact -==================== - -All the implemented roles are today fully documented in the -tripleo-quickstart-utils [1] project, so importing its repository as is will -also give its full documentation. - -References -========== - -[1] Original project to import as new - https://github.com/redhat-openstack/tripleo-quickstart-utils diff --git a/specs/rocky/tripleo-rsyslog-remote-logging.rst b/specs/rocky/tripleo-rsyslog-remote-logging.rst deleted file mode 100644 index cfe8d70f..00000000 --- a/specs/rocky/tripleo-rsyslog-remote-logging.rst +++ /dev/null @@ -1,276 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -========================================== - TripleO Remote Logging -========================================== - -https://blueprints.launchpad.net/tripleo/+spec/remote-logging - -This spec is meant to extend the tripleo-logging spec also for queens to -address key issues about log transport and storage that are separate from -the technical requirements created by logging for containerized processes. - -Problem Description -=================== - -Having logs stuck on individual overcloud nodes isn't a workable solution -for a modern system deployed at scale. But log aggregation is complex both -to implement and to scale. 
TripleO should provide a robust, well documented, -and scalable solution that will serve the majority of users needs and be -easily extensible for others. - - -Proposed Change -=============== - -Overview --------- - -In addition to the rsyslog logging to stdout defined for containers in the -triple-logging spec this spec outlines how logging to remote targets should -work in detail. - -Essentially this comes down to a set of options for the config -of the rsyslog container. Other services will have a fixed rsyslog config -that forwards messages to the rsyslog container to pick up over journald. - -1. Logging destination, local, remote direct, or remote aggregator. - -Remote direct means to go direct to a storage solution, in this case -Elasticsearch or plaintext on the disk. Remote aggregator is a design where -the processing, formatting, and insertion of the logs is a task left to the -aggregator server. Using aggregators it's possible to scale log collection to -hundreds of overcloud nodes without overwhelming the storage backend with -inefficient connections. - -2. Log caching for remote targets - -In the case of remote targets a caching system can be setup, where logs are -stored temporarily on the local machine in a configurable disk or memory cache -until they can be uploaded to an aggregator or storage system. While some in -memory cache is mandatory users may select a disk cache depending on how -important it is that all logs be saved and stored. This allows recovery -without loss of messages during network outages or service outages. - - -3. Log security in transit - -In some cases encryption during transit may be required. rsyslog offers -ssl based encryption that should be easily deployable. - -4. Standard and extensible format - -By default logs should be formatted as outlined by the Redhat common logging -initiative. By standardizing logging format where possible various tools -and analytics become more portable. 
- -Mandatory fields for this standard formatting include. - -version: the version of the logging template -level: loglevel -message: the log message -tags: user specific tagging info - -Additional fields must be added in the format of - -. - -See an example by rsyslog for storage in Elasticsearch below. - -@timestamp November 27th 2017, 08:54:40.091 -@version 2016.01.06-0 -_id AV_9wiWQzdGOuK5_zY5J -_index logstash-2017.11.27.08 -_score -_type rsyslog -browbeat.cloud_name openstack-12-noncontainers-beta -hostname lorenzo.perf.lab.eng.rdu.redhat.com -level info -message Stopping LVM2 PV scan on device 8:2... -pid 1 -rsyslog.appname systemd -rsyslog.facility daemon -rsyslog.fromhost-ip 10.12.20.155 -rsyslog.inputname imptcp -rsyslog.protocol-version 1 -syslog.timegenerated November 27th 2017, 08:54:40.092 -systemd.t.BOOT_ID 1e99848dbba047edaf04b150313f67a8 -systemd.t.CAP_EFFECTIVE 1fffffffff -systemd.t.CMDLINE /usr/lib/systemd/systemd --switched-root --system --deserialize 21 -systemd.t.COMM systemd -systemd.t.EXE /usr/lib/systemd/systemd -systemd.t.GID 0 -systemd.t.MACHINE_ID 0d7fed5b203f4664b0b4be90e4a8a992 -systemd.t.SELINUX_CONTEXT system_u:system_r:init_t:s0 -systemd.t.SOURCE_REALTIME_TIMESTAMP 1511790880089672 -systemd.t.SYSTEMD_CGROUP / -systemd.t.TRANSPORT journal -systemd.t.UID 0 -systemd.u.CODE_FILE src/core/unit.c -systemd.u.CODE_FUNCTION unit_status_log_starting_stopping_reloading -systemd.u.CODE_LINE 1417 -systemd.u.MESSAGE_ID de5b426a63be47a7b6ac3eaac82e2f6f -systemd.u.UNIT lvm2-pvscan@8:2.service -tags - -As a visual aid here's a quick diagram of the flow of data. 
- -application container -> journald -> rsyslog container -> Elasticsearch - -In the process container logs from the application are packaged with metadata -from systemd and other components depending on how rsyslog is configured, -journald acts as a transport aggregating this input across all containers for -the rsyslog container which formats this data into storable json and handles -things like transforming fields and adding additional metadata as desired. -Finally the data is inserted into elasticsearch or further held by an -aggregator for a few seconds before being bulk inserted into Elasticsearch. - - -Alternatives ------------- - -TripleO already has some level of FluentD integration, but performance issues -make it unusable at scale. Furthermore it's not well prepared for container -logging. - -Ideally FluentD as a logging backend would be maintained, improved, and modified -to use the common logging format for easy swapping of solutions. - -Security Impact ---------------- - -The security of remotely stored data and the log storage database is outside -of the scope of this spec. The major remaining concerns are security -in transit and the changes required to systemd for rsyslog to send data -remotely. - -A new systemd policy will have to be put into place to ensure that systemd -can successfully log to remote targets. By default the syslog rules prevent -any outside world access or port access, both of which are required for -log forwarding. - -For log encryption in transit an SSL certificate will have to be generated and -distributed to all nodes in the cloud securely, probably during deployment. -Special care should be taken to ensure that any misconfigured instance of -rsyslog without a certificate where one is required does not transmit logs -by accident. - - -Other End User Impact ---------------------- - -Ideally users will read some documentation and pass an extra 5-6 variables to -TripleO to deploy with logging aggregation.
It's very important that logging -be easy to setup with sane defaults and no requirement on the user to implement -their own formatting or template. - -Users may also have to setup a database for log storage and an aggregator if -their deployment is large enough that they need one. Playbooks to do this -automatically will be provided, but probably don't belong in TripleO. - -Special care will have to be taken to size storage and aggregation hardware -to the task, while rsyslog is very efficient storage quickly becomes a problem -when a cloud can generate 100gb of logs a day. Especially since log storage -systems leave it up to the user to put in place rotation rules. - - -Performance Impact ------------------- - -For small clouds rsyslog direct to Elasticsearch will perform just fine. -As scale increases an aggregator (also running rsyslog, except configured -to accept and format input) is required. I have yet to test a large enough -cloud that an aggregator was at all stressed. Hundreds of gigs of logs a day -are possible with a single 32gb ram VM as an Elastic instance. - -For the Overcloud nodes forwarding their logs the impact is variable depending -on the users configuration. CPU requirements don't exceed single digits of a -single core even under heavy load but storage requirements can balloon if a -large on disk cache was specified and connectivity with the aggregator or -database is lost for prolonged periods. - -Memory usage is no more than a few hundred mb and most of that is the default -in memory log cache. Which once again could be expanded by the user. - - -Other Deployer Impact ---------------------- - -N/A - -Developer Impact ----------------- - -N/A - -Implementation -============== - -Assignee(s) ------------ - -Who is leading the writing of the code? Or is this a blueprint where you're -throwing it out there to see who picks it up? - -If more than one person is working on the implementation, please designate the -primary author and contact. 
- -Primary assignee: - jkilpatr - -Other contributors: - jaosorior - -Work Items ----------- - -rsyslog container - jaosorior - -rsyslog templating and deployment role - jkilpatr - -aggregator and storage server deployment tooling - jkilpatr - - -Dependencies -============ - -Blueprint dependencies: - -https://blueprints.launchpad.net/tripleo/+spec/logging-stdout-rsyslog - -Package dependencies: - -rsyslog, rsyslog-elasticsearch, rsyslog-mmjsonparse - -specifically version 8 of rsyslog, which is the earliest -supported by rsyslog-elasticsearch, these are packaged in -Centos and rhel 7.4 extras. - -Testing -======= - -Logging aggregation can be tested in CI by deploying it during any existing CI job. - -For extra validation have a script to check the output into Elasticsearch. - - -Documentation Impact -==================== - -Documentation will need to be written about the various modes and tunables for -logging and how to deploy them. As well as sizing recommendations for the log -storage system and aggregators where required. - - -References -========== - -https://review.openstack.org/#/c/490047/ - -https://review.openstack.org/#/c/521083/ - -https://blueprints.launchpad.net/tripleo/+spec/logging-stdout-rsyslog diff --git a/specs/rocky/tripleo-upgrade.rst b/specs/rocky/tripleo-upgrade.rst deleted file mode 100644 index 372c81f5..00000000 --- a/specs/rocky/tripleo-upgrade.rst +++ /dev/null @@ -1,100 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. 
- - http://creativecommons.org/licenses/by/3.0/legalcode - -====================================================== -A unified tool for upgrading TripleO based deployments -====================================================== - -https://blueprints.launchpad.net/tripleo/+spec/tripleo-upgrade - -In order to avoid work duplication and automation code being out of sync with the -official documentation we would like to create a single repository hosting the upgrade -automation code that can be run on top of deployments done with various tools. - -Problem Description -=================== -Currently automation code for TripleO upgrades is spread across several repositories -and it is tightly coupled with the framework being used for deployment, e.g. tripleo- -quickstart or Infrared. - -Proposed Change -=============== - -Overview -------- - -Our proposal is to decouple the upgrade automation code and make it deployment tool -agnostic. This way it could be consumed in different scenarios such as CI, automated -or manual testing. - -Alternatives ------------- - -For the previous releases the automation code has been hosted in different repositories -such as tripleo-quickstart-extras, infrared or private repos. This is not convenient -as they all cover basically the same workflow so we are duplicating work. We would like -to avoid this and collaborate on a single repository. - -Security Impact ---------------- - -None. - -Other End User Impact ---------------------- - -This tool allows the users to run the TripleO upgrade in an automated fashion or -semi-automatic by creating scripts for each upgrade step which can be later run manually -by the user. - -Performance Impact ------------------- - -None. - -Other Deployer Impact ---------------------- - -None. - -Developer Impact ----------------- - -This tool helps developers by providing a quick way to run TripleO upgrades. This could -be useful when reproducing and debugging reported issues.
- - -Implementation -============== - -Assignee(s) ------------ - -Primary assignee: - matbu, mcornea - -Work Items ----------- - -* Create new repository in Openstack Git -* Migrate repository with its history from https://github.com/redhat-openstack/tripleo-upgrade - -Dependencies -============ - -* ansible - -Testing -======= - - -Documentation Impact -==================== - - -References -========== - diff --git a/specs/rocky/tripleo-vitrage-integration.rst b/specs/rocky/tripleo-vitrage-integration.rst deleted file mode 100644 index 38f19358..00000000 --- a/specs/rocky/tripleo-vitrage-integration.rst +++ /dev/null @@ -1,119 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -================================================== -Support Vitrage(Root Cause Analysis, RCA) Service -================================================== - -Include the URL of your launchpad blueprint: - -https://blueprints.launchpad.net/tripleo/+spec/tripleo-vitrage-integration - -[Vitrage]_ is the official OpenStack RCA project. It can perfectly organizes, -analyzes and visualizes the holistic view of the Cloud. - -Vitrage provides functions as follows: - -* A clear view of the Cloud Topology - -* Deduced alarms and states - -* RCA for alarms/events - -Via Vitrage, the end users can understand what happened in a complex cloud -environment, get the root cause of problems and then resolve issues in time. - -Problem Description -=================== - -Currently the installation and configuration of Vitrage in openstack is done -manually or using devstack. It shall be automated via tripleo. - -Integration Vitrage in TripleO is benefit for building the OPNFV platform. -It helps the OPNFV [Doctor]_ project using Vitrage as inspector component to -build the fault management framework with [Apex]_ installer which is an OPNFV -installation and deployment tool based on TripleO. 
- -Proposed Change -=============== - -Overview --------- - -This spec proposes changes to automate the deployment of Vitrage using TripleO. - -* Add puppet-vitrage package to overcloud-full image. - -* Define Vitrage Service in THT. - -* Add how and when to deploy Vitrage in puppet-tripleo. - -Alternatives ------------- - -None - -Security Impact ---------------- - -None - -Other End User Impact ---------------------- - -None - -Performance Impact ------------------- - -None - -Other Deployer Impact ---------------------- - -Vitrage service is default disabled in a Deployment. Need to enable it -if deployer want to use it. - -Developer Impact ----------------- - -None - -Implementation -============== - -Assignee(s) ------------ - -Primary assignee: - dong wenjuan - -Work Items ----------- - -As outlined in the proposed changes. - -Dependencies -============ - -The Vitrage RPM package must be in RDO repo. - -Testing -======= - -Add the test for CI scenarios. - -Documentation Impact -==================== - -The setup and configuration of the Vitrage server should be documented. - -References -========== - -.. [Vitrage] https://wiki.openstack.org/wiki/Vitrage -.. [Apex] https://wiki.opnfv.org/display/apex -.. [Doctor] https://wiki.opnfv.org/display/doctor diff --git a/specs/rocky/ui-automation-testing.rst b/specs/rocky/ui-automation-testing.rst deleted file mode 100644 index 59d06d56..00000000 --- a/specs/rocky/ui-automation-testing.rst +++ /dev/null @@ -1,123 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -========================================== -UI Automation Testing -========================================== - -https://blueprints.launchpad.net/tripleo/+spec/automated-ui-testing - -We would like to introduce a suite of automated integration tests for the -TripleO UI. This will prevent regressions, and will lead to more stable -software. 
- -Problem Description -=================== - -At the moment, upstream CI only tests for lint errors, and runs our unit tests. -We'd like to add more integration tests for tripleo-ui to the CI pipeline. This -will include a selenium-based approach. This allows us to simulate a browser by -using a headless browser when running in CI, and we can detect a lot more -problems than we ever could with just unit testing. - -Proposed Change -=============== - -Overview -------- - -We would like to write a Tempest plugin for tripleo-ui which uses Selenium to drive -a headless browser to execute the tests. We chose Tempest because it's a -standard in OpenStack, and gives us nice error reporting. - -We already have the `tempest-tripleo-ui`_ project set up. - -We plan to write a CI job to run our code in Tempest. In the initial -implementation, this will only cover checking for presence of certain UI -elements, and no deployments will actually be run. - -Alternatives ------------- - -The alternative is that we do all of our testing manually, waste time, have -lower velocity, and have more bugs. - -Security Impact ---------------- - -The security impact of this is minimal as it's CI-specific, and not user-facing. - -Other End User Impact ---------------------- - -End users won't interact with this feature. - -Performance Impact ------------------- - -This feature will only consume CI resources. There should be no negative -resource impact on the End User. - -Other Deployer Impact ---------------------- - -Our goal is to produce software that is more stable. But we're not changing any -features, per se. - -Developer Impact ----------------- - -Developers will gain a higher degree of confidence in their software.
- - -Implementation -============== - -Assignee(s) ------------ - -Primary assignee: - hpokorny - -Other contributors: - ukalifon - akrivoka - -Work Items ----------- - -* Write Selenium tests -* Write Tempest plugin code to run Selenium tests -* Write a new openstack-infra job to run the Tempest plugin on ``check`` and - ``gate``. At first, this will be a simple sanity job to make sure that the UI - has been rendered. The CI job won't run a deployment. - -Dependencies -============ - -* Tempest -* Selenium - -Testing -======= - -This is a bit meta. - -Documentation Impact -==================== - -We will document how a developer who is new to the tripleo-ui project can get -started with writing new integration tests. - -References -========== - -.. _tempest-tripleo-ui: https://github.com/openstack/tempest-tripleo-ui - -openstack-dev mailing list discussion: - -* http://lists.openstack.org/pipermail/openstack-dev/2017-June/119185.html -* http://lists.openstack.org/pipermail/openstack-dev/2017-July/119261.html diff --git a/specs/stein/all-in-one-upgrades-jobs.rst b/specs/stein/all-in-one-upgrades-jobs.rst deleted file mode 100644 index 697e41d4..00000000 --- a/specs/stein/all-in-one-upgrades-jobs.rst +++ /dev/null @@ -1,233 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -=============================================================== -Improve upgrade_tasks CI coverage with the standalone installer -=============================================================== - -https://blueprints.launchpad.net/tripleo/+spec/upgrades-ci-standalone - -The main goal of this work is to improve coverage of service upgrade_tasks in -tripleo ci upgrades jobs, by making use of the Standalone_installer_work_. 
-Using a standalone node as a single node 'overcloud' allows us to exercise -both controlplane and dataplane services in the same job and within current -resources of 2 nodes and 3 hours. Furthermore and once proven successful -this approach can be extended to include even single service upgrades testing -to vastly improve on the current coverage with respect to all the service -upgrade_tasks defined in the tripleo-heat-templates (which is currently minimal). - -Traditionally upgrades jobs have been restricted by resource constraints -(nodes and walltime). For example the undercloud and overcloud upgrade are -never exercised in the same job, that is an overcloud upgrade job uses an undercloud that is already on the target version (so called mixed version deployment). - -A further example is that upgrades jobs have typically exercised either -controlplane or dataplane upgrades (i.e. controllers only, or compute only) -and never both in the same job, again because constraints. The currently running -tripleo-ci-centos-7-scenario000-multinode-oooq-container-upgrades_ job for -example has 2 nodes, where one is undercloud and one is overcloud controller. -The workflow *is* being exercised, but controller only. Furthermore, whilst -the current_upgrade_ci_scenario_ is only exercising a small subset of the -controlplane services, it is still running at well over 140 minutes. So there -is also very little coverage with respect to the upgrades_tasks across the -many different service templates defined in the tripleo-heat-templates. - -Thus the main goal of this work is to use the standalone installer to define -ci jobs that test the service upgrade_tasks for a one node 'overcloud' with -both controlplane and dataplane services. This approach is composable as the -services in the stand-alone are fully configurable. 
Thus after the first -iteration of compute/control, we can also define per-service ci jobs and over -time hopefully reach coverage for all the services deployable by TripleO. - -Finally it is worth emphasising that the jobs defined as part of this work will not -be testing the TripleO upgrades *workflow* at all. Rather this is about testing -the service upgrades_tasks specifically. The workflow instead will be tested -using the existing ci upgrades job (tripleo-ci-centos-7-scenario000-multinode-oooq-container-upgrades_) subject to modifications to strip it down to a bare -minimum required (e.g. hardly any services). There are more pointers to this -from the discussion at the TripleO-Stein-PTG_ but ultimately we will have two -approximations of the upgrade tested in ci - the service upgrade_tasks as -described by this spec, and the workflow itself using a different ci job or -modifying the existing one. - -.. _Standalone_installer_work: http://lists.openstack.org/pipermail/openstack-dev/2018-June/131135.html -.. _tripleo-ci-centos-7-scenario000-multinode-oooq-container-upgrades: https://github.com/openstack-infra/tripleo-ci/blob/4101a393f29c18a84f64cd95a28c41c8142c5b05/zuul.d/multinode-jobs.yaml#L384 -.. _current_upgrade_ci_scenario: https://github.com/openstack/tripleo-heat-templates/blob/9f1d855627cf54d26ee540a18fc8898aaccdda51/ci/environments/scenario000-multinode-containers.yaml#L21 -.. _TripleO-Stein-PTG: https://etherpad.openstack.org/p/tripleo-ptg-stein - -Problem Description -=================== - -As described above we have not been able to have control and dataplane -services upgraded as part of the same tripleo ci job. Such a job would -have to be 3 nodes for starters (undercloud,controller,compute). 
- -A *full* upgrade workflow would need the following steps: - - * deploy undercloud, deploy overcloud - * upgrade undercloud - * upgrade prepare the overcloud (heat stack update generates playbooks) - * upgrade run controllers (ansible-playbook via mistral workflow) - * upgrade run computes/storage etc (repeat until all done) - * upgrade converge (heat stack update). - -The problem being solved here is that we can run only some approximation of -the upgrade workflow, specifically the upgrade_tasks, for a composed set -of services and do so within the ci timeout. The first iteration will focus on -modelling a one node 'overcloud' with both controller and compute services. If -we prove this to be successful we can also consider single-service upgrades -jobs (a job for testing just nova,or glance upgrade tasks for example) for -each of services that we want to test the upgrades tasks. Thus even though -this is just an approximation of the upgrade (upgrade_tasks only, not the full -workflow), it can hopefully allow for a wider coverage of services in ci -than is presently possible. - -One of the early considerations when writing this spec was how we could enforce -a separation of services with respect to the upgrade workflow. That is, enforce -that controlplane upgrade_tasks and deploy_steps are executed first and then -dataplane compute/storage/ceph as is usually the case with the upgrade workflow. -However review comments on this spec as well as PTG discussions around it, in -particular that this is just some approximation of the upgrade (service -upgrade tasks, not workflow) in which case it may not be necessary to artificially -induce this control/dataplane separation here. This may need to be revisited -once implementation begins. - -Another core challenge that needs solving is how to collect ansible playbooks -from the tripleo-heat-templates since we don't have a traditional undercloud -heat stack to query. 
This will hopefully be a lesser challenge assuming we can -re-use the transient heat process used to deploy the standalone node. Futhermore -discussion around this point at the TripleO-Stein-PTG_ has informed us of a way -to keep the heat stack after deployment with keep-running_ so we could just -re-use it as we would with a 'normal' deployment. - -Proposed Change -=============== - -Overview --------- - -We will need to define a new ci job in the tripleo-ci_zuul.d_standalone-jobs_ -(preferably following the currently ongoing ci_v3_migrations_ define this as -v3 job). - -For the generation of the playbooks themselves we hope to use the ephemeral -heat service that is used to deploy the stand-alone node, or use the keep-running_ -option to the stand-alone deployment to keep the stack around after deployment. - -As described in the problem statement we hope to avoid the task of having to -distinguish between control and dataplane services in order to enforce that -controlplane services are upgraded first. - -.. _tripleo-ci_zuul.d_standalone-jobs: https://github.com/openstack-infra/tripleo-ci/blob/4101a393f29c18a84f64cd95a28c41c8142c5b05/zuul.d/standalone-jobs.yaml -.. _ci_v3_migrations: https://review.openstack.org/#/c/578432/8 -.. _keep-running: https://github.com/openstack/python-tripleoclient/blob/a57531382535e92e2bfd417cee4b10ac0443dfc8/tripleoclient/v1/tripleo_deploy.py#L911 - -Alternatives ------------- - -Add another node and have 3 node upgrades jobs together with increasing the -walltime but this is not scalable in the long term assuming limited -resources! - - -Security Impact ---------------- - -None - -Other End User Impact ---------------------- - -None - -Performance Impact ------------------- - -None - -Other Deployer Impact ---------------------- - -More coverage of services should mean less breakage because of upgrades -incompatible things being merged. 
- -Developer Impact ----------------- - -Might be easier for developers too who may have limited access to resources -to take the reproducer script with the standalone jobs and get a dev env for -testing upgrades. - -Implementation -============== - -Assignee(s) ------------ - -tripleo-ci and upgrades squads - -Work Items ----------- - -First we must solve the problem of generating the ansible playbooks, that -will include all the latest configuration from the tripleo-heat-templates at -the time of upgrade (including all upgrade_tasks etc) when there is no -undercloud Heat stack to query. - -We might consider some non-heat solution by parsing the tripleo-heat-templates -but I don't think that is a feasible solution (re-inventing wheels). There is -ongoing work to transfer tasks to roles which is promising and that is another -area to explore. - -One obvious mechanism to explore given the current tools is to re-use the -same ephemeral heat process that the stand-alone uses in deploying the -overcloud, but setting the usual 'upgrade-init' environment files for a short -stack 'update'. This is not tested at all yet so needs to be investigated -further. As identified earlier there is now in fact a keep-running_ option to the -tripleoclient that will keep this heat process around - -For the first iteration of this work we will aim to use the minimum possible combination -of services to implement a 'compute'/'control' overcloud. That is, using the existing -services from the current current_upgrade_ci_scenario_ with the addition of nova-compute -and any dependencies. - -Finally a third major consideration is how to execute this service upgrade, that -is how to invoke the playbook generation and then run the resulting playbooks -(it probably doesn't need to converge if we are just interested in the upgrades -tasks). One consideration might be to re-use the existing python-tripleoclient -"openstack overcloud upgrade" prepare and run sub-commands. 
However the first -and currently favored approach will be to use the existing stand-alone client -commands (tripleo_upgrade_ tripleo_deploy_). So one work item is to try these -and discover any modifications we might need to make them work for us. - -Items: - * Work out/confirm generation the playbooks for the standalone upgrade tasks. - * Work out any needed changes in the client/tools to execute the ansible playbooks - * Define new ci job in the tripleo-ci_zuul.d_standalone-jobs_ with control and - compute services, that will exercise upgrade_tasks, deployment_tasks and - post_upgrade_tasks playbooks. - -Once this first iteration is complete we can then consider defining multiple -jobs for small subsets of services, or even for single services. - -.. _tripleo_upgrade: https://github.com/openstack/python-tripleoclient/blob/6b0f54c07ae8d0dd372f16684c863efa064079da/tripleoclient/v1/tripleo_upgrade.py#L33 -.. _tripleo_deploy: https://github.com/openstack/python-tripleoclient/blob/6b0f54c07ae8d0dd372f16684c863efa064079da/tripleoclient/v1/tripleo_deploy.py#L80 - -Dependencies -============ - -This obviously depends on stand-alone installer - -Testing -======= - -There will be at least one new job defined here - -Documentation Impact -==================== - -None - -References -========== diff --git a/specs/stein/inflight-validations.rst b/specs/stein/inflight-validations.rst deleted file mode 100644 index f65b6c13..00000000 --- a/specs/stein/inflight-validations.rst +++ /dev/null @@ -1,142 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -======================================= -In-flight Validations for the overcloud -======================================= - - -https://blueprints.launchpad.net/tripleo/+spec/inflight-validations - -Currently, we don't have any way to run validations inside a deploy run. 
This -spec aims to provide the necessary information on how to implement such -in-flight validations for an overcloud deploy. - -Problem Description -=================== - -Currently, operators and developers have to wait a long time before getting an -error in case a service isn't running as expected. - -This leads to loss of time and resources. - -Proposed Change -=============== - -Overview --------- - -After each container/service is started, a new step is added to run one or more -validations on the deployed host in order to ensure the service is actually -working as expected at said step. - -These validations must not use Mistral Workflow, in order to provide support -for the undercloud/standalone case. - -The best way to push those validations would be through the already existing -``deploy_steps_tasks`` keywork. A validation should be either at the start -of the next step, or at the end of the current step we want to check. - -The validations should point to an external playbook, for instance hosted in -``tripleo-validations``. If there isn't real use to create a playbook for the -validation, it might be inline - but it must be short, for example a single test -for an open port. - -Alternatives ------------- - -There isn't really other alternative. We might think running the validation -ansible playbook directly is a good idea, but it will break the wanted -convergence with the UI. - -For now, there isn't such validations, we can start fresh. - -Security Impact ---------------- - -No security impact. - -Upgrade Impact --------------- - -If a service isn't starting properly, the upgrade might fail. This is also true -for a fresh deploy. - -We might want different validation tasks/workflows if we're in an upgrade -state. - -Other End User Impact ---------------------- - -End user will get early failure in case of issues detected by the validations. 
-This is an improvement, as for now it might fail at a later step, and might -break things due to the lack of valid state. - -Performance Impact ------------------- - -Running in-flight validation WILL slow the overall deploy/upgrade process, but -on the other hand, it will ensure we have a clean state before each step. - -Other Deployer Impact ---------------------- - -No other deployer impact. - -Developer Impact ----------------- - -Validations will need to be created and documented in order to get proper runs. - - -Implementation -============== - -Assignee(s) ------------ - -Who is leading the writing of the code? Or is this a blueprint where you're -throwing it out there to see who picks it up? - -If more than one person is working on the implementation, please designate the -primary author and contact. - -Primary assignee: - cjeanner - -Other contributors: - - -Work Items ----------- - -* Add new hook for the ``validation_tasks`` -* Provide proper documentation on its use - -Dependencies -============ - -* Please keep in mind the Validation Framework spec when implementing things: - https://review.openstack.org/589169 - - -Testing -======= - -TBD - - -Documentation Impact -==================== - -What is the impact on the docs? Don't repeat details discussed above, but -please reference them here. - - -References -========== - -* https://review.openstack.org/589169 diff --git a/specs/stein/nova-less-deploy.rst b/specs/stein/nova-less-deploy.rst deleted file mode 100644 index 01e315c5..00000000 --- a/specs/stein/nova-less-deploy.rst +++ /dev/null @@ -1,638 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. 
- - http://creativecommons.org/licenses/by/3.0/legalcode - -======================================= -Provision nodes without Nova and Glance -======================================= - -https://blueprints.launchpad.net/tripleo/+spec/nova-less-deploy - -Currently TripleO undercloud uses Heat, Nova, Glance, Neutron and Ironic for -provisioning bare metal machines. This blueprint proposes excluding Heat, Nova -and Glance from this flow, removing Nova and Glance completely from the -undercloud. - -Problem Description -=================== - -Making TripleO workflows use Ironic directly to provision nodes has quite a few -benefits: - -#. First and foremost, getting rid of the horrible "no valid hosts found" - exception. The scheduling will be much simpler and the errors will be - clearer. - - .. note:: - This and many other problems with using Nova in the undercloud come from - the fact that Nova is cloud-oriented software, while the undercloud is - more of a traditional installer. In the "pet vs cattle" metaphore, Nova - handles the "cattle" case, while the undercloud is the "pet" case. - -#. Also important for the generic provisioner case, we'll be able to get rid of - Nova and Glance, reducing the memory footprint. - -#. We'll get rid of pre-deploy validations that currently try to guess what - Nova scheduler will expect. - -#. We'll be able to combine nodes deployed by Ironic with pre-deployed servers. - -#. We'll become in charge of building the configdrive, potentially putting more - useful things there. - -#. Hopefully, scale-up will be less error-prone. - -Also in the future we may be able to: - -#. Integrate things like building RAID on demand much easier. - -#. Use introspection data in scheduling and provisioning decisions. - Particularly, we can automate handling root device hints. - -#. Make Neutron optional and use static DHCP and/or *os-net-config*. 
- -Proposed Change -=============== - -Overview --------- - -This blueprint proposes removal replacing the triad Heat-Nova-Glance with -Ironic driven directly by Mistral. To avoid placing Ironic-specific code into -tripleo-common, a new library metalsmith_ has been developed and accepted into -the Ironic governance. - -As part of the implementation, this blueprint proposes completely separting the -bare metal provisioning process from software configuration, including the CLI -level. This has two benefits: - -#. Having a clear separation between two error-prone processes simplifies - debugging for operators. - -#. Reusing the existing *deployed-server* workflow simplifies the - implementation. - -In the distant future, the functionality of metalsmith_ may be moved into -Ironic API itself. In this case it will be phased out, while keeping the same -Mistral workflows. - -Operator workflow ------------------ - -As noted in Overview_, the CLI/GUI workflow will be split into hardware -provisioning and software configuration parts (the former being optional). - -#. In addition to existing Heat templates, a new file - baremetal_deployment.yaml_ will be populated by an operator with the bare - metal provisioning information. - -#. Bare metal deployment will be conducted by a new CLI command or GUI - operation using the new `deploy_roles workflow`_:: - - openstack overcloud node provision \ - -o baremetal_environment.yaml baremetal_deployment.yaml - - This command will take the input from baremetal_deployment.yaml_, provision - requested bare metal machines and output a Heat environment file - baremetal_environment.yaml_ to use with the *deployed-server* feature. - -#. 
Finally, the regular deployment is done, including the generated file:: - - openstack overcloud deploy \ - \ - -e baremetal_environment.yaml \ - -e /usr/share/openstack-tripleo-heat-templates/environments/deployed-server-environment.yaml \ - -e /usr/share/openstack-tripleo-heat-templates/environments/deployed-server-bootstrap-environment-centos.yaml \ - -r /usr/share/openstack-tripleo-heat-templates/deployed-server/deployed-server-roles-data.yaml - -For simplicity the two commands can be combined:: - - openstack overcloud deploy \ - \ - -b baremetal_deployment.yaml \ - -e /usr/share/openstack-tripleo-heat-templates/environments/deployed-server-environment.yaml \ - -e /usr/share/openstack-tripleo-heat-templates/environments/deployed-server-bootstrap-environment-centos.yaml \ - -r /usr/share/openstack-tripleo-heat-templates/deployed-server/deployed-server-roles-data.yaml - -The new argument ``--baremetal-deployment``/``-b`` will accept the -baremetal_deployment.yaml_ and do the deployment automatically. - -Breakdown of the changes ------------------------- - -This section describes the required changes in depth. - -Image upload -~~~~~~~~~~~~ - -As Glance will no longer be used, images will have to be served from other -sources. Ironic supports HTTP and file sources from its images. For the -undercloud case, the file source seems to be the most straightforward, also the -*Edge* case may require using HTTP images. - -To make both cases possible, the ``openstack overcloud image upload`` command -will now copy the three overcloud images (``overcloud-full.qcow2``, -``overcloud-full.kernel`` and ``overcloud-full.ramdisk``) to -``/var/lib/ironic/httpboot/overcloud-images``. This will allow referring to -images both via ``file:///var/lib/ironic/httpboot/overcloud.images/...`` and -``http(s)://:/overcloud-images/...``. 
- -Finally, a checksum file will be generated from the copied images using:: - - cd /var/lib/ironic/httpboot/overcloud-images - md5sum overcloud-full.* > MD5SUMS - -This is required since the checksums will no longer come from Glance. - -baremetal_deployment.yaml -~~~~~~~~~~~~~~~~~~~~~~~~~ - -This file will describe which the bare metal provisioning parameters. It will -provide the information that is currently implicitly deduced from the Heat -templates. - -.. note:: - We could continue extracting it from the templates well. However, a separate - file will avoid a dependency on any Heat-specific logic, potentially - benefiting standalone installer cases. It also provides the operators with - more control over the provisioning process. - -The format of this file resembles one of the ``roles_data`` file. It describes -the deployment parameters for each role. The file contains a list of roles, -each with a ``name``. Other accepted parameters are: - -``count`` - number of machines to deploy for this role. Defaults to 1. -``profile`` - profile (``compute``, ``control``, etc) to use for this role. Roughly - corresponds to a flavor name for a Nova based deployment. Defaults to no - profile (any node can be picked). -``hostname_format`` - a template for generating host names. This is similar to - ``HostnameFormatDefault`` of a ``roles_data`` file and should use - ``%index%`` to number the nodes. The default is ``%stackname%--%index%``. -``instances`` - list of instances in the format accepted by `deploy_instances workflow`_. - This allows to tune parameters per instance. - -Examples -^^^^^^^^ - -Deploy one compute and one control with any profile: - -.. code-block:: yaml - - - name: Compute - - name: Controller - -HA deployment with two computes and profile matching: - -.. 
code-block:: yaml - - - name: Compute - count: 2 - profile: compute - hostname_format: compute-%index%.example.com - - name: Controller - count: 3 - profile: control - hostname_format: controller-%index%.example.com - -Advanced deployment with custom hostnames and parameters set per instance: - -.. code-block:: yaml - - - name: Compute - profile: compute - instances: - - hostname: compute-05.us-west.example.com - nics: - - network: ctlplane - fixed_ip: 10.0.2.5 - traits: - - HW_CPU_X86_VMX - - hostname: compute-06.us-west.example.com - nics: - - network: ctlplane - fixed_ip: 10.0.2.5 - traits: - - HW_CPU_X86_VMX - - name: Controller - profile: control - instances: - - hostname: controller-1.us-west.example.com - swap_size_mb: 4096 - - hostname: controller-2.us-west.example.com - swap_size_mb: 4096 - - hostname: controller-3.us-west.example.com - swap_size_mb: 4096 - -deploy_roles workflow -~~~~~~~~~~~~~~~~~~~~~ - -The workflow ``tripleo.baremetal_deploy.v1.deploy_roles`` will accept the -information from baremetal_deployment.yaml_, convert it into the low-level -format accepted by the `deploy_instances workflow`_ and call the -`deploy_instances workflow`_ with it. - -It will accept the following mandatory input: - -``roles`` - parsed baremetal_deployment.yaml_ file. - -It will accept one optional input: - -``plan`` - plan/stack name, used for templating. Defaults to ``overcloud``. - -It will return the same output as the `deploy_instances workflow`_ plus: - -``environment`` - the content of the generated baremetal_environment.yaml_ file. - -Examples -^^^^^^^^ - -The examples from baremetal_deployment.yaml_ will be converted to: - -.. code-block:: yaml - - - hostname: overcloud-compute-0 - - hostname: overcloud-controller-0 - -.. 
code-block:: yaml - - - hostname: compute-0.example.com - profile: compute - - hostname: compute-1.example.com - profile: compute - - hostname: controller-0.example.com - profile: control - - hostname: controller-1.example.com - profile: control - - hostname: controller-2.example.com - profile: control - -.. code-block:: yaml - - - hostname: compute-05.us-west.example.com - nics: - - network: ctlplane - fixed_ip: 10.0.2.5 - profile: compute - traits: - - HW_CPU_X86_VMX - - hostname: compute-06.us-west.example.com - nics: - - network: ctlplane - fixed_ip: 10.0.2.5 - profile: compute - traits: - - HW_CPU_X86_VMX - - hostname: controller-1.us-west.example.com - profile: control - swap_size_mb: 4096 - - hostname: controller-2.us-west.example.com - profile: control - swap_size_mb: 4096 - - hostname: controller-3.us-west.example.com - profile: control - swap_size_mb: 4096 - -deploy_instances workflow -~~~~~~~~~~~~~~~~~~~~~~~~~ - -The workflow ``tripleo.baremetal_deploy.v1.deploy_instances`` is a thin wrapper -around the corresponding metalsmith_ calls. - -The following inputs are mandatory: - -``instances`` - list of requested instances in the format described in `Instance format`_. -``ssh_keys`` - list of SSH public keys contents to put on the machines. - -The following inputs are optional: - -``ssh_user_name`` - SSH user name to create, defaults to ``heat-admin`` for compatibility. -``timeout`` - deployment timeout, defaults to 3600 seconds. -``concurrency`` - deployment concurrency - how many nodes to deploy at the same time. Defaults - to 20, which matches introspection. - -Instance format -^^^^^^^^^^^^^^^ - -The instance record format closely follows one of the `metalsmith ansible -role`_ with only a few TripleO-specific additions and defaults changes. - -Either or both of the following fields must be present: - -``hostname`` - requested hostname. It is used to identify the deployed instance later on. - Defaults to ``name``. 
-``name`` - name of the node to deploy on. If ``hostname`` is not provided, ``name`` is - also used as the hostname. - -The following fields will be supported: - -``capabilities`` - requested node capabilities (except for ``profile`` and ``boot_option``). -``conductor_group`` - requested node's conductor group. This is primary for the *Edge* case when - nodes managed by the same Ironic can be physically separated. -``nics`` - list of requested NICs, see metalsmith_ documentation for details. Defaults - to ``{"network": "ctlplane"}`` which requests creation of a port on the - ``ctlplane`` network. -``profile`` - profile to use (e.g. ``compute``, ``control``, etc). -``resource_class`` - requested node's resource class, defaults to ``baremetal``. -``root_size_gb`` - size of the root partition in GiB, defaults to 49. -``swap_size_mb`` - size of the swap partition in MiB, if needed. -``traits`` - list of requested node traits. -``whole_disk_image`` - boolean, whether to treat the image (``overcloud-full.qcow2`` or provided - through the ``image`` field) as a whole disk image. Defaults to false. - -The following fields will be supported, but the defaults should work for all -but the most extreme cases: - -``image`` - file or HTTP URL of the root partition or whole disk image. -``image_kernel`` - file or HTTP URL of the kernel image (partition images only). -``image_ramdisk`` - file or HTTP URL of the ramdisk image (partition images only). -``image_checksum`` - checksum of URL of checksum of the root partition or whole disk image. - -Certificate authority configuration -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -If TLS is used in the undercloud, we need to make the nodes trust -the Certificate Authority (CA) that signed the TLS certificates. -If ``/etc/pki/ca-trust/source/anchors/cm-local-ca.pem`` exists, it will be -included in the generated configdrive, so that the file is copied into the same -location on target systems. 
- -Outputs -^^^^^^^ - -The workflow will provide the following outputs: - -``ctlplane_ips`` - mapping of host names to their respective IP addresses on the ``ctlplane`` - network. -``instances`` - mapping of host names to full instance representations with fields: - - ``node`` - Ironic node representation. - ``ip_addresses`` - mapping of network names to list of IP addresses on them. - ``hostname`` - instance hostname. - ``state`` - `metalsmith instance state`_. - ``uuid`` - Ironic node uuid. - -Also two subdicts of ``instances`` are provided: - -``existing_instances`` - only instances that already existed. -``new_instances`` - only instances that were deployed. - -.. note:: - Instances are distinguised by their hostnames. - -baremetal_environment.yaml -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -This file will serve as an output of the bare metal provisioning process. It -will be fed into the overcloud deployment command. Its goal is to provide -information for the *deployed-server* workflow. - -The file will contain the ``HostnameMap`` generated from role names and -hostnames, e.g. - -.. code-block:: yaml - - parameter_defaults: - HostnameMap: - overcloud-controller-0: controller-1.us-west.example.com - overcloud-controller-1: controller-2.us-west.example.com - overcloud-controller-2: controller-3.us-west.example.com - overcloud-novacompute-0: compute-05.us-west.example.com - overcloud-novacompute-1: compute-06.us-west.example.com - -undeploy_instances workflow -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The workflow ``tripleo.baremetal_deploy.v1.undeploy_instances`` will take a -list of hostnames and undeploy the corresponding nodes. - -Novajoin replacement --------------------- - -The *novajoin* service is currently used to enroll nodes into IPA and provide -them with TLS certificates. Unfortunately, it has hard dependencies on Nova, -Glance and Metadata API, even though the information could be provided via -other means. 
Actually, the metadata API cannot always be provided with Ironic -(notably, it may not be available when using isolated provisioning networks). - -A potential solution is to provide the required information via a configdrive, -and make the nodes register themselves instead. - -Alternatives ------------- - -* Do nothing, continue to rely on Nova and work around cases when it does - match our goals well. See `Problem Description`_ for why it is not desired. - -* Avoid metalsmith_, use OpenStack Ansible modules or Bifrost. They currently - lack features (such as VIF attach/detach API) and do not have any notion of - scheduling. Implementing sophisticated enough scheduling in pure Ansible - seems a serious undertaking. - -* Avoid Mistral, drive metalsmith_ via Ansible. This is a potential future - direction of this work, but currently it seems much simpler to call - metalsmith_ Python API from Mistral actions. We would anyway need Mistral ( - (or Ansible Tower) to drive Ansible, because we need some API level. - -* Remove Neutron in the same change. Would reduce footprint even further, but - some operators may find the presence of an IPAM desirable. Also setting up - static DHCP would increase the scope of the implementation substantially and - complicate the upgrade even further. - -* Keep Glance but remove Nova. Does not make much sense, since Glance is only a - requirement because of Nova. Ironic can deploy from HTTP or local file - locations just as well. - -Security Impact ---------------- - -* Overcloud images will be exposed to unauthenticated users via HTTP. We need - to communicate it clearly that secrets must not be built into images in plain - text and should be delivered via *configdrive* instead. If it proves - a problem, we can limit ourselves to providing images via local files. - - .. note:: - This issue exists today, as images are transferred via insecure medium in - all supported deploy methods. 
- -* Removing two services from the undercloud will reduce potential attack - surface and simplify audit. - -Upgrade Impact --------------- - -The initial version of this feature will be enabled for new deployments only. - -The upgrade procedure will happen within a release, not between releases. -It will go roughly as follows: - -#. Upgrade to a release where undercloud without Nova and Glance is supported. - -#. Make a full backup of the undercloud. - -#. Run ``openstack overcloud image upload`` to ensure that the - ``overcloud-full`` images are available via HTTP(s). - -The next steps will probably be automated via an Ansible playbook or a Mistral -workflow: - -#. Mark deployed nodes *protected* in Ironic to prevent undeploying them - by mistake. - -#. Run a Heat stack update replacing references to Nova servers with references - to deployed servers. This will require telling Heat not to remove the - instances. - -#. Mark nodes as managed by *metalsmith* (optional, but simplifies - troubleshooting). - -#. Update node's ``instance_info`` to refer to images over HTTP(s). - - .. note:: This may require temporary moving nodes to maintenance. - -#. Run an undercloud update removing Nova and Glance. - -Other End User Impact ---------------------- - -* Nova CLI will no longer be available for troubleshooting. It should not be a - big problem in reality, as most of the problems it is used for are caused by - using Nova itself. - - metalsmith_ provides a CLI tool for troubleshooting and advanced users. We - will document using it for tasks like determining IP addresses of nodes. - -* It will no longer be possible to update images via Glance API, e.g. from GUI. - It should not be a bit issue, as most of users use pre-built images. Advanced - operators are likely to resort to CLI anyway. - -* *No valid host found* error will no longer be seen by operators. 
metalsmith_ - provides more detailed errors, and is less likely to fail because of its - scheduling approach working better with the undercloud case. - -Performance Impact ------------------- - -* A substantial speed-up is expected for deployments because of removing - several layers of indirection. The new deployment process will also fail - faster if the scheduling request cannot be satisfied. - -* Providing images via local files will remove the step of downloading them - from Glance, providing even more speed-up for larger images. - -* An operator will be able to tune concurrency of deployment via CLI arguments - or GUI parameters, other than ``nova.conf``. - -Other Deployer Impact ---------------------- - -None - -Developer Impact ----------------- - -New features for bare metal provisioning will have to be developed with this -work in mind. It may mean implementing something in metalsmith_ code instead of -relying on Nova servers or flavors, or Glance images. - -Implementation -============== - -Assignee(s) ------------ - -Primary assignee: - Dmitry Tantsur, IRC: dtantsur, LP: divius - -Work Items ----------- - -Phase 1 (Stein, technical preview): - -#. Update ``openstack overcloud image upload`` to copy images into the HTTP - location and generate checksums. - -#. Implement `deploy_instances workflow`_ and `undeploy_instances workflow`_. - -#. Update validations to not fail if Nova and/or Glance are not present. - -#. Implement `deploy_roles workflow`_. - -#. Provide CLI commands for the created workflows. - -#. Provide an experimental OVB CI job exercising the new approach. - -Phase 2 (T+, fully supported): - -#. Update ``openstack overcloud deploy`` to support the new workflow. - -#. Support scaling down. - -#. Provide a `Novajoin replacement`_. - -#. Provide an upgrade workflow. - -#. Consider deprecating provisioning with Nova and Glance. - -Dependencies -============ - -* metalsmith_ library will be used for easier access to Ironic+Neutron API. 
- -Testing -======= - -Since testing this feature requires bare metal provisioning, a new OVB job will -be created for it. Initially it will be experimental, and will move to the -check queue before the feature is considered fully supported. - -Documentation Impact -==================== - -Documentation will have to be reworked to explain the new deployment approach. -Troubleshooting documentation will have to be updated. - -References -========== - -.. _metalsmith: https://docs.openstack.org/metalsmith/latest/ -.. _metalsmith ansible role: https://docs.openstack.org/metalsmith/latest/user/ansible.html#instance -.. _metalsmith instance state: https://docs.openstack.org/metalsmith/latest/reference/api/metalsmith.html#metalsmith.Instance.state diff --git a/specs/stein/ostempest-tripleo.rst b/specs/stein/ostempest-tripleo.rst deleted file mode 100644 index 045a6f86..00000000 --- a/specs/stein/ostempest-tripleo.rst +++ /dev/null @@ -1,154 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -====================================== -Integrate os_tempest role with TripleO -====================================== - -Launchpad Blueprint: - -https://blueprints.launchpad.net/tripleo/+spec/os-tempest-tripleo - -Tempest provides a set of API and integrations tests with batteries -included in order to validate the OpenStack Deployment. In TripleO -project, we are working towards using a unified tempest role i.e. -`os_tempest` provided by OpenStack Ansible project in TripleO CI -in order to foster collaboration with multiple deployment tools and -improve our testing strategies within OpenStack Community. - -Problem Description -=================== - -In the OpenStack Ecosystem, we have multiple *ansible based* deployment tools -that use their own roles for install/configure and running tempest testing. 
-Each of these roles is trying to do similar stuff tied to the different -deployment tools. For example: `validate-tempest` ansible role on TripleO CI -provides most of the stuff but it is tied with the TripleO deployment and -provides some nice feature (Like: bugcheck, failed tests email notification, -stackviz, python-tempestconf support for auto tempest.conf generation) which -are missing in other roles. It is leading to duplication and reduces what -tempest tests are not working across them, leading to no collaboration on -the Testing side. - -The OpenStack Ansible team provides `os_tempest` role for installing/ -configuring/running tempest and post tempest results processing and there -is a lot of duplication between their work and the roles used for testing -by the various deployment tools.It almost provides most of the stuff -provided by each of the deployment tool specific tempest roles. There are -few stuffs which are missing can be added in the role and make it useable -so that other deployment tools can consume it. - -Proposed Change -=============== - -Using unified `os_tempest` ansible role in TripleO CI will help to maintain -one less role within TripleO project and help us to collaborate with -openstack-ansible team in order to share/improve tests strategies across -OpenStack ecosystem and solve tempest issues fastly. - -In order to achieve that, we need: - * Improve `os_tempest` role to add support for package/container install, - python-tempestconf, stackviz, skip list, bugcheck, tempest - log collection at the proper place. - - * Have a working CI job on standalone running tempest from `os_tempest` - role as well as on OSA side. - - * Provide an easy migration path from validate-tempest role. 
- -Alternatives ------------- - -If we do not use the existing `os_tempest` role then we need to re-write the -`validate-tempest` role which will result in again duplication and it will -cost too much time and it also requires another set of efforts for adoption -in the community which does not seems to feasible. - -Security Impact ---------------- - -None - -Upgrade Impact --------------- - -None - -Other End User Impact ---------------------- - -We need to educate users for migrating to `os_tempest`. - -Performance Impact ------------------- - -None - -Other Deployer Impact ---------------------- - -None - -Developer Impact ----------------- - -Helps more collaboration and improves testing. - -Implementation -============== - -Assignee(s) ------------ - - -Primary assignee: - * Arx Cruz (arxcruz) - * Chandan Kumar (chkumar246) - * Martin Kopec (mkopec) - - -Work Items ----------- - -* Install tempest and it's dependencies from Distro packages -* Running tempest from containers -* Enable stackviz -* python-tempestconf support -* skiplist management -* Keeping all tempest related files at one place -* Bugcheck -* Standalone based TripleO CI job consuming os_tempest role -* Migration path from validate-tempest to os_tempest role -* Documentation update on How to use it -* RDO packaging - -Dependencies -============ - -Currently, os_tempest role depends on `python_venv_build` role when -tempest is installed from source (git, pip, venv). We need to package it in RDO. - -Testing -======= - -The unified tempest role `os_tempest` will replace validate-tempest -role with much more improvements. - - -Documentation Impact -==================== - -Documentation on how to consume `os_tempest` needs to be updated. 
-
-
-References
-==========
-
-* Unified Tempest role creation & collaboration email:
-  http://lists.openstack.org/pipermail/openstack-dev/2018-August/133838.html
-
-* os_tempest role:
-  http://git.openstack.org/cgit/openstack/openstack-ansible-os_tempest
diff --git a/specs/stein/podman.rst b/specs/stein/podman.rst
deleted file mode 100644
index caf6d8d4..00000000
--- a/specs/stein/podman.rst
+++ /dev/null
@@ -1,322 +0,0 @@
-..
-   This work is licensed under a Creative Commons Attribution 3.0 Unported
-   License.
-
-   http://creativecommons.org/licenses/by/3.0/legalcode
-
-=======================================
-Podman support for container management
-=======================================
-
-Launchpad blueprint:
-
-https://blueprints.launchpad.net/tripleo/+spec/podman-support
-
-There is an ongoing desire to manage TripleO containers with a set of tools
-designed to solve complex problems when deploying applications.
-The containerization of TripleO started with a Docker CLI implementation
-but we are looking at how we could leverage container orchestration
-on a Kubernetes friendly solution.
-
-
-Problem Description
-===================
-
-There are three problems that this document will cover:
-
-* There is an ongoing discussion on whether or not Docker will be
-  maintained on future versions of Red Hat platforms. There is a general
-  move toward OCI (Open Containers Initiative) conformant runtimes, such as
-  CRI-O (Container Runtime Interface for OCI).
-
-* The TripleO community has been looking at how we could orchestrate the
-  containers lifecycle with Kubernetes, in order to bring consistency with
-  other projects like OpenShift for example.
-
-* The TripleO project aims to work on the next version of Red Hat platforms,
-  therefore we are looking at Docker alternatives in the Stein cycle.
- - -Proposed Change -=============== - -Introduction ------------- - -The containerization of TripleO has been an ongoing effort since a few releases -now and we've always been looking at a step-by-step approach that tries to -maintain backward compatibility for the deployers and developers; and also -in a way where upgrade from a previous release is possible, without too much -pain. With that said, we are looking at a proposed change that isn't too much -disruptive but is still aligned with the general roadmap of the container -story and hopefully will drive us to manage our containers with Kubernetes. -We use Paunch project to provide an abstraction in our container integration. -Paunch will deal with container configurations formats with backends support. - -Integrate Podman CLI --------------------- - -The goal of Podman is to allow users to run standalone (non-orchestrated) -containers which is what we have been doing with Docker until now. -Podman also allows users to run groups of containers called Pods where a Pod is -a term developed for the Kubernetes Project which describes an object that -has one or more containerized processes sharing multiple namespaces -(Network, IPC and optionally PID). -Podman doesn't have any daemon which makes it lighter than Docker and use a -more traditional fork/exec model of Unix and Linux. -The container runtime used by Podman is runc. -The CLI has a partial backward compatibility with Docker so its integration -in TripleO shouldn't be that painful. - -It is proposed to add support for Podman CLI (beside Docker CLI) in TripleO -to manage the creation, deletion, inspection of our containers. -We would have a new parameter called ContainerCli in TripleO, that if set to -'podman', will make the container provisioning done with Podman CLI and not -Docker CLI. - -Because there is no daemon, there are some problems that we needs to solve: - -* Automatically restart failed containers. 
-* Automatically start containers when the host is (re)booted.
-* Start the containers in a specific order during host boot.
-* Provide a channel of communication with containers.
-* Run container healthchecks.
-
-To solve the first 3 problems, it is proposed to use Systemd:
-
-* Use Restart so we can configure a restart policy for our containers.
-  Most of our containers would run with Restart=always policy, but we'll
-  have to support some exceptions.
-* The systemd services will be enabled by default so the containers start
-  at boot.
-* The ordering will be managed by Wants which provides Implicit Dependencies
-  in Systemd. Wants is a weaker version of Requires. It'll allow us to make
-  sure we start HAproxy before Keepalived for example, if they are on the same
-  host. Because it is a weak dependency, they will only be honored if the
-  containers are running on the same host.
-* The way containers will be managed (start/stop/restart/status) will be
-  familiar to our operators used to controlling Systemd services. However
-  we probably want to make it clear that this is not our long term goal to
-  manage the containers with Systemd.
-
-The Systemd integration would be:
-
-* complete enough to cover our use-cases and bring feature parity with the
-  Docker implementation.
-* light enough to be able to migrate our container lifecycle to Kubernetes
-  in the future (e.g. CRI-O).
-
-
-For the fourth problem, we are still investigating the options:
-
-* varlink: interface description format and protocol that aims to make services
-  accessible to both humans and machines in the simplest feasible way.
-* CRI-O: CRI-based implementation of Kubernetes Container Runtime Interface
-  without Kubelet. For example, we could use a CRI-O Python binding to
-  communicate with the containers.
-* A dedicated image which runs the rootwrap daemon, with rootwrap filters to only run the allowed
-  commands. 
The controlling container will have the rootwrap socket mounted in so that it can - trigger allowed calls in the rootwrap container. For pacemaker, the rootwrap container will allow - image tagging. For neutron, the rootwrap container will spawn the processes inside the container, - so it will need to be a long-lived container that is managed outside paunch. - - +---------+ +----------+ - | | | | - | L3Agent +-----+ Rootwrap | - | | | | - +---------+ +----------+ - - In this example, the L3Agent container has mounted in the rootwrap daemon socket so that it can - run allowed commands inside the rootwrap container. - -Finally, the fifth problem is still an ongoing question. -There are some plans to support healthchecks in Podman but nothing has been -done as of today. We might have to implement something on our side with -Systemd. - -Alternatives -============ - -Two alternatives are proposed. - -CRI-O Integration ------------------ - -CRI-O is meant to provide an integration path between OCI conformant runtimes -and the kubelet. Specifically, it implements the Kubelet Container Runtime -Interface (CRI) using OCI conformant runtimes. Note that the CLI utility for -interacting with CRI-O isn't meant to be used in production, so managing -the containers lifecycle with a CLI is only possible with Docker or Podman. - -So instead of a smooth migration from Docker CLI to Podman CLI, we could go -straight to Kubernetes integration and convert our TripleO services to work -with a standalone Kubelet managed by CRI-O. -We would have to generate YAML files for each container in a Pod format, -so CRI-O can manage them. -It wouldn't require Systemd integration, as the containers will be managed -by Kubelet. -The operator would control the container lifecycle by using kubectl commands -and the automated deployment & upgrade process would happen in Paunch with -a Kubelet backend. 
-
-While this implementation will help us to move to a multi-node Kubernetes
-friendly environment, it remains the most risky option in terms of the
-quantity of work that needs to happen versus the time that we have to design,
-implement, test and ship the next tooling before the end of the Stein cycle.
-
-We also need to keep in mind that CRI-O and Podman share the containers/storage
-and containers/image libraries, so the issues that we have had with Podman
-will be hit with CRI-O as well.
-
-Keep Docker
------------
-
-We could keep Docker around and not change anything in the way we manage
-containers. We could also keep Docker and make it work with CRI-O.
-The only risk here is that Docker tooling might not be supported in the future
-by Red Hat platforms and we would be on our own if any issue arises with
-Docker. The TripleO community is always seeking a healthy and long term
-collaboration between us and the communities of the projects that we are
-interacting with.
-
-Proposed roadmap
-================
-
-In Stein:
-
-* Make Paunch support Podman as an alternative to Docker.
-* Get our existing services fully deployable on Podman, with parity to
-  what we had with Docker.
-* If we have time, add Podman pod support to Paunch.
-
-In "T" cycle:
-
-* Rewrite all of our container yaml to the pod format.
-* Add a Kubelet backend to Paunch (or change our agent tooling to call
-  Kubelet directly from Ansible).
-* Get our existing services fully deployable via Kubelet, with parity to
-  what we had with Podman / Docker.
-* Evaluate switching to Kubernetes proper.
-
-
-Security Impact
-===============
-
-The TripleO containers will rely on Podman security.
-If we don't use CRI-O or varlink to communicate with containers, we'll have
-to consider running some containers in privileged mode and mounting
-/var/lib/containers into the containers. This is a security concern and
-we'll have to evaluate it.
-Also, we'll have to make the proposed solution work with SELinux in Enforcing mode. 
-
-The Docker solution doesn't enforce selinux separation between containers.
-Podman does, and there's currently no easy way to deactivate that globally.
-So we'll basically get more secure containers with Podman, as we have to
-support separation from the very beginning.
-
-Upgrade Impact
-==============
-
-The containers that were managed by Docker Engine will be removed and
-provisioned into the new runtime. This process will happen when Paunch
-generates and executes the new container configuration.
-The operator shouldn't have to do any manual action and the migration will be
-automated, mainly by Paunch.
-The Containerized Undercloud upgrade job will test the upgrade of an Undercloud
-running Docker containers on Rocky and upgrade to Podman containers on Stein.
-The Overcloud upgrade jobs will also test this.
-
-Note: as the docker runtime doesn't have the selinux separation,
-some chcon/relabelling might be needed prior to the move to the podman runtime.
-
-End User Impact
-===============
-
-The operators won't be able to run the Docker CLI like before and instead will
-have to use the Podman CLI, where some backward compatibility is guaranteed.
-
-Performance Impact
-==================
-
-There are different aspects of performance that we'll need to investigate:
-
-* Container performance (relying on Podman).
-* How Systemd + Podman work together and how restart works versus Docker engine.
-
-Deployer Impact
-===============
-
-There shouldn't be much impact for the deployer, as we aim to make this change
-as transparent as possible. The only option (so far) that will be
-exposed to the deployer will be "ContainerCli", where only 'docker' and
-'podman' will be supported. If 'podman' is chosen, the transition will be
-automated.
-
-Developer Impact
-================
-
-There shouldn't be much impact for the developer of TripleO services, except
-that there are some things in Podman that slightly changed when comparing
-with Docker. 
For example Podman won't create the missing directories when -doing bind-mount into the containers, while Docker create them. - -Implementation -============== - -Contributors ------------- - -* Bogdan Dobrelya -* Cédric Jeanneret -* Emilien Macchi -* Steve Baker - -Work Items ----------- - -* Update TripleO services to work with Podman (e.g. fix bind-mounts issues). -* SELinux separation (relates to bind-mounts rights + some other issues when - we're calling iptables/other host command from a containe) -* Systemd integration. -* Healthcheck support. -* Socket / runtime: varlink? CRI-O? -* Upgrade workflow. -* Testing. -* Documentation for operators. - - -Dependencies -============ - -* The Podman integration depends a lot on how stable is the tool and how - often it is released and shipped so we can test it in CI. -* The Healthchecks interface depends on Podman's roadmap. - -Testing -======= - -First of all, we'll switch the Undercloud jobs to use Podman and this work -should be done by milestone-1. Both the deployment and upgrade jobs should -be switched and actually working. -The overcloud jobs should be switched by milestone-2. - -We'll keep Docker testing support until we keep testing running on CentOS7 -platform. - -Documentation Impact -==================== - -We'll need to document the new commands (mainly the same as Docker), and -the differences of how containers should be managed (Systemd instead of Docker -CLI for example). 
- - -References -========== - -* https://www.projectatomic.io/blog/2018/02/reintroduction-podman/ -* https://github.com/kubernetes-sigs/cri-o -* https://github.com/kubernetes/community/blob/master/contributors/devel/container-runtime-interface.md -* https://varlink.org/ -* https://github.com/containers/libpod/blob/master/transfer.md -* https://etherpad.openstack.org/p/tripleo-standalone-kubelet-poc diff --git a/specs/stein/safe-side-containers.rst b/specs/stein/safe-side-containers.rst deleted file mode 100644 index e1c3af5b..00000000 --- a/specs/stein/safe-side-containers.rst +++ /dev/null @@ -1,162 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -============================================================== -TripleO - Pattern to safely spawn a container from a container -============================================================== - -This spec describes a pattern which can be used as an alternative to -what TripleO does today to allow certain containers (Neutron, etc.) to -spawn side processes which require special privs like network -namespaces. Specifically it avoids exposing the docker socket or -using Podman nsenter hacks that have recently entered the codebase in Stein. - -Problem Description -=================== - -In Queens TripleO implemented a containerized architecture with the goal of -containerizing all OpenStack services. This architecture was a success but -a few applications had regressions when compared with their baremetal deployed -equivalent. One of these applications was Neutron, which requires the ability -to spawn long lived "side" processes that are launched directly from the -Neutron agents themselves. In the original Queens architecture Neutron -launched these side processes inside of the agent container itself which -caused a service disruption if the neutron agents themselves were restarted. 
-This was previously not the case on baremetal as these processes would continue -running across an agent restart/upgrade. - -The work around in Rocky was to add "wrapper" scripts for Neutron agents and -to expose the docker socket to each agent container. These wrappers scripts -were bind mounted into the containers so that they overwrote the normal location -of the side process. Using this crude mechanism binaries like 'dnsmasq' and -'haproxy' would instead launch a shell script instead of the normal binary and -these custom shell scripts relied on the an exposed docker socket from the -host to be able to launch a side container with the same arguments supplied -to the script. - -This mechanism functionally solved the issues with our containerization but -exposed some security problems in that we were now exposing the ability to -launch any container to these Neutron agent containers (privileged containers -with access to a docker socket). - -In Stein things changed with our desire to support Podman. Unlike Docker -Podman does not include a daemon on the host. All Podman commands are executed -via a CLI which runs the command on the host directly. We landed -patches which required Podman commands to use nsenter to enter the hosts -namespace and run the commands there directly. Again this mechanism requires -extra privileges to be granted to the Neutron agent containers in order for -them to be able to launch these commands. Furthermore the mechanism is -a bit cryptic to support and debug in the field. - -Proposed Change -=============== - -Overview --------- - -Use systemd on the host to launch the side process containers directly with -support for network namespaces that Neutron agents require. The benefit of -this approach is that we no longer have to give the Neutron containers privs -to launch containers which they shouldn't require. - -The pattern could work like this: - -#. A systemd.path file monitors a know location on the host for changes. 
- Example (neutron-dhcp-dnsmasq.path): - -.. code-block:: yaml - - [Path] - PathModified=/var/lib/neutron/neutron-dnsmasq-processes-timestamp - PathChanged=/var/lib/neutron/neutron-dnsmasq-processes-timestamp - - [Install] - WantedBy=multi-user.target - -#. When systemd.path notices a change it fires the service for this - path file: - Example (neutron-dhcp-dnsmasq.service): - -.. code-block:: yaml - - [Unit] - Description=neutron dhcp dnsmasq sync service - - [Service] - Type=oneshot - ExecStart=/usr/local/bin/neutron-dhcp-dnsmasq-process-sync - User=root - -#. We use the same "wrapper scripts" used today to write two files. The - first file is a dump of CLI arguments used to launch the process - on the host. This file can optionally include extra data like - network namespaces which are required for some neutron side processes. - The second file is a timestamp which is monitored by systemd.path - on the host for changes and is used as a signal that it needs to - process the first file with arguments. - -# When a change is detected the systemd.service above executes a script on the - host to cleanly launch containerized side processes. When the script finishes - launching processes it truncates the file to start with a clean slate. - -# Both the wrapper scripts and the host scripts use flock to eliminate race - conditions which could cause issues in relaunching or missed containers. - -Alternatives ------------- - -With Podman an API like varlink would be an option however it would likely -still required exposure to a socket on the host which would involve -extra privileges like what we have today. This would avoid the nsenter hacks -however. - -An architecture like Kubernetes would give us an API which could be used -to launch containers directly via the COE. - -Additionally an external process manager in Neutron that is "containers aware" -could be written to improve either of the above options. 
The current python -in Neutron was writtin primarily for launching processes on baremetal with -assumptions that some of the processes it launches are meant to live across -a contain restart. Implementing a class that can launch side processes via a -clean interface rather than overwriting binaries would be desirable. -Classes which supported launching containers via Kubernetes and or Systemd -via the host directly could be supported. - -Security Impact ---------------- - -This mechanism should allow us to remove some of the container privileges for -neutron agents which in the past were used to execute containers. It is -a more restrictive crude interface that allows the containers only to launch -a specific type of process rather than any container it chooses. - -Upgrade Impact --------------- - -The side process containers should be the same regardless of how they are -launched so the upgrade should be minimal. - - -Implementation -============== - -Assignee(s) ------------ - -Primary assignee: - dan-prince - -Other contributors: - emilienm - -Work Items ----------- - -# Ansible playbook to create systemd files, wrappers - -# TripleO Heat template updates to use the new playbooks - -# Remove/deprecate the old docker.socket and nsenter code from puppet-tripleo diff --git a/specs/stein/tripleo-routed-networks-templates.rst b/specs/stein/tripleo-routed-networks-templates.rst deleted file mode 100644 index c1b29463..00000000 --- a/specs/stein/tripleo-routed-networks-templates.rst +++ /dev/null @@ -1,522 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. 
-
-   http://creativecommons.org/licenses/by/3.0/legalcode
-
-========================================================
-TripleO Routed Networks Deployment (Spine-and-Leaf Clos)
-========================================================
-
-https://blueprints.launchpad.net/tripleo/+spec/tripleo-routed-networks-templates
-
-This blueprint is part of the series tripleo-routed-networks-deployment [0]_.
-
-TripleO uses shared L2 networks for all networks except the provisioning
-network today. (Support for an L3 provisioning network was added in Queens.)
-
-L3 support on the provisioning network uses network segments, a concept
-from Neutron routed networks; with segments we can represent more than one
-subnet per VLAN. Without network segments, we would be limited to one subnet
-per VLAN.
-
-For the non-provisioning networks we have no way to model a true L3 routed
-network in TripleO today. When deploying such an architecture we currently
-create custom (neutron) networks for all the different l2 segments for each
-isolated network. While this approach works it comes with some caveats.
-
-This spec covers refactoring the TripleO Heat Templates to support deployment
-onto networks which are segregated into multiple layer 2 domains with routers
-forwarding traffic between layer 2 domains.
-
-
-Problem Description
-===================
-
-The master blueprint for routed networks for deployments breaks the problem
-set into multiple parts [0]_. This blueprint presents the problems which are
-applicable to this blueprint below.
-
-
-Problem Descriptions
-====================
-
-
-Problem #1: Deploy systems onto a routed provisioning network.
-
-While we can model a routed provisioning network and deploy systems on top of
-that network today, doing so requires additional complex configuration, such
-as:
-
-  * Setting up the required static routes to ensure traffic within the L3
-    control plane takes the desired path throughout the network.
-  * L2 segments use different router addresses. 
- * L2 segments may use different subnet masks. - * Other L2 segment property differences. - - -This configuration is essentially manually passing in information in the -templates to deploy the overcloud. Information that was already provided when -deploying the undercloud. While this works, it increases complexity and the -possibility that the user provides incorrect configuration data. - -We should be able to get as much of this information based on what was provided -when deploying the undercloud. - -In order to support this model, there are some requirements that have to be -met in Heat and Neutron. - -**Alternative approaches to Problem #1:** - - -Approach 1: - -.. NOTE:: This is what we currently do. - -Since we control addresses and routes on the host nodes using a -combination of Heat templates and os-net-config, it may be possible to use -static routes to supernets to provide L2 adjacency, rather than relying on -Neutron to generate dynamic lists of routes that would need to be updated -on all hosts. - -The end result of this is that each host has a set of IP addresses and routes -that isolate traffic by function. In order for the return traffic to also be -isolated by function, similar routes must exist on both hosts, pointing to the -local gateway on the local subnet for the larger supernet that contains all -Internal API subnets. - -The downside of this is that we must require proper supernetting, and this may -lead to larger blocks of IP addresses being used to provide ample space for -scaling growth. For instance, in the example above an entire /16 network is set -aside for up to 255 local subnets for the Internal API network. This could be -changed into a more reasonable space, such as /18, if the number of local -subnets will not exceed 64, etc. This will be less of an issue with native IPv6 -than with IPv4, where scarcity is much more likely. 
- -Approch 2: - -Instead of passing parameters such as ControlPlaneCidr, -ControlPlaneDefaultRoute etc implement Neutron RFE [5]_ and Heat RFE [6]_. In -tripleo-heat-templates we can then use get_attr to get the data. And we leave -it to neutron to calculate and provide the routes for the L3 network. - -This would require [3]_, which I believe was in quite good shape before it was -abandoned due to activity policy. (An alternative would be to change -os-net-config to have an option to only change and apply routing configuration. -Something like running `ifdown-routes -`_ -/ -`ifup-routes -`_ -, however [3]_ is likely the better solution.) - - ------- - -**Problem #2: Static IP assignment: Choosing static IPs from the correct -subnet** - -Some roles, such as Compute, can likely be placed in any subnet, but we will -need to keep certain roles co-located within the same set of L2 domains. For -instance, whatever role is providing Neutron services will need all controllers -in the same L2 domain for VRRP to work properly. - -The network interfaces will be configured using templates that create -configuration files for os-net-config. The IP addresses that are written to -each node's configuration will need to be on the correct subnet for each host. -In order for Heat to assign ports from the correct subnets, we will need to -have a host-to-subnets mapping. - -Possible Solutions, Ideas or Approaches: - -.. NOTE:: We currently use #2, by specifying parameters for each role. - -1. The simplest implementation of this would probably be a mapping of - role/index to a set of subnets, so that it is known to Heat that - Controller-1 is in subnet set X and Compute-3 is in subnet set Y. The node - would then have the ip and subnet info for each network chosen from the - appropriate set of subnets. For other nodes, we would need to - programatically determine which subnets are correct for a given node. -2. 
We could associate particular subnets with roles, and then use one role - per L2 domain (such as per-rack). This might be achieved with a map of - roles to subnets, or by specifying parameters for each role such as: - supernet, subnet (ID and/or ip/netmask), and subnet router. -3. Initial implementation might follow the model for isolated networking - demonstrated by the environments/ips-from-pool-all.yaml. Developing the - ips-from-pool model first will allow testing various components with - spine-and-leaf while the templates that use dynamic assignment of IPs - within specified subnets are developed. -4. The roles and templates should be refactored to allow for dynamic IP - assignment within subnets associated with the role. We may wish to evaluate - the possibility of storing the routed subnets in Neutron using the routed - networks extensions that are still under development. However, in this - case, This is probably not required to implement separate subnets in each - rack. -5. A scalable long-term solution is to map which subnet the host is on - during introspection. If we can identify the correct subnet for each - interface, then we can correlate that with IP addresses from the correct - allocation pool. This would have the advantage of not requiring a static - mapping of role to node to subnet. In order to do this, additional - integration would be required between Ironic and Neutron (to make Ironic - aware of multiple subnets per network, and to add the ability to make - that association during introspection. - -We will also need to take into account sitations where there are heterogeneous -hardware nodes in the same layer 2 broadcast domain (such as within a rack). - -.. Note:: This can be done either using node groups in NetConfigDataLookup as - implemented in review [4]_ or by using additional custom roles. 
- ------- - -**Problem #3: Isolated Networking Requires Static Routes to Ensure Correct VLAN -is Used** - -In order to continue using the Isolated Networks model, routes will need to be -in place on each node, to steer traffic to the correct VLAN interfaces. The -routes are written when os-net-config first runs, but may change. We -can't just rely on the specific routes to other subnets, since the number of -subnets will increase or decrease as racks are added or taken away. - -Possible Solutions, Ideas or Approaches: - -1. Require that supernets are used for various network groups. For instance, - all the Internal API subnets would be part of a supernet, for instance - 172.17.0.0/16 could be used, and broken up into many smaller subnets, such - as /24. This would simplify the routes, since only a single route for - 172.17.0.0/16 would be required pointing to the local router on the - 172.17.x.0/24 network. - - Example: - Suppose 2 subnets are provided for the Internal API network: 172.19.1.0/24 - and 172.19.2.0/24. We want all Internal API traffic to traverse the Internal - API VLANs on both the controller and a remote compute node. The Internal API - network uses different VLANs for the two nodes, so we need the routes on the - hosts to point toward the Internal API gateway instead of the default - gateway. This can be provided by a supernet route to 172.19.x.x pointing to - the local gateway on each subnet (e.g. 172.19.1.1 and 172.19.2.1 on the - respective subnets). This could be represented in an os-net-config with the - following:: - - - - type: interface - name: nic3 - addresses: - - - ip_netmask: {get_param: InternalApiXIpSubnet} - routes: - - - ip_netmask: {get_param: InternalApiSupernet} - next_hop: {get_param: InternalApiXDefaultRoute} - - Where InternalApiIpSubnet is the IP address on the local subnet, - InternalApiSupernet is '172.19.0.0/16', and InternalApiRouter is either - 172.19.1.1 or 172.19.2.1 depending on which local subnet the host belongs to. 
-2. Modify os-net-config so that routes can be updated without bouncing - interfaces, and then run os-net-config on all nodes when scaling occurs. - A review for this functionality is in progress [3]_. -3. Instead of passing parameters to THT about routes (or supernet routes), - implement Neutron RFE [5]_ and Heat RFE [6]_. In tripleo-heat-templates we - can then use get_attr to get the data we currently read from user provided - parameters such as the InternalApiSupernet and InternalApiXDefaultRoute in - the example above. (We might also consider replacing [6]_ with a change - extending the ``network/ports/port.j2`` in tripleo-heat-templates to output - this data.) - -os-net-config configures static routes for each interface. If we can keep the -routing simple (one route per functional network), then we would be able to -isolate traffic onto functional VLANs like we do today. - -It would be a change to the existing workflow to have os-net-config run on -updates as well as deployment, but if this were a non-impacting event (the -interfaces didn't have to be bounced), that would probably be OK. (An -alternative is to add an option to have an option in os-net-config that only -adds new routes. Something like, os-net-config --no-activate + -ifdown-routes/ifup-routes.) - -At a later time, the possibility of using dynamic routing should be considered, -since it reduces the possibility of user error and is better suited to -centralized management. The overcloud nodes might participate in internal -routing protocols. SDN solutions are another way to provide this, or other -approaches may be considered, such as setting up OVS tunnels. - ------- - -**Problem #4: Isolated Networking in TripleO Heat Templates Needs to be -Refactored** - -The current isolated networking templates use parameters in nested stacks to -define the IP information for each network. 
There is no room in the current -schema to define multiple subnets per network, and no way to configure the -routers for each network. These values are provided by single parameters. - -Possible Solutions, Ideas or Approaches: - -1. We would need to refactor these resources to provide different routers - for each network. -2. We extend the custom and isolated networks in TripleO to add support for - Neutron routed-networks (segments) and multiple subnets. Each subnet will be - mapped to a different L2 segment. We should make the extension backward - compatible and only enable Neutron routed-networks (I.e associate subnets - with segments.) when the templates used define multiple subnets on a - network. To enable this we need some changes to land in Neutron and Heat, - these are the in-progress reviews: - - * Allow setting network-segment on subnet update [7]_ - * Allow updating the segment property of OS::Neutron::Subnet [8]_ - * Add first_segment convenience attr to OS::Neutron::Net [9]_ - - - -Proposed Change -=============== -The proposed changes are discussed below. - -Overview --------- - -In order to provide spine-and-leaf networking for deployments, several changes -will have to be made to TripleO: - -1. Support for DHCP relay in Neutron DHCP servers (in progress), and Ironic - DHCP servers (this is addressed in separate blueprints in the same series). -2. Refactor assignment of Control Plane IPs to support routed networks (that - is addressed by a separate blueprint: tripleo-predictable-ctlplane-ips [2]_. -3. Refactoring of TripleO Heat Templates network isolation to support multiple - subnets per isolated network, as well as per-subnet and supernet routes. -4. Changes to Infra CI to support testing. -5. Documentation updates. - -Alternatives ------------- - -The approach outlined here is very prescriptive, in that the networks must be -known ahead of time, and the IP addresses must be selected from the appropriate -pool. 
This is due to the reliance on static IP addresses provided by Heat. -Heat will have to model the subnets and associate them with roles (node -groups). - -One alternative approach is to use DHCP servers to assign IP addresses on all -hosts on all interfaces. This would simplify configuration within the Heat -templates and environment files. Unfortunately, this was the original approach -of TripleO, and it was deemed insufficient by end-users, who wanted stability -of IP addresses, and didn't want to have an external dependency on DHCP. - -Another approach is to use the DHCP server functionality in the network switch -infrastructure in order to PXE boot systems, then assign static IP addresses -after the PXE boot is done via DHCP. This approach only solves for part of the -requirement: the net booting. It does not solve the desire to have static IP -addresses on each network. This could be achieved by having static IP addresses -in some sort of per-node map. However, this approach is not as scalable as -programatically determining the IPs, since it only applies to a fixed number of -hosts. We want to retain the ability of using Neutron as an IP address -management (IPAM) back-end, ideally. - -Another approach which was considered was simply trunking all networks back -to the Undercloud, so that dnsmasq could respond to DHCP requests directly, -rather than requiring a DHCP relay. Unfortunately, this has already been -identified as being unacceptable by some large operators, who have network -architectures that make heavy use of L2 segregation via routers. This also -won't work well in situations where there is geographical separation between -the VLANs, such as in split-site deployments. - -Security Impact ---------------- - -One of the major differences between spine-and-leaf and standard isolated -networking is that the various subnets are connected by routers, rather than -being completely isolated. 
This means that without proper ACLs on the routers, -networks which should be private may be opened up to outside traffic. - -This should be addressed in the documentation, and it should be stressed that -ACLs should be in place to prevent unwanted network traffic. For instance, the -Internal API network is sensitive in that the database and message queue -services run on that network. It is supposed to be isolated from outside -connections. This can be achieved fairly easily if supernets are used, so that -if all Internal API subnets are a part of the 172.19.0.0/16 supernet, a simple -ACL rule will allow only traffic between Internal API IPs (this is a simplified -example that would be generally applicable to all Internal API router VLAN -interfaces or for a global ACL):: - - allow traffic from 172.19.0.0/16 to 172.19.0.0/16 - deny traffic from * to 172.19.0.0/16 - -The isolated networks design separates control plane traffic from data plane -traffic, and separates administrative traffic from tenant traffic. In order -to preserve this separatation of traffic, we will use static routes pointing -to supernets. This ensures all traffic to any subnet within a network will exit -via the interface attached to the local subnet in that network. It will be -important for the end user to implement ACLs in a routed network to prevent -remote access to networks that would be completely isolated in a shared L2 -deployment. - -Other End User Impact ---------------------- - -Deploying with spine-and-leaf will require additional parameters to -provide the routing information and multiple subnets required. This will have -to be documented. Furthermore, the validation scripts may need to be updated -to ensure that the configuration is validated, and that there is proper -connectivity between overcloud hosts. - -Performance Impact ------------------- - -Much of the traffic that is today made over layer 2 will be traversing layer -3 routing borders in this design. 
That adds some minimal latency and overhead, -although in practice the difference may not be noticeable. One important -consideration is that the routers must not be too overcommitted on their -uplinks, and the routers must be monitored to ensure that they are not acting -as a bottleneck, especially if complex access control lists are used. - -Other Deployer Impact ---------------------- - -A spine-and-leaf deployment will be more difficult to troubleshoot than a -deployment that simply uses a set of VLANs. The deployer may need to have -more network expertise, or a dedicated network engineer may be needed to -troubleshoot in some cases. - -Developer Impact ----------------- - -Spine-and-leaf is not easily tested in virt environments. This should be -possible, but due to the complexity of setting up libvirt bridges and -routes, we may want to provide a pre-configured quickstart environment -for testing. This may involve building multiple libvirt bridges -and routing between them on the Undercloud, or it may involve using a -DHCP relay on the virt-host as well as routing on the virt-host to simulate -a full routing switch. A plan for development and testing will need to be -developed, since not every developer can be expected to have a routed -environment to work in. It may take some time to develop a routed virtual -environment, so initial work will be done on bare metal. - -A separate blueprint will cover adding routed network support to -tripleo-quickstart. - -Implementation -============== - -Assignee(s) ------------ - -Primary assignee: - - Dan Sneddon - -Other assignees: - - Bob Fournier - - Harald Jensas - - Steven Hardy - - Dan Prince - -Approver(s) ------------ - -Primary approver: - Alex Schultz - -Work Items ----------- - -1. Implement support for DHCP on routed networks using DHCP relay, as - described in Problem #1 above. -2. 
Add parameters to Isolated Networking model in Heat to support supernet - routes for individual subnets, as described in Problem #3. -3. Modify Isolated Networking model in Heat to support multiple subnets, as - described in Problem #4. -4. Implement support for iptables on the Controller, in order to mitigate - the APIs potentially being reachable via remote routes, as described in - the Security Impact section. Alternatively, document the mitigation - procedure using ACLs on the routers. -5. Document the testing procedures. -6. Modify the documentation in tripleo-docs to cover the spine-and-leaf case. -7. Modify the Ironic-Inspector service to record the host-to-subnet mappings, - perhaps during introspection, to address Problem #2 (long-term). - - -Implementation Details ----------------------- - -Workflow: - -1. Operator configures DHCP networks and IP address ranges -2. Operator imports baremetal instackenv.json -3. When introspection or deployment is run, the DHCP server receives the DHCP - request from the baremetal host via DHCP relay -4. If the node has not been introspected, reply with an IP address from the - introspection pool* and the inspector PXE boot image -5. If the node already has been introspected, then the server assumes this is - a deployment attempt, and replies with the Neutron port IP address and the - overcloud-full deployment image -6. The Heat templates are processed which generate os-net-config templates, and - os-net-config is run to assign static IPs from the correct subnets, as well - as routes to other subnets via the router gateway addresses. - -When using spine-and-leaf, the DHCP server will need to provide an -introspection IP address on the appropriate subnet, depending on the -information contained in the DHCP relay packet that is forwarded by the segment -router. 
dnsmasq will automatically match the gateway address (GIADDR) of the -router that forwarded the request to the subnet where the DHCP request was -received, and will respond with an IP and gateway appropriate for that subnet. - -The above workflow for the DHCP server should allow for provisioning IPs on -multiple subnets. - -Dependencies -============ - -There may be a dependency on the Neutron Routed Networks. This won't be clear -until a full evaluation is done on whether we can represent spine-and-leaf -using only multiple subnets per network. - -There will be a dependency on routing switches that perform DHCP relay service -for production spine-and-leaf deployments. - -Testing -======= - -In order to properly test this framework, we will need to establish at least -one CI test that deploys spine-and-leaf. As discussed in this spec, it isn't -necessary to have a full routed bare metal environment in order to test this -functionality, although there is some work to get it working in virtual -environments such as OVB. - -For bare metal testing, it is sufficient to trunk all VLANs back to the -Undercloud, then run DHCP proxy on the Undercloud to receive all the -requests and forward them to br-ctlplane, where dnsmasq listens. This -will provide a substitute for routers running DHCP relay. For Neutron -DHCP, some modifications to the iptables rule may be required to ensure -that all DHCP requests from the overcloud nodes are received by the -DHCP proxy and/or the Neutron dnsmasq process running in the dhcp-agent -namespace. - -Documentation Impact -==================== - -The procedure for setting up a dev environment will need to be documented, -and a work item mentions this requirement. - -The TripleO docs will need to be updated to include detailed instructions -for deploying in a spine-and-leaf environment, including the environment -setup. 
Covering specific vendor implementations of switch configurations -is outside this scope, but a specific overview of required configuration -options should be included, such as enabling DHCP relay (or "helper-address" -as it is also known) and setting the Undercloud as a server to receive -DHCP requests. - -The updates to TripleO docs will also have to include a detailed discussion -of choices to be made about IP addressing before a deployment. If supernets -are to be used for network isolation, then a good plan for IP addressing will -be required to ensure scalability in the future. - -References -========== - -.. [0] `Blueprint: TripleO Routed Networks for Deployments `_ -.. [2] `Spec: User-specifiable Control Plane IP on TripleO Routed Isolated Networks `_ -.. [3] `Review: Modify os-net-config to make changes without bouncing interface `_ -.. [4] `Review: Add support for node groups in NetConfigDataLookup `_ -.. [5] `[RFE] Create host-routes for routed networks (segments) `_ -.. [6] `[RFE] Extend attributes of Server and Port resource to client interface configuration data `_ -.. [7] `Allow setting network-segment on subnet update `_ -.. [8] `Allow updating the segment property of OS::Neutron::Subnet `_ -.. [9] `Add first_segment convenience attr to OS::Neutron::Net `_ diff --git a/specs/stein/upgrades-with-operating-system.rst b/specs/stein/upgrades-with-operating-system.rst deleted file mode 100644 index e7d17004..00000000 --- a/specs/stein/upgrades-with-operating-system.rst +++ /dev/null @@ -1,747 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -================================================= -Major Upgrades Including Operating System Upgrade -================================================= - -https://blueprints.launchpad.net/tripleo/+spec/upgrades-with-os - -.. 
note:: - Abbreviation "OS" in this spec stands for "operating system", not - "OpenStack". - -So far all our update and upgrade workflows included doing minor -operating system updates (essentially a ``yum update``) on the -machines managed by TripleO. This will need to change as we can't stay -on a single OS release indefinitely -- we'll need to perform a major -OS upgrade. The intention is for the TripleO tooling to help with the -OS upgrade significantly, rather than leaving this task entirely to -the operator. - - -Problem Description -=================== - -We need to upgrade undercloud and overcloud machines to a new release -of the operating system. - -We would like to provide an upgrade procedure both for environments -where Nova and Ironic are managing the overcloud servers, and -"Deployed Server" environments where we don't have control over -provisioning. - -Further constraints are imposed by Pacemaker clusters: Pacemaker is -non-containerized, so it is upgraded via packages together with the -OS. While Pacemaker would be capable of a rolling upgrade, Corosync -also changes major version, and starts to rely on knet for the link -protocol layer, which is incompatible with previous version of -Corosync. This introduces additional complexity: we can't do OS -upgrade in a rolling fashion naively on machines which belong to the -Pacemaker cluster (controllers). - - -Proposed Change - High Level View -================================= - -The Pacemaker constraints will be addressed by performing a one-by-one -(though not rolling) controller upgrade -- temporarily switching to a -single-controller cluster on the new OS, and gradually upgrading the -rest. This will also require implementation of persistent OpenStack -data transfer from older to newer OS releases (to preserve uptime and -for easier recoverability in case of failure). 
- -We will also need to ensure that at least 2 ceph-mon services run at -all times, so ceph-mon services will keep running even after we switch -off Pacemaker and OpenStack on the 2 older controllers. - -We should scope two upgrade approaches: full reprovisioning, and -in-place upgrade via an upgrade tool. Each come with different -benefits and drawbacks. The proposed CLI workflows should ideally be -generic enough to allow picking the final preferred approach of -overcloud upgrade late in the release cycle. - -While the overcloud approach is still wide open, undercloud seems to -favor an in-place upgrade due to not having a natural place to persist -the data during reprovisioning (e.g. we can't assume overcloud -contains Swift services), but that could be overcome by making the -procedure somewhat more manual and shifting some tasks onto the -operator. - -The most viable way of achieving an in-place (no reprovisioning) -operating system upgrade currently seems to be `Leapp`_, "an app -modernization framework", which should include in-place upgrade -capabilites. - -Points in favor of in-place upgrade: - -* While some data will need to be persisted and restored regardless of - approach taken (to allow safe one-by-one upgrade), reprovisioning - may also require managing data which would otherwise persist on its - own during an in-place upgrade. - -* In-place upgrade allows using the same approach for Nova+Ironic and - Deployed Server environments. If we go with reprovisioning, on - Deployed Server environments the operator will have to reprovision - using their own tooling. - -* Environments with a single controller will need different DB - mangling procedure. Instead of ``system_upgrade_transfer_data`` step - below, their DB data will be included into the persist/restore - operations when reprovisioning the controller. - -Points in favor of reprovisioning: - -* Not having to integrate with external in-place upgrade tool. E.g. 
in - case of CentOS, there's currently not much info available about - in-place upgrade capabilities. - -* Allows to make changes which wouldn't otherwise be possible, - e.g. changing a filesystem. - -* Reprovisioning brings nodes to a clean state. Machines which are - continuously upgraded without reprovisioining can potentially - accumulate unwanted artifacts, resulting in increased number of - problems/bugs which only appear after an upgrade, but not on fresh - deployments. - - -Proposed Change - Operator Workflow View -======================================== - -The following is an example of expected upgrade workflow in a -deployment with roles: **ControllerOpenstack, Database, Messaging, -Networker, Compute, CephStorage**. It's formulated in a -documentation-like manner so that we can best imagine how this is -going to work from operator's point of view. - - -Upgrading the Undercloud ------------------------- - -The in-place undercloud upgrade using Leapp will likely consist of the -following steps. First, prepare for OS upgrade via Leapp, downloading -the necessary packages:: - - leapp upgrade - -Then reboot, which will upgrade the OS:: - - reboot - -Then run the undercloud upgrade, which will bring back the undercloud -services (using the newer OpenStack release):: - - openstack tripleo container image prepare default \ - --output-env-file containers-prepare-parameter.yaml - openstack undercloud upgrade - -If we wanted or needed to upgrade the undercloud via reprovisioning, -we would use a `backup and restore`_ procedure as currently -documented, with restore perhaps being utilized just partially. - - -Upgrading the Overcloud ------------------------ - -#. **Update the Heat stack**, generate Heat outputs for building - upgrade playbooks:: - - openstack overcloud upgrade prepare - - Notes: - - * Among the ```` should be - ``containers-prepare-parameter.yaml`` bringing in the containers - of newer OpenStack release. - -#. 
**Prepare an OS upgrade on one machine from each of the - "schema-/cluster-sensitive" roles**:: - - openstack overcloud upgrade run \ - --tags system_upgrade_prepare \ - --limit controller-openstack-0,database-0,messaging-0 - - Notes: - - * This stops all services on the nodes selected. - - * For external installers like Ceph, we'll have a similar - external-upgrade command, which can e.g. remove the nodes from - the Ceph cluster:: - - openstack overcloud external-upgrade run \ - --tags system_upgrade_prepare \ - -e system_upgrade_nodes=controller-openstack-0,database-0,messaging-0 - - * If we use in-place upgrade: - - * This will run the ``leapp upgrade`` command. It should use - newer OS and newer OpenStack repos to download packages, and - leave the node ready to reboot into the upgrade process. - - * Caution: Any reboot after this is done on a particular node - will cause that node to automatically upgrade to newer OS. - - * If we reprovision: - - * This should persist node's important data to the - undercloud. (Only node-specific data. It would not include - e.g. MariaDB database content, which would later be transferred - from one of the other controllers instead.) - - * Services can export their ``upgrade_tasks`` to do the - persistence, we should provide an Ansible module or role to - make it DRY. - -#. **Upload new overcloud base image**:: - - openstack overcloud image upload --update-existing \ - --image-path /home/stack/new-images - - Notes: - - * For Nova+Ironic environments only. After this step any new or - reprovisioned nodes will receive the new OS. - -#. **Run an OS upgrade on one node from each of the - "schema-/cluster-sensitive" roles** or **reprovision those nodes**. 
- - Only if we do reprovisioning:: - - openstack server rebuild controller-openstack-0 - openstack server rebuild database-0 - openstack server rebuild messaging-0 - - openstack overcloud admin authorize \ - --overcloud-ssh-user \ - --overcloud-ssh-key \ - --overcloud-ssh-network \ - --limit controller-openstack-0,database-0,messaging-0 - - Both reprovisioning and in-place:: - - openstack overcloud upgrade run \ - --tags system_upgrade_run \ - --limit controller-openstack-0,database-0,messaging-0 - - Notes: - - * This step either performs a reboot of the nodes and lets Leapp - upgrade them to newer OS, or reimages the nodes with a fresh new - OS image. After they come up, they'll have newer OS but no - services running. The nodes can be checked before continuing. - - * In case of reprovisioning: - - * The ``overcloud admin authorize`` will ensure existence of - ``tripleo-admin`` user and authorize Mistral's ssh keys for - connection to the newly provisioned nodes. The - ``--overcloud-ssh-*`` work the same as for ``overcloud - deploy``. - - * The ``--tags system_upgrade_run`` is still necessary because it - will restore the node-specific data from the undercloud. - - * Services can export their ``upgrade_tasks`` to do the - restoration, we should provide an Ansible module or role to - make it DRY. - - * Ceph-mon count is reduced by 1 (from 3 to 2 in most - environments). - - * Caution: This will have bad consequences if run by accident on - unintended nodes, e.g. on all nodes in a single role. If - possible, it should refuse to run if --limit is not specified. If - possible further, it should refuse to run if a full role is - included, rather than individual nodes. - -#. 
**Stop services on older OS and transfer data to newer OS**:: - - openstack overcloud external-upgrade run \ - --tags system_upgrade_transfer_data \ - --limit ControllerOpenstack,Database,Messaging - - Notes: - - * **This is where control plane downtime starts.** - - * Here we should: - - * Detect which nodes are on older OS and which are on newer OS. - - * Fail if we don't find *at least one* older OS and *exactly - one* newer OS node in each role. - - * On older OS nodes, stop all services except ceph-mon. (On newer - node, no services are running yet.) - - * Transfer data from *an* older OS node (simply the first one in - the list we detect, or do we need to be more specific?) to - *the* newer OS node in a role. This is probably only going to - do anything on the Database role which includes DBs, and will - be a no-op for others. - - * Services can export their ``external_upgrade_tasks`` for the - persist/restore operations, we'll provide an Ansible module or - role to make it DRY. The transfer will likely go via undercloud - initially, but it would be nice to make it direct in order to - speed it up. - -#. **Run the usual upgrade tasks on the newer OS nodes**:: - - openstack overcloud upgrade run \ - --limit controller-openstack-0,database-0,messaging-0 - - Notes: - - * **Control plane downtime stops at the end of this step.** This - means the control plane downtime spans two commands. We should - *not* make it one command because the commands use different - parts of upgrade framework underneath, and the separation will - mean easier re-running of individual parts, should they fail. - - * Here we start pcmk cluster and all services on the newer OS - nodes, using the data previously transferred from the older OS - nodes. - - * Likely we won't need any special per-service upgrade tasks, - unless we discover we need some data conversions or - adjustments. 
The node will be with all services stopped after - upgrade to newer OS, so likely we'll be effectively "setting up a - fresh cloud on pre-existing data". - - * Caution: At this point the newer OS nodes became the authority on - data state. Do not re-run the previous data transfer step after - services have started on newer OS nodes. - - * (Currently ``upgrade run`` has ``--nodes`` and ``--roles`` which - both function the same, as Ansible ``--limit``. Notably, nothing - stops you from passing role names to ``--nodes`` and vice - versa. Maybe it's time to retire those two and implement - ``--limit`` to match the concept from Ansible closely.) - -#. **Perform any service-specific && node-specific external upgrades, - most importantly Ceph**:: - - openstack overcloud external-upgrade run \ - --tags system_upgrade_run \ - -e system_upgrade_nodes=controller-openstack-0,database-0,messaging-0 - - Notes: - - * Ceph-ansible here runs on a single node and spawns a new version - of ceph-mon. Per-node run capability will need to be added to - ceph-ansible. - - * Ceph-mon count is restored here (in most environments, it means - going from 2 to 3). - -#. **Upgrade the remaining control plane nodes**. Perform all the - previous control plane upgrade steps for the remaining controllers - too. Two important notes here: - - * **Do not run the ``system_upgrade_transfer_data`` step anymore.** - The remaining controllers are expected to join the cluster and - sync the database data from the primary controller via DB - replication mechanism, no explicit data transfer should be - necessary. - - * To have the necessary number of ceph-mons running at any given - time (often that means 2 out of 3), the controllers (ceph-mon - nodes) should be upgraded one-by-one. - - After this step is finished, all of the nodes which are sensitive - to Pacemaker version or DB schema version should be upgraded to - newer OS, newer OpenStack, and newer ceph-mons. - -#. 
**Upgrade the rest of the overcloud nodes** (Compute, Networker, - CephStorage), **either one-by-one or in batches**, depending on - uptime requirements of particular nodes. E.g. for computes this - would mean evacuating and then also running:: - - openstack overcloud upgrade run \ - --tags system_upgrade_prepare \ - --limit novacompute-0 - - openstack overcloud upgrade run \ - --tags system_upgrade_run \ - --limit novacompute-0 - - openstack overcloud upgrade run \ - --limit novacompute-0 - - - Notes: - - * Ceph OSDs can be removed by the ``external-upgrade run --tags - system_upgrade_prepare`` step before reprovisioning, and after - ``upgrade run`` command, ceph-ansible can recreate the OSD via - the ``external-upgrade run --tags system_upgrade_run`` step, - always limited to the OSD being upgraded:: - - # Remove OSD - openstack overcloud external-upgrade run \ - --tags system_upgrade_prepare \ - -e system_upgrade_nodes=novacompute-0 - - # <> - - # Re-deploy OSD - openstack overcloud external-upgrade run \ - --tags system_upgrade_run \ - -e system_upgrade_nodes=novacompute-0 - -#. **Perform online upgrade** (online data migrations) after all nodes - have been upgraded:: - - openstack overcloud external-upgrade run \ - --tags online_upgrade - -#. **Perfrom upgrade converge** to re-assert the overcloud state:: - - openstack overcloud upgrade converge - -#. 
**Clean up upgrade data persisted on undercloud**:: - - openstack overcloud external-upgrade run \ - --tags system_upgrade_cleanup - - -Additional notes on data persist/restore ----------------------------------------- - -* There are two different use cases: - - * Persistence for things that need to survive reprovisioning (for - each node) - - * Transfer of DB data from node to node (just once to bootstrap the - first new OS node in a role) - -* The `synchronize Ansible module`_ shipped with Ansible seems - fitting, we could wrap it in a role to handle common logic, and - execute the role via ``include_role`` from - ``upgrade_tasks``. - -* We would persist the temporary data on the undercloud under a - directory accessible only by the user which runs the upgrade - playbooks (``mistral`` user). The root dir could be - ``/var/lib/tripleo-upgrade`` and underneath would be subdirs for - individual nodes, and one more subdir level for services. - - * (Undercloud's Swift also comes to mind as a potential place for - storage. However, it would probably add more complexity than - benefit.) - -* **The data persist/restore operations within the upgrade do not - supplement or replace backup/restore procedures which should be - performed by the operator, especially before upgrading.** The - automated data persistence is solely for upgrade purposes, not for - disaster recovery. - - -Alternatives ------------- - -* **Parallel cloud migration.** We could declare the in-place upgrade - of operating system + OpenStack as too risky and complex and time - consuming, and recommend standing up a new cloud and transferring - content to it. However, this brings its own set of challenges. - - This option is already available for anyone whose environment is - constrained such that normal upgrade procedure is not realistic, - e.g. in case of extreme uptime requirements or extreme risk-aversion - environments. 
- - Implementing parallel cloud migration is probably best handled on a - per-environment basis, and TripleO doesn't provide any automation in - this area. - -* **Upgrading the operating system separately from OpenStack.** This - would simplify things on several fronts, but separating the - operating system upgrade while preserving uptime (i.e. upgrading the - OS in a rolling fashion node-by-node) currently seems not realistic - due to: - - * The pacemaker cluster (corosync) limitations mentioned earlier. We - would have to containerize Pacemaker (even if just ad-hoc - non-productized image). - - * Either we'd have to make OpenStack (and dependencies) compatible - with OS releases in a way we currently do not intend, or at least - ensure such compatibility when running containerized. E.g. for - data transfer, we could then probably use Galera native - replication. - - * OS release differences might be too large. E.g. in case of - differing container runtimes, we might have to make t-h-t be able - to deploy on two runtimes within one deployment. - -* **Upgrading all control plane nodes at the same time as we've been - doing so far.** This is not entirely impossible, but rebooting all - controllers at the same time to do the upgrade could mean total - ceph-mon unavailability. Also given that the upgraded nodes are - unreachable via ssh for some time, should something go wrong and the - nodes got stuck in that state, it could be difficult to recover back - into a working cloud. - - This is probably not realistic, mainly due to concerns around Ceph - mon availability and risk of bricking the cloud. - - -Security Impact ---------------- - -* How we transfer data from older OS machines to newer OS machines is - a potential security concern. - -* The same security concern applies for per-node data persist/restore - procedure in case we go with reprovisioning. 
- -* The stored data may include overcloud node's secrets and should be - cleaned up from the undercloud when no longer needed. - -* In case of using the `synchronize Ansible module`_: it uses rsync - over ssh, and we would store any data on undercloud in a directory - only accessible by the same user which runs the upgrade playbooks - (``mistral``). This undercloud user has full control over overcloud - already, via ssh keys authorized for all management operations, so - this should not constitute a significant expansion of ``mistral`` - user's knowledge/capabilities. - - -Upgrade Impact --------------- - -* The upgrade procedure is riskier and more complex. - - * More things can potentially go wrong. - - * It will take more time to complete, both manually and - automatically. - -* Given that we upgrade one of the controllers while the other two are - still running, the control plane services downtime could be slightly - shorter than before. - -* When control plane services are stopped on older OS machines and - running on newer OS machine, we create a window without high - availability. - -* Upgrade framework might need some tweaks but on high level it seems - we'll be able to fit the workflow into it. - -* All the upgrade steps should be idempotent, rerunnable and - recoverable as much as we can make them so. - - -Other End User Impact ---------------------- - -* Floating IP availability could be affected. Neutron upgrade - procedure typically doesn't immediately restart sidecar containers - of L3 agent. Restarting will be a must if we upgrade the OS. - - -Performance Impact ------------------- - -* When control plane services are stopped on older OS machines and - running on newer OS machine, only one controller is available to - serve all control plane requests. - -* Depending on role/service composition of the overcloud, the reduced - throughput could also affect tenant traffic, not just control plane - APIs. 
- - -Other Deployer Impact ---------------------- - -* Automating such procedure introduces some code which had better not - be executed by accident. The external upgrade tasks which are tagged - ``system_upgrade_*`` should also be tagged ``never``, so that they - only run when explicitly requested. - -* For the data transfer step specifically, we may also introduce a - safety "flag file" on the target overcloud node, which would prevent - re-running of the data transfer until the file is manually removed. - - -Developer Impact ----------------- - -Developers who work on specific composable services in TripleO will -need to get familiar with the new upgrade workflow. - - -Main Risks ----------- - -* Leapp has been somewhat explored but its viability/readiness for our - purpose is still not 100% certain. - -* CI testing will be difficult, if we go with Leapp it might be - impossible (more below). - -* Time required to implement everything may not fit within the release - cycle. - -* We have some idea how to do the data persist/restore/transfer parts, - but some prototyping needs to be done there to gain confidence. - -* We don't know exactly what data needs to be persisted during - reprovisioning. - - -Implementation -============== - -Assignee(s) ------------ - -Primary assignees:: - | jistr, chem, jfrancoa - -Other contributors:: - | fultonj for Ceph - - -Work Items ----------- - -With aditional info in format: (how much do we know about this task, -estimate of implementation difficulty). - -* (semi-known, est. as medium) Change tripleo-heat-templates + - puppet-tripleo to be able to set up a cluster on just one controller - (with newer OS) while the Heat stack knows about all - controllers. This is currently not possible. - -* (semi-known, est. as medium) Amend upgrade_tasks to work for - Rocky->Stein with OS upgrade. - -* ``system_upgrade_transfer_data``: - - * (unknown, est. as easy) Detect upgraded vs. unupgraded machines to - transfer data to/from. 
- - * (known, est. as easy) Stop all services on the unupgraded machines - transfer data to/from. (Needs to be done via external upgrade - tasks which is new, but likely not much different from what we've - been doing.) - - * (semi-known, est. as medium/hard) Implement an Ansible role for - transferring data from one node to another via undercloud. - - * (unknown, est. as medium) Figure out which data needs transferring - from old controller to new, implement it using the above Ansible - role -- we expect only MariaDB to require this, any special - services should probably be tackled by service squads. - -* (semi-known, est. as medium/hard) Implement Ceph specifics, mainly - how to upgrade one node (mon, OSD, ...) at a time. - -* (unknown, either easy or hacky or impossible :) ) Implement - ``--limit`` for ``external-upgrade run``. (As external upgrade runs - on undercloud by default, we'll need to use ``delegate_to`` or - nested Ansible for overcloud nodes. I'm not sure how well --limit - will play with this.) - -* (known, est. as easy) Change update/upgrade CLI from ``--nodes`` - and ``--roles`` to ``--limit``. - -* (semi-known, est. as easy/medium) Add ``-e`` variable pass-through - support to ``external-upgrade run``. - -* (unknown, unknown) Test as much as we can in CI -- integrate with - tripleo-upgrade and OOOQ. - -* For reprovisioning: - - * (semi-known, est. as medium) Implement ``openstack overcloud admin - authorize``. Should take ``--stack``, ``--limit``, - ``--overcloud-ssh-*`` params. - - * (semi-known, est. as medium/hard) Implement an Ansible role for - temporarily persisting overcloud nodes' data on the undercloud and - restoring it. - - * (known, est. as easy) Implement ``external-upgrade run --tags - system_upgrade_cleanup``. - - * (unknown, est. as hard in total, but should probably be tackled by - service squads) Figure out which data needs persisting for - particular services and implement the persistence using the above - Ansible role. 
- -* For in-place: - - * (semi-known, est. as easy) Calls to Leapp in - ``system_upgrade_prepare``, ``system_upgrade_run``. - - * (semi-known, est. as medium) Implement a Leapp actor to set up or - use the repositories we need. - -Dependencies -============ - -* For in-place: Leapp tool being ready to upgrade the OS. - -* Changes to ceph-ansible might be necessary to make it possible to - run it on a single node (for upgrading mons and OSDs node-by-node). - - -Testing -======= - -Testing is one of the main estimated pain areas. This is a traditional -problem with upgrades, but it's even more pronounced for OS upgrades. - -* Since we do all the OpenStack infra cloud testing of TripleO on - CentOS 7 currently, it would make sense to test an upgrade to - CentOS 8. However, CentOS 8 is nonexistent at the time of writing. - -* It is unclear when Leapp will be ready for testing an upgrade from - CentOS 7, and it's probably the only thing we'd be able to execute - in CI. The ``openstack server rebuild`` alternative is probably not - easily executable in CI, at least not in OpenStack infra clouds. We - might be able to emulate reprovisioning by wiping data. - -* Even if we find a way to execute the upgrade in CI, it might still - take too long to make the testing plausible for validating patches. - - -Documentation Impact -==================== - -Upgrade docs will need to be amended, the above spec is written mainly -from the perspective of expected operator workflow, so it should be a -good starting point. - - -References -========== - -* `Leapp`_ - -* `Leapp actors`_ - -* `Leapp architecture`_ - -* `Stein PTG etherpad`_ - -* `backup and restore`_ - -* `synchronize Ansible module`_ - -.. _Leapp: https://leapp-to.github.io/ -.. _Leapp actors: https://leapp-to.github.io/actors -.. _Leapp architecture: https://leapp-to.github.io/architecture -.. _Stein PTG etherpad: https://etherpad.openstack.org/p/tripleo-ptg-stein -.. 
_backup and restore: http://tripleo.org/install/controlplane_backup_restore/00_index.html -.. _synchronize Ansible module: https://docs.ansible.com/ansible/latest/modules/synchronize_module.html diff --git a/specs/stein/validation-framework.rst b/specs/stein/validation-framework.rst deleted file mode 100644 index 8f8770ce..00000000 --- a/specs/stein/validation-framework.rst +++ /dev/null @@ -1,279 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -================================================================= -Provide a common Validation Framework inside python-tripleoclient -================================================================= - -https://blueprints.launchpad.net/tripleo/+spec/validation-framework - -Currently, we're lacking a common validation framework in tripleoclient. This -framework should provide an easy way to validate environment prior deploy and -prior update/upgrade, on both undercloud and overcloud. - -Problem Description -=================== - -Currently, we have two types of validations: - -* Those launched prior the undercloud deploy, embedded into the deploy itself - -* Those launched at will via a Mistral Workflow - -There isn't any unified way to call any validations by itself in an easy way, -and we lack the capacity to easily add new validations for the undercloud -preflight checks. - -The current situation is not optimal, as the operator must go in the UI in order -to run validations - there is a way to run them from the CLI, using the exact -same workflows as the UI. This can't be used in order to get proper preflight -validations, especially when we don't get a working Mistral (prior the -undercloud deploy, or with all-on-one/standalone). - -Moreover, there is a need to make the CLI and UI converge. The latter already -uses the full list of validations. 
Adding the full support of -tripleo-validations to the CLI will improve the overall quality, usability and -maintenance of the validations. - -Finally, a third type should be added: service validations called during the -deploy itself. This doesn't directly affect the tripleoclient codebase, but -tripleo-heat-templates. - -Proposed Change -=============== - -Overview --------- - -In order to improve the current situation, we propose to create a new -"branching" in the tripleoclient commands: `openstack tripleo validator` - -This new subcommand will allow to list and run validations in an independent -way. - -Doing so will allow to get a clear and clean view on the validations we can run -depending on the stage we're in. - -(Note: the subcommand has yet to be defined - this is only a "mock-up".) - -The following subcommands should be supported: - -* ``openstack tripleo validator list``: will display all the available - validations with a small description, like "validate network capabilities on - undercloud" - -* ``openstack tripleo validator run``: will run the validations. Should take - options, like: - - * ``--validation-name``: run only the passed validation. - * ``--undercloud``: runs all undercloud-related validations - * ``--overcloud``: runs all overcloud-related validations - * ``--use-mistral``: runs validations through Mistral - * ``--use-ansible``: runs validations directly via Ansible - * ``--plan``: allows to run validations against specific plan. Defaults to - $TRIPLEO_PLAN_NAME or "overcloud" - -* in addition, common options for all the subcommands: - - * ``--extra-roles``: path to a local directory containing validation - roles maintained by the operator, or swift directory containing extra - validation roles. - * ``--output``: points to a valid Ansible output_callback, such as the native - *json*, or custom *validation_output*. The default one should be the latter - as it renders a "human readable" output. More callbacks can be added later. 
- -The ``--extra-roles`` must support both local path and remote swift -container, since the custom validation support will push any validation to a -dedicated swift directory. - -The default engine will be determined by the presence of Mistral: if Mistral is -present and accepting requests (meaning the Undercloud is most probably -deployed), the validator has to use it by default. If no Mistral is present, it -must fallback on the ansible-playbook. - -The validations should be in the form of Ansible roles, in order to be -easily accessed from Mistral as well (as it is currently the case). It will -also allow to get a proper documentation, canvas and gives the possibility to -validate the role before running it (ensuring there are metadata, output, -and so on). - -We might also create some dedicated roles in order to make a kind of -"self validation", ensuring we actually can run the validations (network, -resources, and so on). - -The UI uses Mistral workflows in order to run the validations - the CLI must -be able to use those same workflows of course, but also run at least some -validations directly via ansible, especially when we want to validate the -undercloud environment before we even deploy it. - -Also, in order to avoid Mistral modification, playbooks including validation -roles will be created. - -In the end, all the default validation roles should be in one and only one -location: tripleo-validations. The support for "custom validations" being added, -such custom validation should also be supported (see references for details). - -In order to get a proper way to "aim" the validations, proper validation groups -must be created and documented. Of course, one validation can be part of -multiple groups. - -In addition, a proper documentation with examples describing the Good Practices -regarding the roles content, format and outputs should be created. 
- -For instance, a role should contain a description, a "human readable error -output", and if applicable a possible solution. - -Proper testing for the default validations (i.e. those in tripleo-validations) -might be added as well in order to ensure a new validation follows the Good -Practices. - -We might want to add support for "nagios-compatible outputs" and exit codes, -but it is not sure running those validations through any monitoring tool is a -good idea due to the possible load it might create. This has to be discussed -later, once we get the framework in place. - -Alternatives ------------- - -No real alternatives in fact. Currently, we have many ways to validate, but -they are all unrelated, not concerted. If we don't provide a unified framework, -we will get more and more "side validations ways" and it won't be maintainable. - -Security Impact ---------------- - -Rights might be needed for some validations - they should be added accordingly -in the system sudoers, in a way that limits unwanted privilege escalations. - - -Other End User Impact ---------------------- - -The end user will get a proper way to validate the environment prior to any -action. -This will give more confidence in the final product, and ease the update and -upgrade processes. - -It will also provide a good way to collect information about the systems in -case of failures. - -If a "nagios-compatible output" is to be created (mix of ansible JSON output, -parsing and compatibility stuff), it might provide a way to get a daily report -about the health of the stack - this might be a nice feature, but not in the -current scope (will need a new stdout_callback for instance). - -Performance Impact ------------------- - -The more validations we get, the more time it might take IF we decide to run -them by default prior any action. - -The current way to disable them, either with a configuration file or a CLI -option will stay. 
- -In addition, we can make a great use of "groups" in order to filter out greedy -validations. - - -Other Deployer Impact ---------------------- - -Providing a CLI subcommand for validation will make the deployment easier. - -Providing a unified framework will allow an operator to run the validations -either from the UI, or from the CLI, without any surprise regarding the -validation list. - -Developer Impact ----------------- - -A refactoring will be needed in python-tripleoclient and probably in -tripleo-common in order to get a proper subcommand and options. - -A correct way to call Ansible from Python is to be decided (ansible-runner?). - -A correct way to call Mistral workflows from the CLI is to be created if it -does not already exist. - -In the end, the framework will allow other Openstack projects to push their own -validations, since they are the ones knowing how and what to validate in the -different services making Openstack. - -All validations will be centralized in the tripleo-validations repository. -This means we might want to create a proper tree in order to avoid having -100+ validations in the same directory. - - -Implementation -============== - -Assignee(s) ------------ - -Primary assignee: - cjeanner - -Other contributors: - akrivoka - ccamacho - dpeacock - florianf - - -Work Items ----------- - -* List current existing validations in both undercloud_preflight.py and - openstack-tripleo-validations. - -* Decide if we integrate ansible-runner as a dependency (needs to be packaged). - -* Implement the undercloud_preflight validations as Ansible roles. - -* Implement a proper way to call Ansible from the tripleoclient code. - -* Implement support for a configuration file dedicated for the validations. - -* Implement the new subcommand tree in tripleoclient. - -* Validate, Validate, Validate. 
- - -Dependencies -============ - -* Ansible-runner: https://github.com/ansible/ansible-runner - -* Openstack-tripleo-validations: https://github.com/openstack/tripleo-validations - - - -Testing -======= - -The CI can't possibly provide the "right" environment with all the requirements. -The code has to implement a way to configure the validations so that the CI -can override the *productive* values we will set in the validations. - - -Documentation Impact -==================== - -A new entry in the documentation must be created in order to describe this new -framework (for the devs) and new subcommand (for the operators). - -References -========== - -* http://lists.openstack.org/pipermail/openstack-dev/2018-July/132263.html - -* https://bugzilla.redhat.com/show_bug.cgi?id=1599829 - -* https://bugzilla.redhat.com/show_bug.cgi?id=1601739 - -* https://review.openstack.org/569513 (custom validation support) - -* https://docs.openstack.org/tripleo-docs/latest/install/validations/validations.html diff --git a/specs/stein/zero-footprint-installer.rst b/specs/stein/zero-footprint-installer.rst deleted file mode 100644 index 423d5620..00000000 --- a/specs/stein/zero-footprint-installer.rst +++ /dev/null @@ -1,127 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -================================ -TripleO Zero Footprint Installer -================================ - -https://blueprints.launchpad.net/tripleo/+spec/zero-footprint - -This spec introduces support for an installer mode which has zero -(or at least much less) dependencies than we do today. It is meant -to be an iteration of the Undercloud and All-In-One (standalone) -installers that allows you to end up with the same result without -having to install all of the TripleO dependencies on your host machine. 
- -Problem Description -=================== - -Installing python-tripleoclient on a host machine currently installs -a lot of dependencies many of which may be optional for smaller -standalone type installations. Users of smaller standalone installations -can have a hard time understanding the differences between what TripleO -dependencies get installed vs which services TripleO installs. - -Additionally, some developers would like a fast-track way to develop and -run playbooks without requiring local installation of an Undercloud which -in many cases is done inside a virtual machine to encapsulate the dependencies -that get installed. - -Proposed Change -=============== - -A new zero footprint installer can help drive OpenStack Tripleoclient -commands running within a container. Using this approach you can: - -1. Generate Ansible playbooks from a set of Heat templates - (tripleo-heat-templates), Heat environments, and Heat parameters - exactly like we do today using a Container. No local dependencies - would be required to generate the playbooks. - -2. (optionally) Execute the playbooks locally on the host machine. This would - require some Ansible modules to be installed that TripleO depends on but - is a much smaller footprint than what we require elsewhere today. - -Alternatives ------------- - -Create a subpackage of python-tripleoclient which installs less dependencies. -The general footprint of required packages would still be quite high (lots -of OpenStack packages will still be installed for the client tooling). - -Or do nothing and continue to use VMs to encapsulate the dependencies for -an Undercloud/All-In-One installer and generate Ansible playbooks. Setting -up a local VM requires more initial setup and dependencies however and is -heavier than just using a local container to generate the same playbooks. 
- -Security Impact ---------------- - -As a container will be used to generate Ansible playbooks the user may -need to expose some local data/files to the installer container. This is -likely a minimal concern as we already require this data to be exposed to -the Undercloud and All-In-One installers. - -Other End User Impact ---------------------- - -None - -Performance Impact ------------------- - -Faster deployment and testing of local All-On-One setups. - -Other Deployer Impact ---------------------- - -None - - -Developer Impact ----------------- - -Faster deployment and testing of local All-On-One setups. - -Implementation -============== - -Assignee(s) ------------ - -Primary assignee: - dprince - -Work Items ----------- - -* A new 'tripleoclient' container -* New project to drive the installation (Talon?) -* Continue to work on refining the Ansible playbook modules to provide a - cleaner set of playbook dependencies. Specifically those that depend on - the any of the traditional TripleO/Heat agent hooks and scripts. -* documentation updates - -Dependencies -============ - -None. - -Testing -======= - -This new installer can likely suppliment or replace some of the testing we -are doing for All-In-One (standalone) deployments in upstream CI. - -Documentation Impact -==================== - -Docs will need to be updated. - -References -========== - -None diff --git a/specs/template.rst b/specs/template.rst deleted file mode 100644 index 5a403f75..00000000 --- a/specs/template.rst +++ /dev/null @@ -1,226 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -========================================== -Example Spec - The title of your blueprint -========================================== - -Include the URL of your launchpad blueprint: - -https://blueprints.launchpad.net/tripleo - -Introduction paragraph -- why are we doing anything? 
A single paragraph of -prose that operators can understand. - -Some notes about using this template: - -* Your spec should be in ReSTructured text, like this template. - -* Please wrap text at 80 columns. - -* The filename in the git repository should match the launchpad URL, for - example a URL of: https://blueprints.launchpad.net/tripleo/+spec/awesome-thing - should be named awesome-thing.rst - -* Please do not delete any of the sections in this template. If you have - nothing to say for a whole section, just write: None - -* For help with syntax, see http://sphinx-doc.org/rest.html - -* To test out your formatting, build the docs using tox, or see: - http://rst.ninjs.org - - -Problem Description -=================== - -A detailed description of the problem: - -* For a new feature this might be use cases. Ensure you are clear about the - actors in each use case: End User vs Deployer - -* For a major reworking of something existing it would describe the - problems in that feature that are being addressed. - - -Proposed Change -=============== - -Overview --------- - -Here is where you cover the change you propose to make in detail. How do you -propose to solve this problem? - -If this is one part of a larger effort make it clear where this piece ends. In -other words, what's the scope of this effort? - -Alternatives ------------- - -What other ways could we do this thing? Why aren't we using those? This doesn't -have to be a full literature review, but it should demonstrate that thought has -been put into why the proposed solution is an appropriate one. - -Security Impact ---------------- - -Describe any potential security impact on the system. Some of the items to -consider include: - -* Does this change touch sensitive data such as tokens, keys, or user data? - -* Does this change involve cryptography or hashing? - -* Does this change require the use of sudo or any elevated privileges? - -* Does this change involve using or parsing user-provided data? 
This could - be directly at the API level or indirectly such as changes to a cache layer. - -* Can this change enable a resource exhaustion attack, such as allowing a - single API interaction to consume significant server resources? Some examples - of this include launching subprocesses for each connection, or entity - expansion attacks in XML. - -For more detailed guidance, please see the OpenStack Security Guidelines as -a reference (https://wiki.openstack.org/wiki/Security/Guidelines). These -guidelines are a work in progress and are designed to help you identify -security best practices. For further information, feel free to reach out -to the OpenStack Security Group at openstack-security@lists.openstack.org. - -Upgrade Impact --------------- - -Describe potential upgrade impact on the system. - -* Is this change meant to become the default for deployments at some - point in the future? How do we migrate existing deployments to that - feature? - -* Can the system be upgraded to this feature using the upgrade hooks - provided by the composable services framework? - -* Describe any plans to deprecate configuration values or - features. (For example, if we change the directory name that - instances are stored in, how do we handle instance directories - created before the change landed? Do we move them? Do we have a - special case in the code? Do we assume that the operator will - recreate all the instances in their cloud?) - -* Please state anything that operators upgrading from the previous - release need to be aware of. Do they need to perform extra manual - operations? - -Other End User Impact ---------------------- - -Are there ways a user will interact with this feature? - -Performance Impact ------------------- - -Describe any potential performance impact on the system, for example -how often will new code be called, and is there a major change to the calling -pattern of existing code. 
- -Examples of things to consider here include: - -* A small change in a utility function or a commonly used decorator can have a - large impacts on performance. - -Other Deployer Impact ---------------------- - -Discuss things that will affect how you deploy and configure OpenStack -that have not already been mentioned, such as: - -* What config options are being added? Should they be more generic than - proposed (for example a flag that other hypervisor drivers might want to - implement as well)? Are the default values ones which will work well in - real deployments? - -* Is this a change that takes immediate effect after its merged, or is it - something that has to be explicitly enabled? - -Developer Impact ----------------- - -Discuss things that will affect other developers working on OpenStack. - - -Implementation -============== - -Assignee(s) ------------ - -Who is leading the writing of the code? Or is this a blueprint where you're -throwing it out there to see who picks it up? - -If more than one person is working on the implementation, please designate the -primary author and contact. - -Primary assignee: - - -Other contributors: - - -Work Items ----------- - -Work items or tasks -- break the feature up into the things that need to be -done to implement it. Those parts might end up being done by different people, -but we're mostly trying to understand the timeline for implementation. - - -Dependencies -============ - -* Include specific references to specs and/or blueprints in tripleo, or in other - projects, that this one either depends on or is related to. - -* If this requires functionality of another project that is not currently used - by Tripleo (such as the glance v2 API when we previously only required v1), - document that fact. - -* Does this feature require any new library dependencies or code otherwise not - included in OpenStack? Or does it depend on a specific version of library? 
- - -Testing -======= - -Please discuss how the change will be tested. - -Is this untestable in CI given current limitations (specific hardware / -software configurations available)? If so, are there mitigation plans (3rd -party testing, gate enhancements, etc). - - -Documentation Impact -==================== - -What is the impact on the docs? Don't repeat details discussed above, but -please reference them here. - - -References -========== - -Please add any useful references here. You are not required to have any -reference. Moreover, this specification should still make sense when your -references are unavailable. Examples of what you could include are: - -* Links to mailing list or IRC discussions - -* Links to notes from a summit session - -* Links to relevant research, if appropriate - -* Related specifications as appropriate (e.g. if it's an EC2 thing, link the EC2 docs) - -* Anything else you feel it is worthwhile to refer to diff --git a/specs/train/certificate-management.rst b/specs/train/certificate-management.rst deleted file mode 100644 index a973b791..00000000 --- a/specs/train/certificate-management.rst +++ /dev/null @@ -1,197 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. 
- - http://creativecommons.org/licenses/by/3.0/legalcode - -===================================================== -Move certificate management in tripleo-heat-templates -===================================================== - -Launchpad blueprint: - -https://blueprints.launchpad.net/tripleo/+spec/ansible-certmonger - -Problem Description -=================== - -There are multiple issues with the current way certificates are managed with -Puppet and Certmonger, especially in a containerized environment: - -* Multiple containers are using the same certificate -* There isn't any easy way to find out which container needs to be restarted - upon certificate renewal -* Shared certificates are bad - -The main issue now is the use of "pkill", especially for httpd services. Since -Certmonger has no knowledge of what container has an httpd service running, -it uses a wide fly swatter in the hope all related services will effectively -be reloaded with the new certificate. - -The usage of "pkill" by Certmonger is prevented on a SELinux enforcing host. - -Proposed Change -=============== - -Introduction ------------- - -While the use of certmonger isn't in question, the way we're using it is. - -The goal of this document is to describe how we could change that usage, -allowing to provide a better security, while allowing Certmonger to restart -only the needed containers in an easy fashion. - -Implement certmonger in Ansible -------------------------------- - -A first step will be to implement a certmonger "thing" in Ansible. There are -two ways to do that: - -* Reusable role -* Native Ansible module - -While the first one is faster to implement, the second would be better, since -it will allow to provide a clean way to manage the certificates. 
- -Move certificate management to tripleo-heat-templates ------------------------------------------------------ - -Once we have a way to manage Certmonger within Ansible, we will be able to move -calls directly in relevant tripleo-heat-templates files, allowing to generate -per-container certificate. - -Doing so will also allow Certmonger to know exactly which container to -restart upon certificate renewal, using a simple "container_cli kill" command. - -Alternatives -============ - -One alternative is proposed - -Maintain a list ---------------- - -We could maintain the code as-is, and just add a list for the containers -needing a restart/reload. Certmonger would loop on that list, and do its -job upon certificate renewal. - -This isn't a good solution, since the list will eventually lack updates, and -this will create new issues instead of solving the current ones. - -Also, it doesn't allow to get per-container certificate, which is bad. - -Proposed roadmap -================ - -In Stein: - -* Create "tripleo-certmonger" Ansible reusable role in tripleo-common - -In Train: - -* Move certificate management/generation within tripleo-heat-templates. -* Evaluate the benefices of moving to a proper Ansible module for Certmonger. -* If evaluation is good and we have time, implement it and update current code. - -In "U" release: - -* Check if anything relies on puppet-certmonger, and if not, drop this module. - -Security Impact -=============== - -We will provide a better security level by avoiding shared x509 keypairs. - -Upgrade Impact -============== - -Every container using the shared certificate will be restarted in order to -load the new, dedicated one. - -We will have to ensure the nova metadata are properly updated in order to -let novajoin create the services in FreeIPA, allowing to request per-service -certificates. - -Tests should also be made regarding novajoin update/upgrade in order to ensure -all is working as expected. 
- -If the containers are already using dedicated certificates, no other impact is -expected. - -End User Impact -=============== - -During the upgrade, a standard short downtime is to be expected, unless -the deployment is done using HA. - -Performance Impact -================== - -No major performance impact is expected. - -Deployer Impact -=============== - -No major deployer impact is expected. - -Developer Impact -================ - -People adding new services requiring a certificate will need to call the -Certmonger module/role in the new tripleo-heat-templates file. - -They will also need to ensure new metadata is properly generated in order to -let novajoin create the related service in FreeIPA. - -Implementation -============== - -Contributors ------------- - -* Cédric Jeanneret -* Grzegorz Grasza -* Nathan Kinder - -Work Items ----------- - -* Implement reusable role for Certmonger -* Move certificate management to tripleo-heat-templates -* Remove certmonger parts from Puppet -* Update/create needed documentations about the certificate management - -Later: -* Implement a proper Ansible Module -* Update the role in order to wrap module calls - - -Dependencies -============ - -None - currently, no Certmonger module for Ansible exists. - -Testing -======= - -We have to ensure the dedicated certificate is generated with the right -content, and ensure it's served by the right container. - -We can do that using openssl CLI, maybe adding a new check in the CI via -a new role in tripleo-quickstart-extras. - -This is also deeply linked to novajoin, thus we have to ensure it works as -expected. - -Documentation Impact -==================== - -We will need to document how the certificate are managed. 
- -References -========== - -* `Example of existing certificate management in Ansible `_ -* `Skeleton certmonger_getcert `_ -* `Existing reusable roles in TripleO `_ diff --git a/specs/train/undercloud-minion.rst b/specs/train/undercloud-minion.rst deleted file mode 100644 index a2b6a021..00000000 --- a/specs/train/undercloud-minion.rst +++ /dev/null @@ -1,167 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -============================== -Scale Undercloud with a Minion -============================== - -https://blueprints.launchpad.net/tripleo/undercloud-minion - -In order to improve our scale, we have identified heat-engine and possibly -ironic-conductor as services that we can add on to an existing undercloud -deployment. Adding heat-engine allows for additional processing capacity -when creating and updating stacks for deployment. By adding a new light -weight minion node, we can scale the Heat capacity horizontally. - -Additionally since these nodes could be more remote, we could add an -ironic-conductor instance to be able to manage hosts in a remote region -while still having a central undercloud for the main management. - - -Problem Description -=================== - -Currently we use a single heat-engine on the undercloud for the deployment. -According to the Heat folks, it can be beneficial for processing to have -additional heat-engine instances for scale. The recommended scaling is out -rather than up. Additionally by being able to deploy a secondary host, we -can increase our capacity for the undercloud when additional scale capacity -is required. - - -Proposed Change -=============== - -Overview --------- - -We are proposing to add a new undercloud "minion" configuration that can be -used by operators to configure additional instances of heat-engine and -ironic-conductor when they need more processing capacity. 
We would also -allow the operator to disable heat-engine from the main undercloud to reduce -the resource usage of the undercloud. By removing the heat-engine from the -regular undercloud, the operator could possibly avoid timeouts on other services -like keystone and neutron that can occur when the system is under load. - -Alternatives ------------- - -An alternative would be to make the undercloud deployable in a traditional -HA capacity where we share the services across multiple nodes. This would -increase the overall capacity but adds additional complexity to the undercloud. -Additionally this does not let us target specific services that are resource -heavy. - -Security Impact ---------------- - -The new node would need to have access to the main undercloud's keystone, -database and messaging services. - -Upgrade Impact --------------- - -The new minion role would need to be able to be upgraded by the user. - -Other End User Impact ---------------------- - -None. - -Performance Impact ------------------- - -* This additional minion role may improve heat processing due to the additional - resource capacity being provided. - -* Locating an ironic-conductor closer to the nodes being managed can improve - performance by being closer to the systems (less latency, etc). - - -Other Deployer Impact ---------------------- - -Additional undercloud role and a new undercloud-minion.conf configuration file -will be created. Additionally a new option may be added to the undercloud.conf -to manage heat-engine installation. - -Developer Impact ----------------- - -None. - - -Implementation -============== - -Assignee(s) ------------ - -Primary assignee: - mwhahaha - -Other contributors: - slagle - EmilienM - -Work Items ----------- - -Work items or tasks -- break the feature up into the things that need to be -done to implement it. Those parts might end up being done by different people, -but we're mostly trying to understand the timeline for implementation. 
- -python-tripleoclient -~~~~~~~~~~~~~~~~~~~~ - -* New 'openstack undercloud minion deploy' command for installation - -* New 'openstack undercloud minion upgrade' command for upgrades - -* New configuration file 'undercloud-minion.conf' to drive the installation - and upgrades. - -* New configuration option in 'undercloud.conf' to provide ability to disable - the heat-engine on the undercloud. - -tripleo-heat-templates -~~~~~~~~~~~~~~~~~~~~~~ - -* New 'UndercloudMinion' role file - -* New environment file for the undercloud minion deployment - -* Additional environment files to enable or disable heat-engine and - ironic-conductor. - -Dependencies -============ - -None. - -Testing -======= - -We would add a new CI job to test the deployment of the minion node. This job -will likely be a new multinode job. - - - -Documentation Impact -==================== - -We will need to document the usage of the undercloud minion installation and -the specific use cases where this can be beneficial. - - -References -========== - -See the notes from the Train PTG around Scaling. - -* https://etherpad.openstack.org/p/tripleo-ptg-train - -* https://etherpad.openstack.org/p/DEN-tripleo-forum-scale diff --git a/specs/ussuri/mistral-to-ansible.rst b/specs/ussuri/mistral-to-ansible.rst deleted file mode 100644 index c5fa699c..00000000 --- a/specs/ussuri/mistral-to-ansible.rst +++ /dev/null @@ -1,205 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -============================ -Replace Mistral with Ansible -============================ - -Include the URL of your launchpad blueprint: - -https://blueprints.launchpad.net/tripleo/+spec/tripleo-mistral-to-ansible - -The goal of this proposal is to replace Mistral in TripleO with Ansible -playbooks. 
- - -Problem Description -=================== - -Mistral was originally added to take the place of an “API” and provide common -logic for tripleoclient and TripleO UI. After the TripleO UI was removed, the -only consumer of Mistral is tripleoclient. This means that Mistral now adds -unnecessary overhead and complexity. - - -Proposed Change -=============== - -Overview --------- - -Remove Mistral from the TripleO undercloud and convert all Mistral workbooks, -workflows and actions to Ansible playbooks within tripleo-ansible. tripleoclient -will then be updated to execute the Ansible playbooks rather than the Mistral -workflows. - -Alternatives ------------- - -The only other alternative candidate is to keep using Mistral and accept the -complexity and reinvest in the project. - -Security Impact ---------------- - -* As the code will be re-writing Mistral workflows that currently deal with - passwords, tokens and secrets we will need to be careful. However the logic - should be largely the same. - -* With the eventual removal of Mistral and Zaqar two complex systems can be - removed which will reduce the surface area for security issues. - -* The new Ansible playbooks will only use the undercloud OpenStack APIs, - therefore they shouldn't create a new attack vector. - - - -Upgrade Impact --------------- - -* Upgrades will need to remove Mistral services and make sure the Ansible - playbooks are in place. - -* Older versions of tripleoclient will no longer work with the undercloud as - they will expect Mistral to be present. - -* Most of the data in Mistral is ephemeral, but some longer term data is stored - in Mistral environments. This data will likely be moved to Swift. - - -Other End User Impact ---------------------- - -The output of CLI commands will change format. For example, the Mistral -workflow ID will no longer be included and other Ansible specific output will -be included. 
Where possible we will favour streaming Ansible output to the -user, making tripleoclient very light and transparent. - -Some CLI commands, such as introspection, will need to fundamentally change -their output. Currently they send real time updates and progress to the client -with Zaqar. Despite moving the execution locally, we are unable to easily get -messages from an Ansible playbook while it is running. This means the user may -need to wait a long time before they get any feedback. - - -Performance Impact ------------------- - -There is no expected performance impact as the internal logic should be largely -the same. However, the Ansible playbooks will be executed where the user runs -the CLI rather than by the Mistral server. This could then be slower or faster -depending on the resources available to the machine and the network connection -to the undercloud. - -The undercloud itself should have more resources available since it won't be -running Mistral or Zaqar. - - -Other Deployer Impact ---------------------- - -If anyone is using the Mistral workflows directly, they will stop working. We -currently don't know of any users doing this and it was never documented. - - -Developer Impact ----------------- - -Developers will need to contribute to Ansible playbooks instead of Mistral -workflows. As the pool of developers that know Ansible is larger than those -that know Mistral this should make development easier. Ansible contributions -will likely expect unit/functional tests. - - -Implementation -============== - -Assignee(s) ------------ - - -Primary assignee: - d0ugal - -Other contributors: - -- apetrich -- ekultails -- sshnaidm -- cloudnull - -Work Items ----------- - -Storyboard is being used to track this work: - https://storyboard.openstack.org/#!/board/208 - -- Migrate the Mistral workflows to Ansible playbooks. - -- Migrate or replace custom Mistral actions to Ansible native components. - -- Remove Mistral and Zaqar. 
- -- Update documentation specific to Mistral. - -- Extend our auto-documentation plugin to support playbooks within - tripleo-ansible. This will allow us to generate API documentation for all - playbooks committed to tripleo-ansible, which will include our new `cli` - prefixed playbooks. - -Converting Mistral Workflows to Ansible -*************************************** - -For each Mistral workflow the following steps need to be taken to port them -to Ansible. - -- Re-write the Mistral workflow logic in Ansible, reusing the Mistral Python - actions where appropriate. - -- Update python-tripleoclient to use the new Ansible playbooks. It should - prefer showing the native Ansible output rather than attempting to replicate - the previous output. - -- The Workflows and related code should be deleted from tripleo-common. - -A complete example can be seen for the `openstack undercloud backup` command. - -- `Ansible Playbook `_ -- `Updated tripleoclient `_ -- `Removal of all workflow code `_ - - -Dependencies -============ - -None - - -Testing -======= - -Since this change will largely be a re-working of existing code the changes -will be tested by the existing CI coverage. This should be improved and -expanded as is needed. - - -Documentation Impact -==================== - -Any references to Mistral will need to be updated to point to the new ansible -playbook. - - -References -========== - -* https://review.opendev.org/#/q/topic:mistral-removal+OR+topic:mistral_to_ansible - -* https://bugs.launchpad.net/tripleo/+bugs?field.tag=mistral-removal - -* http://lists.openstack.org/pipermail/openstack-discuss/2019-October/010384.html - -* https://storyboard.openstack.org/#!/board/208 diff --git a/specs/ussuri/scaling-with-ansible-inventory.rst b/specs/ussuri/scaling-with-ansible-inventory.rst deleted file mode 100644 index c99f8de9..00000000 --- a/specs/ussuri/scaling-with-ansible-inventory.rst +++ /dev/null @@ -1,251 +0,0 @@ -.. 
- This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -================================== -Scaling with the Ansible Inventory -================================== - -https://blueprints.launchpad.net/tripleo/scaling-with-Ansible-inventory - -Scaling an existing deployment should be possible by adding new host -definitions directly to the Ansible inventory, and not having to increase the -Count parameters. - -Problem Description -=================== - -Currently to scale a deployment, a Heat stack update is required. The stack -update reflects the new desired node count of each role, which is then -represented in the generated Ansible inventory. The inventory file is then used -by the config-download process when ansible-playbook is executed to perform the -software configuration on each node. - -Updating the Heat stack with the new desired node count has posed some -scaling challenges. Heat creates a set of resources associated with each node. -As the number of nodes in a deployment increases, Heat has more and more -resources to manage. - -As the stack size grows, Heat must be tuned with software configurations or -horizontally scaled with additional engine workers. However, horizontal scaling -of Heat workers will only help so much as eventually other service workers -would need to be scaled as well, such as database, messaging, or Keystone -worker process. Having to increasingly scale worker processes results in -additional physical resource consumption. - -Heat performance also begins to degrade as stack size increases. It takes -longer and longer for stack operations to complete as node count increases. The -stack operation time often reaches into taking many hours, which is usually -outside the range of typical maintenance windows. - -It is also hard to predict what changes Heat will make. Often, no changes are -desired other than to scale out to new nodes. 
However, unintended template -changes or user error around forgetting to pass environment files poses -additional unnecessary risk to the scaling operation. - - -Proposed Change -=============== - -Overview --------- - -The proposed change would allow for users to directly add new node definitions -to the Ansible inventory by way of a new Heat parameter to allow for scaling -services onto those new nodes. No change in the Count parameters would be -required. - -A minimum set of data would be required when adding a new node to the Ansible -inventory. Presently, this includes the TripleO role, and an IP address on each -network that is used by that role. - -Only scaling of already defined roles will be possible with this method. -Defining new roles would still require a full Heat stack update which defined -the new role. - -Once the new node(s) are added to the inventory, ansible-playbook could be -rerun with the config-download directory to scale the software services out -on to the new nodes. - -As increasing the node count in the Heat stack operation won't be necessary -when scaling, if baremetal provisioning is required for the new nodes, then -this work depends on the nova-less-deploy work: - -https://specs.openstack.org/openstack/tripleo-specs/specs/stein/nova-less-deploy.html - -Once baremetal provisioning is migrated out of Heat with the above work, then -new nodes can be provisioned with those new workflows before adding them -directly to the Ansible inventory. - -Since new nodes added directly to the Ansible inventory would still be -consuming IP's from the subnet ranges defined for the overcloud networks, -Neutron needs to be made aware of those assignments so that there are no -overlapping IP addresses. This could be done with a new interface in -tripleo-heat-templates that allows for specifying the extra node inventory -data. The parameter would be called ``ExtraInventoryData``. 
The templates would -take care of operating on that input and creating the appropriate Neutron ports -to correspond to the IP addresses specified in the data. - -When tripleo-ansible-inventory is used to generate the inventory, it would -query Heat as it does today, but also layer in the extra inventory data as -specified by ``ExtraInventoryData``. The resulting inventory would be a unified -view of all nodes in the deployment. - -``ExtraInventoryData`` may be a list of files that are consumed with Heat's -get_file function so that the deployer can keep their inventory data organized -by file. - -Alternatives ------------- - -This change is primarily targeted at addressing scaling issues around the -Heat stack operation. Alternative methods include using undercloud minions: - -https://docs.openstack.org/project-deploy-guide/tripleo-docs/latest/features/undercloud_minion.html - -Multi-stack/split-controlplane also addresses the issue somewhat by breaking up -the deployment into smaller and more manageable stacks: - -https://docs.openstack.org/project-deploy-guide/tripleo-docs/latest/features/distributed_compute_node.html - -These alternatives are complimentary to the proposed solution here, and all of -these solutions can be used together for the greatest benefits. - -Direct manipulation of inventory data -_____________________________________ - -Another alternative would be to not make use of any new interface in the -templates such as the previously mentioned ``ExtraInventoryData``. Users could just -update the inventory file manually, or drop inventory files in a specified -location (since Ansible can use a directory as an inventory source). - -The drawbacks to this approach are that another tool would be necessary to -create associated ports in Neutron so that there are no overlapping IP -addresses. It could also be a manual step, although that is prone to error. 
- -The advantages to this approach is that it would completely eliminate the stack -update operation as part of the scaling. Not having any stack operation is -appealing in some regards due to the potential to forget environment files or -other user error (out of date templates, etc). - -Security Impact ---------------- - -IP addresses and hostnames would potentially exist in user managed templates -that have the value for ``ExtraInventoryData``, however this is no different than -what is present today. - -Upgrade Impact --------------- - -The upgrade process will need to be aware that not all nodes are represented in -the Heat stack, and some will be represented only in the inventory. This should -not be an issue as long as there is a consistent interface to get a single -unified inventory as there exists now. - -Any changes around creating the unified view of the inventory should be made -within the implementation of that interface (tripleo-ansible-inventory) such -that existing tooling continues to use an inventory that contains all nodes for -a deployment. - -Other End User Impact ---------------------- - -Users will potentially have to manage additional environment files for the -extra inventory data. - -Performance Impact ------------------- - -Performance should be improved during scale out operations. - -However, it should be noted that Ansible will face scaling challenges as well. -While this change does not directly introduce those new challenges, it may -expose them more rapidly as it bypasses the Heat scaling challenges. - -For example, it is not expected that simply adding hundreds or thousands of new -nodes directly to the Ansible inventory means that scaling operation would -succeed. It would likely expose new scaling challenges in other tooling, such -as the playbook and role tasks or Ansible itself. 
- -Other Deployer Impact ---------------------- - -Since this proposal is meant to align with the nova-less-deploy, all nodes -(whether they are known to Heat or not) would be unprovisioned if the -deployment is deleted. - -If using pre-provisioned nodes, then there is no change in behavior in that -deleting the Heat stack does not actually "undeploy" any software. This -proposal does not change that behavior. - -Developer Impact ----------------- - -Developers could more quickly test scaling by bypassing the Heat stack update -completely if desired, or using the ``ExtraInventoryData`` interface. - - -Implementation -============== - -Assignee(s) ------------ - -Primary assignee: - James Slagle - -Work Items ----------- - -* Add new parameter ``ExtraInventoryData`` - -* Add Heat processing of ``ExtraInventoryData`` - - * create Neutron ports - - * add stack outputs - -* Update tripleo-ansible-inventory to consume from added stack outputs - -* Update HostsEntry to be generic - -Dependencies -============ - -* Depends on nova-less-deploy work for baremetal provisioning outside of Heat. - If using pre-provisioned nodes, does not depend on nova-less-deploy. - -* All deployment configurations coming out of Heat need to be generic per role. - Most of this work was complete in Train, however this should be reviewed. For - example, the HostsEntry data is still static and Heat is calculating the node - list. This data needs to be moved to an Ansible template. - - -Testing -======= - -Scaling is not currently tested in CI, however perhaps it could be with this -change. - -Manual test plans and other test automation would need to be updated to also -test scaling with ``ExtraInventoryData``. - - -Documentation Impact -==================== - -Documentation needs to be added for ``ExtraInventoryData``. - -The feature should also be fully explained in that users and deployers need to -be made aware of the change of how nodes may or may not be represented in the -Heat stack. 
- -References -========== - -* https://specs.openstack.org/openstack/tripleo-specs/specs/stein/nova-less-deploy.html -* https://docs.openstack.org/project-deploy-guide/tripleo-docs/latest/features/undercloud_minion.html -* https://docs.openstack.org/project-deploy-guide/tripleo-docs/latest/features/distributed_compute_node.html diff --git a/specs/ussuri/tripleo-operator-ansible.rst b/specs/ussuri/tripleo-operator-ansible.rst deleted file mode 100644 index 64fad99f..00000000 --- a/specs/ussuri/tripleo-operator-ansible.rst +++ /dev/null @@ -1,331 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -============================================================================= -tripleo-operator-ansible - Ansible roles and modules to interact with TripleO -============================================================================= - -https://blueprints.launchpad.net/tripleo/+spec/tripleo-operator-ansible - -As an operator of a TripleO deployment, I would like to be able to comsume -supported ansible roles and modules that let me perform TripleO related -actions in my automation. - -Problem Description -=================== - -The existing tripleo-ansible_ repository currently contains roles, plugins -and modules that are consumed by TripleO to perform the actual deployments and -configurations. As these are internal implementations to TripleO, we would not -want operators consuming these directly. The tripleo-ansible_ repository is -also branched which means that the contents within the repo and packaging -are specific to a singular release. This spec propose that we create a new -repository targeted for external automation for any supported version. - -Currently Operators do not have a set of official ansible roles and modules -that can be used to deploy and manage TripleO environments. 
For folks who wish -to manage their TripleO environments in an automated fashion, we have seen -multiple folks implement the same roles to manage TripleO. e.g. -tripleo-quickstart_, tripleo-quickstart-extras_, infrared_, tripleo-lab_. - -* TripleO should provide a set of ansible roles and modules that can be used - by the end user to deploy and manage an Undercloud and Overcloud. - -* TripleO should provide a set of ansible roles and modules that can be used - to perform scaling actions. - -* TripleO should provide a set of ansible roles and modules that can be used - to perform update and upgrade actions. - -.. _tripleo-ansible: https://opendev.org/openstack/tripleo-ansible -.. _infrared: https://github.com/redhat-openstack/infrared -.. _tripleo-quickstart: https://opendev.org/openstack/tripleo-quickstart -.. _tripleo-quickstart-extras: https://opendev.org/openstack/tripleo-quickstart-extras -.. _tripleo-lab: https://github.com/cjeanner/tripleo-lab - -Proposed Change -=============== - -Overview --------- - -TripleO should create a new repository where ansible roles, plugins and -modules that wrap TripleO actions can be stored. This repository should be -branchless so that the roles can be used with any currently supported version -of TripleO. The goal is to only provide automation for TripleO actions and not -necessarily other cloud related actions. The roles in this new repository -should only be targeted to providing an automation interface for the existing -`tripleoclient commands`_. The repository may provide basic setups actions such -as implementing a wrapper around tripleo-repos_. The roles contained in this -repository should not implement additional day 2 cloud related operations such -as creating servers, networks or other resources on the deployed Overcloud. - -This new repository should be able to be packaged and distributed via an RPM -as well as being able to be published to `Ansible Galaxy`_. 
The structure -of this new repository should be Ansible collections_ compatible. - -The target audience of the new repository would be end users (operators, -developers, etc) who want to write automation around TripleO. The new -repository and roles would be our officially supported automation artifacts. -One way to describe this would be like providing Puppet modules for a given -peice of software so that it can be consumed by users who use Puppet. The -existing CLI will continue to function for users who do not want to use -Ansible to automate TripleO deployments or who wish to continue to use the CLI -by hand. The roles are not a replacement for the CLI, but only provide an -official set of roles for people who use Ansible. - -The integration point for Ansible users would be the roles provided via -tripleo-operator-ansible. We would expect users to perform actions by -including our provided roles. - -An example playbook for a user could be: - -.. code-block:: yaml - - - hosts: undercloud - gather_facts: true - tasks: - - include_role: - role: tripleo_undercloud - tasks_from: install - vars: - tripleo_undercloud_configuration: - DEFAULT: - undercloud_debug: True - local_ip: 192.168.50.1/24 - - name: Copy nodes.json - copy: - src: /home/myuser/my-environment-nodes.json - dest: /home/stack/nodes.json - - include_role: - role: tripleo_baremetal - tasks_from: introspection - vars: - tripleo_baremetal_nodes_file: /home/stack/nodes.json - tripleo_baremetal_introspection_provide: True - tripleo_baremetal_introspection_all_managable: True - - include_role: - role: tripleo_overcloud - tasks_from: deploy - vars: - tripleo_overcloud_environment_files: - - network_isolation.yaml - - ceph_storage.yaml - tripleo_overcloud_roles: - - Controller - - Networker - - Compute - - CephStorage - -The internals of these roles could possibly proceed in two different paths: - -* Implement simple wrappers around the invocation of the actual TripleO - commands using execs, shell or 
commands. This path will likely be the fastest - path to have an initial implementation. - -.. code-block:: yaml - - - name: Install undercloud - command: "openstack undercloud install {{ tripleo_undercloud_install_options }}" - chdir: "{{ tripleo_undercloud_install_directory }}" - - -* Implement a python wrapper to call into the provided tripleoclient classes. - This path may be a longer term goal as we may be able to provide better - testing by using modules. - -.. code-block:: python - - #!/usr/bin/python - - # import the python-tripleoclient - # undercloud cli - - from tripleoclient.v1 import undercloud - - import sys - import json - import os - import shlex - - # See the following for details - # https://opendev.org/openstack/python-tripleoclient/src/branch/ - # master/tripleoclient/v1/undercloud.py - - # setup the osc command - - - class Arg: - verbose_level = 4 - - - # instantiate the - u = undercloud.InstallUndercloud('tripleo', Arg()) - - # prog_name = 'openstack undercloud install' - tripleo_args = u.get_parser('openstack undercloud install') - - # read the argument string from the arguments file - args_file = sys.argv[1] - args_data = file(args_file).read() - - # For this module, we're going to do key=value style arguments. 
- arguments = shlex.split(args_data) - for arg in arguments: - - # ignore any arguments without an equals in it - if "=" in arg: - - (key, value) = arg.split("=") - - # if setting the time, the key 'time' - # will contain the value we want to set the time to - - if key == "dry_run": - if value == "True": - tripleo_args.dry_run = True - else: - tripleo_args.dry_run = False - - tripleo_args.force_stack_validations = False - tripleo_args.no_validations = True - tripleo_args.force_stack_update = False - tripleo_args.inflight = False - - # execute the install via python-tripleoclient - rc = u.take_action(tripleo_args) - - if rc != 0: - print(json.dumps({ - "failed": True, - "msg": "failed tripleo undercloud install" - })) - sys.exit(1) - - print(json.dumps({ - "changed": True, - "msg": "SUCCESS" - })) - sys.exit(0) - -.. code-block:: yaml - - - name: Install undercloud - tripleo_undercloud: - install: true - foo: bar - -These implementations will need to be evaluated to understand which works -best when attempting to support multiple versions of TripleO where options -may or may not be available. The example of this is where we supported one -cli parameter in versions >= Stein but not prior to this. - -The goal is to have a complete set of roles to do basic deployments within -a single cycle. We should be able to itterate on the internals of the roles -once we have established basic set to prove out the concept. More complex -actions or other version support may follow on in later cycles. - -.. _tripleoclient commands: https://docs.openstack.org/python-tripleoclient/latest/index.html -.. _tripleo-repos: https://opendev.org/openstack/tripleo-repos -.. _Ansible Galaxy: https://galaxy.ansible.com/ -.. _collections: https://docs.ansible.com/ansible/latest/dev_guide/developing_collections.html - -Alternatives ------------- - -* Do nothing and continue to have multiple tools re-implement the actions in - ansible roles. 
Pick a singular implementation from the existing set and merge them together
-  within this existing tool.
- -Documentation Impact -==================== - -The roles should be documented (perferrably automated) for the operators to -be able to consume these new roles. - -References -========== - -None. diff --git a/specs/victoria/simple-container-generation.rst b/specs/victoria/simple-container-generation.rst deleted file mode 100644 index c8544883..00000000 --- a/specs/victoria/simple-container-generation.rst +++ /dev/null @@ -1,427 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - - -=========================== -Simple Container Generation -=========================== - -Simple container generation is an initiative to reduce complexity in the -TripleO container build, deployment, and distribution process by reducing the -size and scope of the TripleO container build tools. - -The primary objective of this initiative is to replace Kolla, and our -associated Kolla customization tools, as the selected container generation -tool-kit. The TripleO community has long desired an easier solution for -deployers and integrators alike and this initiative is making that desire a -reality. - -The Simple container generation initiative is wanting to pivot from a -tool-chain mired between a foundational component of Kolla-Ansible and a -general purpose container build system, to a vertically integrated solution -that is only constructing what TripleO needs, in a minimally invasive, and -simple to understand way. - -[#f3]_ - - -Problem Description -=================== - -TripleO currently leverages Kolla to produce container images. These images are -built for Kolla-Ansible using an opinionated build process which has general -purpose features. While our current images work, they're large and not well -suited for the TripleO use-case, especially in distributed data-centers. 
The -issue of container complexity and size impacts three major groups, deployers, -third party integrators, and maintainers. As the project is aiming to simplify -interactions across the stack, the container life cycle and build process has -been identified as something that needs to evolve. The TripleO project needs -something vertically integrated which produces smaller images, that are easier -to maintain, with far fewer gyrations required to tailor images to our needs. - - -Proposed Change -=============== - -Overview --------- - -Implement a container file generation role, and a set of statically defined -override variable files which are used to generate our required -container files. [#f2]_ - -Layering -^^^^^^^^ - -.. code-block:: text - - tripleo-base+---+ - | - | - +---+-openstack-${SERVICE}-1-common-+-->openstack-${SERVICE}-1-a - | | - | +-->openstack-${SERVICE}-1-b - | | - | +-->openstack-${SERVICE}-1-c - +-->openstack-${SERVICE}-2 - | - +-->ancillary-${SERVICE}-1 - | - +-->ancillary-${SERVICE}-2 - - -User Experience -^^^^^^^^^^^^^^^ - -Building the standard set of images will be done through a simple command line -interface using the TripleO python client. - -.. code-block:: shell - - $ openstack tripleo container image build [opts] - - -This simple sub-command will provide users the ability to construct images as -needed, generate container files, and debug runtime issues. - - -CLI Options -^^^^^^^^^^^ - -The python TripleO client options for the new container image build entry point. - -=========== =============================== ================================================================= -Option Default Description -=========== =============================== ================================================================= -config-file $PATH/overcloud_containers.yaml Configuration file setting the list of containers to build. -exclude [] Container type exclude. Can be specified multiple times. 
-work-dir /tmp/container-builds Container builds directory, storing the container files and - logs for each image and its dependencies. -skip-push False Skip pushing images to the registry -skip-build False Only generates container files without producing a local build. -base centos Base image name. -type binary Image type. -tag latest Image tag. -registry localhost Container registry URL. -namespace tripleomaster Container registry namespace. -volume [] Container bind mount used when building the image. Should be - specified multiple times if multiple volumes. -=========== =============================== ================================================================= - - -Container Image Build Tools -^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Container images will be built using Buildah_, The required Buildah -functionality will leverage `BuildahBuilder` via `python-tripleoclient` -integration and be exposed though CLI options. - -.. _Buildah: https://buildah.io - - -Image layout -^^^^^^^^^^^^ - -Each image will have its own YAML file which has access to the following -parameters. Each YAML file will have one required parameter (tcib_from for the -source image to build from) and optional parameters. - -================= ============================= ==================== ======== =================================================== -Option Default Type Required Description -================= ============================= ==================== ======== =================================================== -tcib_path `{{ lookup('env', 'HOME') }}` String Path to generated the container file(s) for a given - image. -tcib_args Dict[str, str] Single level `key:value` pairs. Implements arg_. -tcib_from `centos:8` Str True Container image to deploy from. Implements from_. -tcib_labels Dict[str, str] Single level `key:value` pairs. Implements label_. -tcib_envs Dict[str, str] Single level `key:value` pairs. Implements env_. -tcib_onbuilds List[str] =String. Implements onbuild_. 
-tcib_volumes List[str] =String. Implements volume_. -tcib_workdir Str Implements workdir_. -tcib_adds List[str] =String. Implements add_. -tcib_copies List[str] =String. Implements copy_. -tcib_exposes List[str] =String. Implements expose_. -tcib_user Str Implements user_. -tcib_shell Str Implements shell_. -tcib_runs List[str] =String. Implements run_. -tcib_healthcheck Str Implements healthcheck_. -tcib_stopsignal Str Implements stopsignal_. -tcib_entrypoint Str Implements entrypoint_. -tcib_cmd Str Implements cmd_. -tcib_actions List[Dict[str, str]] Each item is a Single level Dictionary `key:value` - pairs. Allows for arbitrary verbs which maintains - ordering. -tcib_gather_files List[str] Each item is a String. Collects files from the - host and stores them in the build directory. -================= ============================= ==================== ======== =================================================== - -.. _arg: https://docs.docker.com/engine/reference/builder/#arg -.. _from: https://docs.docker.com/engine/reference/builder/#from -.. _label: https://docs.docker.com/engine/reference/builder/#label -.. _env: https://docs.docker.com/engine/reference/builder/#env -.. _onbuild: https://docs.docker.com/engine/reference/builder/#onbuild -.. _volume: https://docs.docker.com/engine/reference/builder/#volume -.. _workdir: https://docs.docker.com/engine/reference/builder/#workdir -.. _add: https://docs.docker.com/engine/reference/builder/#add -.. _copy: https://docs.docker.com/engine/reference/builder/#copy -.. _expose: https://docs.docker.com/engine/reference/builder/#expose -.. _user: https://docs.docker.com/engine/reference/builder/#user -.. _shell: https://docs.docker.com/engine/reference/builder/#shell -.. _run: https://docs.docker.com/engine/reference/builder/#run -.. _healthcheck: https://docs.docker.com/engine/reference/builder/#healthcheck -.. _stopsignal: https://docs.docker.com/engine/reference/builder/#stopsignal -.. 
_entrypoint: https://docs.docker.com/engine/reference/builder/#entrypoint -.. _cmd: https://docs.docker.com/engine/reference/builder/#cmd - - - Application packages are sorted within each container configuration file. - This provides a programmatic interface to derive package sets, allows - overrides, and is easily visualized. While the package option is not - processes by the `tripleo_container_image_build` role, it will serve as a - standard within our templates. - - ================ ==================================================== - Option Description - ================ ==================================================== - tcib_packages Dictionary of packages to install. - - .. code-block:: yaml - - common: - - openstack-${SERVICE}-common - distro-1: - common: - - openstack-${SERVICE}-proprietary - x86_64: - - $dep-x86_64 - power: - - $dep-power - distro-2: - common: - - openstack-${SERVICE} - - $dep - ================ ==================================================== - - This option is then captured and processed by a simple `RUN` action. - - .. code-block:: yaml - - tcib_actions: - - run: "dnf install -y {{ tcib_packages['common'] }} {{ tcib_packages[ansible_distribution][ansible_architecture] }}" - - -Example Container Variable File -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -.. 
code-block:: yaml - - tcib_from: ubi8 - tcib_path: "{{ lookup('env', 'HOME') }}/example-image" - tcib_labels: - maintainer: MaintainerX - tcib_entrypoint: dumb-init --single-child -- - tcib_stopsignal: SIGTERM - tcib_envs: - LANG: en_US.UTF-8 - tcib_runs: - - mkdir -p /etc/ssh && touch /etc/ssh/ssh_known_host - tcib_copies: - - /etc/hosts /opt/hosts - tcib_gather_files: - - /etc - tcib_packages: - common: - - curl - centos: - x86_64: - - wget - tcib_actions: - - run: "dnf install -y {{ tcib_packages['common'] }} {{ tcib_packages[ansible_distribution][ansible_architecture] }}" - - copy: /etc/resolv.conf /resolv.conf - - run: ["/bin/bash", "-c", "echo hello world"] - - -Container File Structure -^^^^^^^^^^^^^^^^^^^^^^^^ - -The generated container file(s) will follow a simple directory structure -which provide an easy way to view, and understand, build relationships and -dependencies throughout the stack. - -.. code-block:: shell - - tripleo-base/${CONTAINERFILE} - tripleo-base/ancillary-${SERVICE}-1/${CONTAINERFILE} - tripleo-base/ancillary-${SERVICE}-2/${CONTAINERFILE} - tripleo-base/openstack-${SERVICE}-1-common/${CONTAINERFILE} - tripleo-base/openstack-${SERVICE}-1-common/openstack-${SERVICE}-1-a/${CONTAINERFILE} - tripleo-base/openstack-${SERVICE}-1-common/openstack-${SERVICE}-1-b/${CONTAINERFILE} - tripleo-base/openstack-${SERVICE}-1-common/openstack-${SERVICE}-1-c/${CONTAINERFILE} - tripleo-base/openstack-${SERVICE}-2/${CONTAINERFILE} - - -Alternatives ------------- - -* Use Ansible Bender - -Ansible Bender was evaluated as a tool which could help to build the container -images. However it has not been productized downstream; which would make it -difficult to consume. It doesn't generate Dockerfiles and there is a strong -dependency on Bender tool; the container image build process would therefore be -more difficult to do in a standalone environment where Bender isn't available. -[#f1]_ - -* Leave the container image build process untouched. 
- -We could leave the container image generate process untouched. This keeps us a -consumer of Kolla and requires we maintain our complex ancillary tooling to -ensure Kolla containers work for TripleO. - - -Security Impact ---------------- - -While security is not a primary virtue in the simple container generation -initiative, security will be improved by moving to simplified containers. If -the simple container generation initiative is ratified, all containers used -within TripleO will be vertically integrated into the stack, making it possible -to easily audit the build tools and all applications, services, and files -installed into our containerized runtimes. With simplification we'll improve -the ease of understanding and transparency which makes our project more -sustainable, thereby more secure. The proposed solution must provide layers -where we know what command has been run exactly; so we can quickly figure out -how an image was built. - - -Upgrade Impact --------------- - -There is no upgrade impact because the new container images will provide -feature parity with the previous ones; they will have the same or similar -injected scripts that are used when the containers start. - - -Other End User Impact ---------------------- - -None - - -Performance Impact ------------------- - -We should expect better performance out of our containers, as they will be -smaller. While the runtime will act the same, the software delivery will be -faster as the size of each container will smaller, with better constructed -layers. Smaller containers will decrease the mean time to ready which will have -a positive performance impact and generally improve the user experience. - - -Other Deployer Impact ---------------------- - -The simplified container generation initiative will massively help third party -integrators. With simplified container build tools we will be able to easily -articulate requirements to folks looking to build on-top of TripleO. 
Our -tool-chain will be capable of bootstrapping applications where required, and -simple enough to integrate with a wide variety of custom applications -constructed in bespoke formats. - - -Developer Impact ----------------- - -In the first phase, there won't be any developer impact because the produced -images will be providing the same base layers as before. For example, they will -contain all the Kolla scripts that are required to merge configuration files or -initialize the container at startup. - -These scripts will be injected in the container images for backward -compatibility: - -* kolla_extend_start -* set_configs.py -* start.sh -* copy_cacerts.sh -* httpd_setup.sh - -In a second phase, we will simplify these scripts to remove what isn't needed -by TripleO. The interface in the composable services will likely evolve over -time. For example kolla_config will become container_config. There is no plan -at this time to rewrite the configuration file merge logic. - - -Implementation -============== - -Assignee(s) ------------ - -Primary assignee: - * Cloudnull - * EmilienM - - -Work Items ----------- - -First phase -^^^^^^^^^^^ - -* Ansible role to generate container file(s) - https://review.opendev.org/#/c/722557 -* Container images layouts - https://review.opendev.org/#/c/722486 -* Deprecate "openstack overcloud container image build" -* Implement "openstack tripleo container image build" which will reuse the - `BuildahBuilder` and the same logic as the deprecated command but without Kolla. -* Build new images and publish them. -* Switch the upstream CI to use the new images. - -Second phase: - -* Simplifying the injected scripts to only do what we need in TripleO. -* Rename the configuration interfaces in TripleO Heat Templates. - - -Dependencies -============ - -The tooling will be in existing repositories so there is no new dependency. It -will mainly be in tripleo-ansible, tripleo-common, python-tripleoclient and -tripleo-heat-templates. 
Like before, Buildah will be required to build the -images. - - -Testing -======= - -* The tripleo-build-containers-centos-8 job will be switched to be using - the new "openstack tripleo container image build" command. - -* A molecule job will exercise the container image build process using - the new role. - -* Some end-to-end job will also be investigated to build and deploy - a container into a running deployment. - - -Documentation Impact -==================== - -Much of the documentation impact will be focused on cleanup of the existing -documentation which references Kolla, and the creation of documentation that -highlights the use of the vertically integrated stack. - -Since the changes should be transparent for the end-users who just pull images -without rebuilding it, the manuals will still be updated with the new command -and options if anyone wants to build the images themselves. - -References -========== - -.. [#f1] https://review.opendev.org/#/c/722136/ -.. [#f2] https://review.opendev.org/#/c/722557/ -.. [#f3] https://blueprints.launchpad.net/tripleo/+spec/simplified-containers diff --git a/specs/victoria/tripleo-powerflex-integration.rst b/specs/victoria/tripleo-powerflex-integration.rst deleted file mode 100644 index 18ec35b7..00000000 --- a/specs/victoria/tripleo-powerflex-integration.rst +++ /dev/null @@ -1,262 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -================================================================================ -Enable TripleO to deploy Dell EMC PowerFlex software defined storage via Ansible -================================================================================ - -Problem description -=================== - -There is currently no automated way to deploy VxFlexOS from within TripleO. -Goal is to provide an ease of use at the time of deployment as well as during -lifecycle operations. 
- -Proposed changes -================ - -Overview --------- -VxFlexOS has been rebranded to PowerFlex. - -The deployer experience to stand up PowerFlex with TripleO should be the -following: - -The deployer chooses to deploy a role containing any of the PowerFlex services: -PowerflexMDM, PowerflexLIA, PowerflexSDS and PowerflexSDC. - -At least three new Overcloud roles should be defined such as: -- Controller with PowerFlex -- Compute with PowerFlex -- Storage with PowerFlex - -Custom roles definition are used to define which service will run on which -type of nodes. We'll use this custom roles_data.yaml to deploy the overcloud. - -PowerFlex support for HCI, which combines compute and storage into a single -node, has been considered but will not be part of the first drop. - -The deployer provides the PowerFlex parameters as offered today in a Heat env -file. - -The deployer starts the deployment and gets an overcloud with PowerFlex and -appropriate services deployed on each node per its role. -Current code is available here. Still WIP. - -https://github.com/dell/tripleo-powerflex - -The following files are created in -/usr/share/openstack-tripleo-heat-templates/deployment/powerflex-ansible : -- powerflex-base.yaml -- powerflex-lia.yaml -- powerflex-mdm.yaml -- powerflex-sdc.yaml -- powerflex-sds.yaml -All of these files are responsible of the configuration of each sevice. Each -service is based upon the powerflex-base.yaml template which calls the Ansible -playbook and triggers the deployment. - -The directory /usr/share/powerflex-ansible holds the Ansible playbook which -installs and configure PowerFlex. - -A new tripleo-ansible role is created in /usr/share/ansible/roles called -tripleo-powerflex-run-ansible which prepares the variables and triggers the -execution of the PowerFlex Ansible playbook. 
An environment file named powerflex-ansible.yaml is created in
The staging area has the -PowerFlex packages specific to the OS version of overcloud image. - -Ansible playbook -================= - -Initial deployment of OpenStack and PowerFlex ---------------------------------------------- - -The sequence of events for this new Ansible playbook to be triggered during -initial deployment with TripleO follows: - -1. Define the Overcloud on the Undercloud in Heat. This includes the Heat -parameters that are related to PowerFlex which will later be passed to -powerflex-ansible via TripleO Ansible playbook. - -2. Run `openstack overcloud deploy` with default PowerFlex options and include -a new Heat environment file to make the implementation of the service -deployment use powerflex-ansible. - -3. The undercloud assembles and uploads the deployment plan to the undercloud -Swift. - -4. TripleO starts to deploy the Overcloud and interfaces with Heat accordingly. - -5. A point in the deployment is reached where the Overcloud nodes are imaged, -booted, and networked. At that point the undercloud has access to the -provisioning or management IPs of the Overcloud nodes. - -6. The TripleO Ansible playbook responsible to Deploy PowerFlex with any of -the four PowerFlex services, including PowerflexMDM, PowerflexLIA, PowerflexSDS -and PowerflexSDC. - -7. The servers which host PowerFlex services have their relevant firewall ports -opened according to the needs of their service, e.g. the PowerflexMDM are -configured to accept traffic on TCP port 9011 and 6611. - -8. A new Heat environment file which defines additional parameters that we want -to override is passed to the TripleO Ansible playbook. - -9. The TripleO Ansible playbook translates these parameters so that they match -the parameters that powerflex-ansible expects. The translation entails building -an argument list that may be passed to the playbook by calling -`ansible-playbook --extra-vars`. 
An alternative location for the -/usr/share/powerflex-ansible playbook is possible via an argument. No -playbooks are run yet at this stage. - -10. The TripleO Ansible playbook is called and passed the list -of parameters as described earlier. A dynamic Ansible inventory is used with the -`-i` option. In order for powerflex-ansible to work there must be a group called -`[mdms]`, '[tbs]', '[sdss]' and '[sdcs]' in the inventory. - -11. The TripleO Ansible playbook starts the PowerFlex install using the -powerflex-ansible set of playbooks - -Update/Upgrade PowerFlex SW ---------------------------- - -TBD - -Scaling up/down ---------------- - -This implementation supports the add or remove of SDS and/or SDC at any moment -(Day+N operations) using the same deployment method. - -1. The deployer chooses which type of node he wants to add or remove from the -Powerflex system. - -2. The deployer launches an update on the Overcloud which will bring up or down -the nodes to add/remove. - -3. The nodes will be added or removed from the Overcloud. - -4. The SDS and SDC SW will be added or removed from the PowerFlex system. - -5. Storage capacity will be updated consequently. -For Scaling down operation, it will succeed only if: -- the minimum of 3 SDS nodes remains -- the free storage capacity available is enough for rebalancing the data - -PowerFlex services breakdown -============================ - -The PowerFlex system is broken down into multiple components, each of these have -to be installed on specific node types. - -Non HCI model -------------- - -- Controllers will host the PowerflexLIA, PowerflexMDM and PowerflexSDC (Glance) - components. A minimum of 3 MDMs is required. - -- Computes will host the PowerflexLIA and PowerflexSDC as they will be - responsible for accessing volumes. There is no minimum. - -- Storage will host the PowerflexLIA and PowerflexSDS as disks will be presented - as backend. A minimum of 3 SDS is required. 
A minimum of 1 disk per SDS is - also required to connect the SDS. - -HCI model ---------- - -- Controllers will host the PowerflexLIA, PowerflexMDM and PowerflexSDC (Glance) - components. A minimum of 3 MDMs is required. - -- Compute HCI will host the PowerflexLIA and PowerflexSDC as they will be - responsible for accessing volumes and the PowerflexSDS as disks will be - presented as backend. A minimum of 3 SDS is required. A minimum of 1 disk per - SDS is also required to connect the SDS. - -Security impact -=============== - -- A new SSH key pair will be created on the undercloud. - The public key of this pair will be installed in the heat-admin user's - authorized_keys file on all Overcloud nodes which will be MDMs, SDSs, or SDCs. - This process will follow the same pattern used to create the SSH keys used for - TripleO validations so nothing new would happen in that respect; just another - instance on the same type of process. - -- Additional firewall configuration need to include all TCP/UDP ports needed by - Powerflex services according to the following: - | Overcloud role | PowerFlex Service | Ports | - | -------------- | ----------------- | ---------------------- | - | Controller | LIA, SDC, SDS | 9099, 7072, 6611, 9011 | - | Compute | LIA, SDC | 9099 | - | Storage | LIA, SDS | 9099, 7072 | - -- Kernel modules package like scini.ko will be installed depending of the - version of the operating system of the overcloud node. - -- Question: Will there be any SELinux change needed for IP ports that vxflexOS - is using? - -Performance Impact -================== -The following applies to the undercloud: - -- TripleO Ansible will need to run an additional playbook - diff --git a/specs/wallaby/ephemeral-heat-overcloud.rst b/specs/wallaby/ephemeral-heat-overcloud.rst deleted file mode 100644 index 27100281..00000000 --- a/specs/wallaby/ephemeral-heat-overcloud.rst +++ /dev/null @@ -1,248 +0,0 @@ -.. 
- This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -======================================== -Ephemeral Heat Stack for all deployments -======================================== - -https://blueprints.launchpad.net/tripleo/+spec/ephemeral-heat-overcloud - -This spec proposes using the ephemeral Heat stack model for all deployments -types, including the overcloud. Using ephemeral Heat is already done for -standalone deployments with the "tripleo deploy" command, and for the -undercloud install as well. Expanding its use to overcloud deployments will -align the different deployment methods into just a single method. It will also -make the installation process more stateless and with better predictability -since there is no Heat stack to get corrupted or possibly have bad state or -configuration. - - -Problem Description -=================== - -* Maintaining the Heat stack can be problematic due to corruption via either - user or software error. Backups are often not available, and even when they - exist, they are no guarantee to recover the stack. Corruption or loss of the - Heat stack, such as accidental deletion, requires custom recovery procedures - or re-deployments. - -* The Heat deployment itself must be maintained, updated, and upgraded. These - tasks are not large efforts, but they are areas of maintenance that would be - eliminated when using ephemeral Heat instead. - -* Relying on the long lived Heat process makes the deployment less portable in - that there are many assumptions in TripleO that all commands are run - directly from the undercloud. Using ephemeral Heat would at least allow for - the stack operation and config-download generation to be entirely portable - such that it could be run from any node with python-tripleoclient installed. - -* There are large unknowns in the state of each Heat stack that exists for all - current deployments. 
These unknowns can cause issues during update/upgrade as - we can't possibly account for all of these items, such as out of date - parameter usage or old/incorrect resource registry mappings. Having each - stack operation create a new stack will eliminate those issues. - - -Proposed Change -=============== - -Overview --------- - -The ephemeral Heat stack model involves starting a short lived heat process -using a database engine for the purposes of creating the stack. The initial -proposal assumes using the MySQL instance already present on the undercloud as -the database engine. To maintain compatibility with the already implemented -"tripleo deploy" code path, SQLite will also be supported for single node -deployments. SQLite may also be supported for other deployments of -sufficiently small size so as that SQLite is not a bottleneck. - -After the stack is created, the config-download workflow is run to download and -render the ansible project directory to complete the deployment. The short -lived heat process is killed and the database is deleted, however, enough -artifacts are saved to reproduce the Heat stack if necessary including the -database dump. The undercloud backup and restore procedure will be modified to -account for the removal of the Heat database. - -This model is already used by the "tripleo deploy" command for the standalone -and undercloud installations and is well proven for those use cases. Switching -the overcloud deployment to also use ephemeral Heat aligns all of the different -deployments to use Heat the same way. - -We can scale the ephemeral Heat processes by using a podman pod that -encapsulates containers for heat-api, heat-engine, and any other process we -needed. Running separate Heat processes containerized instead of a single -heat-all process will allow starting multiple engine workers to allow for -scale. 
Management and configuration of the heat pod will be fairly prescriptive -and it will use default podman networking as we do not need the Heat processes -to scale beyond a single host. Moving forward, undercloud minions will no -longer install heat-engine process as a means for scale. - -As part of this change, we will also add the ability to run Heat commands -against the saved database from a given deployment. This will give -operators a way to inspect the Heat stack that was created for debugging -purposes. - -Managing the templates used during the deployment becomes even more important -with this change, as the templates and environments passed to the "overcloud -deploy" command are the entire source of truth to recreate the deployment. We -may consider further management around the templates, such as a git repository -but that is outside the scope of this spec. - -There are some cases where the saved state in the stack is inspected before a -deployment operation. Two examples are comparing the Ceph fsid's between the -input and what exists in the stack, as well as checking for a missing -network-isolation.yaml environment. - -In cases such as these, we need a way to perform these checks outside of -inspecting the Heat stack itself. A straightforward way to do these types of -checks would be to add ansible tasks that check the existing deployed overcloud -(instead of the stack) and then cause an error that will stop the deployment if -an invalid change is detected. - -Alternatives ------------- - -The alternative is to make no changes and continue to use Heat as we do today -for the overcloud deployment. With the work that has already been done to -decouple Heat from Nova, Ironic, and now Neutron, it instead seems like the -next iterative step is to use ephemeral Heat for all of our deployment types. - -Security Impact ---------------- - -The short lived ephemeral heat process uses no authentication. 
This is in -contrast to the Heat process we have on the undercloud today that uses Keystone -for authentication. In reality, this change has little effect on security as -all of the sensitive data is actually passed into Heat from the templates. We -should however make sure that the generated artifacts are secured -appropriately. - -Since the Heat process is ephemeral, no change related to SRBAC (Secure RBAC) -is needed. - -Upgrade Impact --------------- - -When users upgrade to Wallaby, the Heat processes will be shutdown on the -undercloud, and further stack operations will use ephemeral Heat. - -Upgrade operations for the overcloud will work as expected as all of the update -and upgrade tasks are entirely generated with config-download on each stack -operation. We will however need to ensure proper upgrade testing to be sure -that all services can be upgraded appropriately using ephemeral Heat. - -Other End User Impact ---------------------- - -End users will no longer have a running instance of Heat to interact with or -run heat client commands against. However, we will add management around -starting an ephemeral Heat process with the previously used database for -debugging inspection purposes (stack resource list/show, etc). - -Performance Impact ------------------- - -The ephemeral Heat process is presently single threaded. Addressing this -limitation by using a podman pod for the Heat processes will allow the -deployment to scale to meet overcloud deployment needs, while keeping the -process ephemeral and easy to manage with just a few commands. - -Using the MySQL database instead of SQLite as the database engine should -alleviate any impact around the database being a bottleneck. After the -database is backed up after a deployment operation, it would be wiped from -MySQL so that no state is saved outside of the produced artifacts from the -deployment. - -Alternatively, we can finish the work started in `Scaling with the Ansible -inventory`_. 
That work will enable deploying the Heat stack with a count of 1 -for each role. With that change, the Heat stack operation times will scale with -the number of roles in the deployment, and not the number of nodes, which will -allow for similar performance as currently exists. Even while using the -inventory to scale, we are still likely to have worse performance with a single -heat-all process than we do today. With just a few roles, using just heat-all -becomes a bottleneck. - -Other Deployer Impact ---------------------- - -Initially, deployers will have the option to enable using the ephemeral Heat -model for overcloud deployments, until it becomes the default. - -Developer Impact ----------------- - -Developers will need to be aware of the new commands that will be added to -enable inspecting the Heat stack for debugging purposes. - -In some cases, some service template updates may be required where there are -instances that those templates rely on saved state in the Heat stack. - -Implementation -============== - -Assignee(s) ------------ - -Primary assignee: - james-slagle - -Work Items ----------- - -The plan is to start prototyping this effort and have the option in place to -use it for a default overcloud deployment in Wallaby. There may be additional -fine tunings that we can finish in the X release, with a plan to backport to -Wallaby. Ideally, we would like to make this the default behavior in Wallaby. -To the extent that is possible will be determined by the prototype work. - -* Add management of Heat podman pod to tripleoclient -* Add option to "overcloud deploy" to use ephemeral Heat -* Use code from "tripleo deploy" for management of ephemeral Heat -* Ensure artifacts from the deployment are saved in known locations and - reusable as needed -* Update undercloud backup/restore to account for changes related to Heat - database. 
-* Add commands to enable running Heat commands with a previously used - database -* Modify undercloud minion installer to no longer install heat-engine -* Switch some CI jobs over to use the optional ephemeral Heat -* Eventually make using ephemeral Heat the default in "overcloud deploy" -* Align the functionality from "tripleo deploy" into the "overcloud deploy" - command and eventually deprecate "tripleo deploy". - -Dependencies -============ - -This work depends on other ongoing work to decouple Heat from management of -other OpenStack API resources, particularly the composable networks v2 work. - -* Network Data v2 Blueprint - https://blueprints.launchpad.net/tripleo/+spec/network-data-v2-ports - -Testing -======= - -Initially, the change will be optional within the "overcloud deploy" command. -We can choose some CI jobs to switch over to opt-in. Eventually, it will become -the default behavior and all CI jobs would then be affected. - -Documentation Impact -==================== - -Documentation updates will be necessary to detail the changes around using -ephemeral Heat. Specifically: - -* User Interface changes -* How to run Heat commands to inspect the stack -* Where artifacts from the deployment were saved and how to use them - -References -========== - -* `Scaling with the Ansible inventory`_ specification - - -.. _Scaling with the Ansible inventory: https://specs.openstack.org/openstack/tripleo-specs/specs/ussuri/scaling-with-ansible-inventory.html diff --git a/specs/wallaby/excise-swift.rst b/specs/wallaby/excise-swift.rst deleted file mode 100644 index 511c7d05..00000000 --- a/specs/wallaby/excise-swift.rst +++ /dev/null @@ -1,188 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. 
- - http://creativecommons.org/licenses/by/3.0/legalcode - -================================= -Disable Swift from the Undercloud -================================= - -The goal of this proposal is to introduce the community to the idea of -disabling Swift on the TripleO Undercloud. Within this propose we intend -to provide a high-level overview of how we can accomplish this goal. - - -Problem Description -=================== - -Swift is being used to store objects related to the deployment which are -managed entirely on the Undercloud. In the past, there was an API / UI to -interact with the deployment tooling; however, with the deprecation of the UI -and the removal of Mistral this is no longer the case. The Undercloud is -assumed to be a single node which is used to deploy OpenStack clouds, and -requires the user to login to the node to run commands. Because we're no longer -attempting to make the Undercloud a distributed system there's no need for an -API'able distributed storage service. Swift, in it's current state, is -under-utilized and carries unnecessary operational and resource overhead. - - -Proposed Change -=============== - -Overview --------- - -Decommission Swift from the Undercloud. - -To decommission Swift, we'll start by removing all of the `tripleoclient` Swift -interactions. These interactions are largely storing and retrieving YAML files -which provide context to the user for current deployment status. To ensure -we're not breaking deployment expectations, we'll push everything to the local -file system and retain all of the file properties wherever possible. We will -need coordinate with tripleo-ansible to ensure we're making all direct Swift -client and module interactions optional. - -Once we're able to remove the `tripleoclient` Swift interactions, we'll move to -disable Swift interactions from tripleo-common. 
These interactions are similar -to the ones found within the `tripleoclient`, though tripleo-common has some -complexity; we'll need to ensure we're not breaking expectations we've created -with our puppet deployment methodologies which have some Swift assumptions. - - -Alternatives ------------- - -We keep everything as-is. - - -Security Impact ---------------- - -There should be no significant security implications when disabling Swift. -It could be argued that disabling Swift might make the deployment more secure, -it will lessen the attack surface; however, given the fact that Swift on the -Undercloud is only used by director I would consider any benefit insignificant. - - -Upgrade Impact --------------- - -There will be no upgrade impact; this change will be transparent to the -end-user. - - -Other End User Impact ---------------------- - -None. - - -Performance Impact ------------------- - -Disabling Swift could make some client interactions faster; however, the -benefit should be negligible. That said, disabling Swift would remove a -service on the Undercloud, which would make setup faster and reduce the -resources required to run the Undercloud. - - -Other Deployer Impact ---------------------- - -Operationally we should see an improvement as it will no longer be required to -explore a Swift container, and download files to debug different parts of the -deployment. All deployment related file artifacts housed within Swift will -exist on the Undercloud using the local file system, and should be easily -interacted with. - - -Developer Impact ----------------- - -None, if anything disabling Swift should make the life of a TripleO developer -easier. - - -Implementation -============== - -Excising Swift client interactions will be handled directly in as few reviews -as possible; hopefully allowing us to backport this change, should it be deemed -valuable to stable releases. 
- -All of the objects stored within Swift will be stored in -`/var/lib/tripleo/{named_artifact_directories}`. This will allow us to -implement all of the same core logic in our various libraries just without the -use of the API call to store the object. - -In terms of enabling us to eliminate swift without having a significant impact -on the internal API we'll first start by trying to replace the swift object -functions within tripleo-common with local file system calls. By using the -existing functions and replacing the backend we'll ensure API compatibility and -lessen the likely hood of creating regressions. - -.. note:: - - We'll need to collaborate with various groups to ensure we're porting assumed - functionality correctly. While this spec will not go into the specifics - implementation details for porting assumed functionality, it should be known - that we will be accountable for ensuring existing functionality is ported - appropriately. - - -Assignee(s) ------------ - -Primary assignee: - cloudnull - -Other contributors: - -- emilien -- ekultails - -Work Items ----------- - -The work items listed here are high level, and not meant to provide specific -implementation details or timelines. - -* Enumerate all of the Swift interactions -* Create a space on the Undercloud to house the files -* This location will be on the local file system and will be created into a - git archive; git is used for easier debug, rapid rollback, and will - provide simple versioning. -* Create an option to disable Swift on the Undercloud. -* Convert client interactions to using the local file system -* Ensure all tripleo-ansible Swift client calls are made optional -* Convert tripleo-common Swift interactions to using the local file system -* Disable Swift on the Undercloud - - -Dependencies -============ - -Before Swift can be disabled on the Undercloud we will need ensure the -deployment methodology has been changed to Metalsmith. 
- - -Testing -======= - -The Swift tests will need to be updated to use the local file system, however -the existing tests and test structure will be reused. - - -Documentation Impact -==================== - -There are several references to Swift in our documentation which we will need to -update. - - -References -========== - -* https://etherpad.opendev.org/p/tripleo-heat-swift-removal-undercloud -* http://paste.openstack.org/show/798208 diff --git a/specs/wallaby/mixed-operating-system-versions.rst b/specs/wallaby/mixed-operating-system-versions.rst deleted file mode 100644 index 0345a24f..00000000 --- a/specs/wallaby/mixed-operating-system-versions.rst +++ /dev/null @@ -1,267 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -=============================== -Mixed Operating System Versions -=============================== - -https://blueprints.launchpad.net/tripleo/+spec/mixed-operating-system-versions - -This spec proposes that a single TripleO release supports multiple operating -system versions. - -Problem Description -=================== - -Historically a single branch or release of TripleO has supported only a single -version of an operating system at a time. In the past, this has been specific -versions of Ubuntu or Fedora in the very early days, and now has standardized -on specific versions of CentOS Stream. - -In order to upgrade to a later version of OpenStack, it involves first -upgrading the TripleO undercloud, and then upgrading the TripleO overcloud to -the later version of OpenStack. The problem with supporting only a single -operating system version at a time is that such an OpenStack upgrade typically -implies an upgrade of the operating system at the same time. Combining the -OpenStack upgrade with a simultaneous operating system upgrade is problematic -due to: - -1. Upgrade complexity -2. 
Upgrade time resulting in extended maintenance windows -3. Operating system incompatibilities with running workloads (kernel, libvirt, - KVM, qemu, OVS/OVN, etc). -4. User impact of operating system changes (docker vs. podman, network-scripts - vs. NetworkManager, etc). - -Proposed Change -=============== - -Overview --------- - -This spec proposes that a release of TripleO support 2 major versions of an -operating system, particularly CentOS Stream. A single release of TripleO -supporting two major versions of CentOS Stream will allow for an OpenStack -upgrade while remaining on the same operating version. - -There are multiple software versions in play during an OpenStack upgrade: - -:TripleO: - The TripleO version is the version of the TripleO related packages installed - on the undercloud. While some other OpenStack software versions are used here - (Ironic, Neutron, etc), for the purposes of this spec, all TripleO and - OpenStack software on the undercloud will be referred to as the TripleO - version. The TripleO version corresponds to an OpenStack version. - Examples: Train, Wallaby, Zed. - -:OpenStack: - The OpenStack version is the version of OpenStack on the overcloud that is - being managed by the TripleO undercloud. - Examples: Train, Wallaby, Zed. - -:Operating System: - The operating system version is the version of CentOS Stream. Both the - undercloud and overcloud have operating system versions. The undercloud and - the overcloud may not have the same operating system version, and all nodes - in the overcloud may not have the same operating system version. - Examples: CentOS Stream 8, 9, 10 - -:Container Image: - The container image version is the version of the base container image used - by tcib. This is a version of the Red Hat universal base image (UBI). - Examples: UBI 8, 9, 10 - -For the purposes of this spec, the operating system versions being discussed -will be CentOS Stream 8 and 9, while the OpenStack versions will be Train and -Wallaby. 
However, the expectation is that TripleO continues to support 2 -operating system versions with each release going forward. Subsequently, the -Zed. release of TripleO would support CentOS Stream 9 and 10. - -With the above version definitions and considerations in mind, a TripleO -managed upgrade from Train to Wallaby would be described as the following: - -#. Upgrade the undercloud operating system version from CentOS Stream 8 to 9. -#. Upgrade the undercloud TripleO version from Train to Wallaby. - - #. The Wallaby version of the TripleO undercloud will only run on CentOS Stream - 9. - #. Implies upgrading all TripleO and OpenStack software on the undercloud to - Wallaby. - -#. Upgrade the OpenStack version on the overcloud from Train to Wallaby - - #. Does not imply upgrading the operating system version from CentOS Stream 8 - to 9. - #. Implies upgrading to new container image versions that are the images for - OpenStack Wallaby. These container image versions will likely be service - dependent. Some services may use UBI version 9, while some may remain on UBI - version 8. - -#. Upgrade the operating system version on the overcloud nodes from CentOS - Stream 8 to 9. - - #. Can happen node by node, with given constraints that might include all - control plane nodes need to be upgraded at the same time. - #. Data plane nodes could be selectively upgraded. - -The default behavior will be that users and operators can choose to upgrade to -CentOS Stream 9 separately from the OpenStack upgrade. For those operators who -want a combined OpenStack and operating system upgrade to match previous FFU -behavior, they can perform both upgrades back to back. The OpenStack and -operating system upgrades will be separate processes. There may be UX around -making the processes appear as one, but that is not prescribed by this spec. - -New TripleO deployments can choose either CentOS Stream 8 or 9 for their -Overcloud operating system version. 
- -The implication with such a change is that the TripleO software needs to know -how to manage OpenStack on different operating system versions. Ansible roles, -puppet modules, shell scripts, etc, all need to remove any assumptions about a -given operating system and be developed to manage both CentOS Stream 8 and 9. -This includes operating system utilities that may function quite differently -depending on the underlying version, such as podman and container-tools. - -CentOS Stream 8 support could not be dropped until the Zed. release of TripleO, -at which time, support would be needed for CentOS Stream 9 and 10. - -Alternatives ------------- - -:Alternative 1: - The TripleO undercloud Wallaby version could support running on both CentOS - Stream 8 and 9. There does not seem to be much benefit in supporting both. - Some users may refuse to introduce 9 into their environments at all, but - TripleO has not encountered similar resistance in the past. - -:Alternative 2: - When upgrading the overcloud to the OpenStack Wallaby version, it could be - required that all control plane nodes go through an operating system upgrade - as well. Superficially, this appears to reduce the complexity of the - development and test matrix. However, given the nature of composable roles, - this requirement would really need to be prescribed per-service, and not - per-role. Enforcing such a requirement would be problematic given the - flexibility of running any service on any role. It would instead be better - that TripleO document what roles need to be upgraded to a newer operating - system version at the same time, by documenting a set of already provided - roles or services. E.g., all nodes running a pacemaker managed service need - to be upgraded to the same operating system version at the same time. - -:Alternative 3: - A single container image version could be used for all of OpenStack Wallaby. 
In - order to support running those containers on both CentOS Stream 8 and 9, the - single UBI container image would likely need to be 8, as anticipated support - statements may preclude support for running UBI 9 images on 8. - -:Alternative 4: - New deployments could be forced to use CentOS Stream 9 only for their - overcloud operating system version. However, some users may have workloads - that have technical or certification requirements that could require CentOS - Stream 8. - -Security Impact ---------------- - -None. - -Upgrade Impact --------------- - -This proposal is meant to improve the FFU process by separating the OpenStack -and operating system upgrades. - -Most users and operators will welcome this change. Some may prefer the old -method which offered a more simultaneous and intertwined upgrade. While the new -process could be implemented in such a way to offer a similar simultaneous -experience, it will still be different and likely appear as 2 distinct steps. - -Distinct steps should result in an overall simplification of the upgrade -process. - -Other End User Impact ---------------------- - -None. - -Performance Impact ------------------- - -The previous implementations of FFU had the OpenStack and operating system -upgrades intertwined in the way that they were performed. With the separation -of the upgrade processes, the overall upgrade of both OpenStack and the -operating system may take a longer amount of time overall. Operators would need -to plan for longer maintenance windows in the cases where they still want to -upgrade both during the same windows. - -Otherwise, operators can choose to upgrade just OpenStack first, and then the -operating system at a later date, resulting in multiple, but shorter, -maintenance windows. - -Other Deployer Impact ---------------------- - -None. - -Developer Impact ----------------- - -TripleO developers will need support managing OpenStack software across -multiple operating system versions. 
- -Service developers responsible for TripleO integrations, will need to decide -upgrade requirements around their individual services when it comes to -container image versions and supporting different operating system versions. - -Given that the roll out of CentOS Stream 9 support in TripleO has happened in a -way that overlaps with supporting 8, it is largely true today that TripleO -Wallaby already supports both 8 and 9. CI jobs exist that test Wallaby on both -8 and 9. Going forward, that needs to remain true. - -Implementation -============== - -Assignee(s) ------------ - -Primary assignee: - - -Other contributors: - - -Work Items ----------- - -1. tripleo-ansible - CentOS Stream 8 and 9 support -2. tripleo-heat-templates - CentOS Stream 8 and 9 support -3. puppet-tripleo - CentOS Stream 8 and 9 support -4. puppet-* - CentOS Stream 8 and 9 support -5. tcib - build right container image versions per service - - -Dependencies -============ - -* CentOS Stream 9 builds will be required to fully test and develop - -Testing -======= - -FFU is not typically tested in upstream CI. However, CI will be needed that -tests deploying OpenStack Wallaby on both CentOS Stream 8 -and 9 in order to verify that TripleO Wallaby is compatible with both operating -system versions. - - -Documentation Impact -==================== - -The matrix of supported versions will need to be documented within -tripleo-docs. - -References -========== - -None. diff --git a/specs/wallaby/tripleo-ceph-client.rst b/specs/wallaby/tripleo-ceph-client.rst deleted file mode 100644 index 456b09a6..00000000 --- a/specs/wallaby/tripleo-ceph-client.rst +++ /dev/null @@ -1,210 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. 
- - http://creativecommons.org/licenses/by/3.0/legalcode - -=================== -TripleO Ceph Client -=================== - -https://blueprints.launchpad.net/tripleo/+spec/tripleo-ceph-client - -Native Ansible roles for TripleO integration with Ceph clusters. - - -Problem Description -=================== - -Starting in the Octopus release, Ceph has its own day1 tool called -cephadm [1]_ and it's own day2 tool called orchestrator [2]_ which -will replace ceph-ansible [3]_. While ceph-ansible had the necessary -features to configure Ceph clients, distributing for example config file -and keyrings as necessary on nodes which aren't members of the Ceph cluster, -neither cephadm or the orchestrator will manage Ceph clients configuration. - -Goal is to create some new ansible roles in TripleO to perform the -Ceph clients (Nova, Cinder, Glance, Manila) configuration, which is of special -importance in TripleO to support deployment scenarios where the Ceph cluster -is externally managed, not controlled by the undercloud, yet the OpenStack -services configuration remains a responsibility of TripleO. - - -.. _proposed-change: - -Proposed Change -=============== - -Overview --------- - -Introduce a new role into tripleo-ansible for Ceph client configuration. - -The new role will: - -- Configure OpenStack services as clients of an external Ceph cluster - (in the case of collocation, the ceph cluster is still logically - external) -- Provide Ceph configuration files and cephx keys for OpenStack - clients of RBD and CephFS (Nova, Cinder, Glance, Manila) -- Full multiclient support, e.g. one OpenStack deployment may use - multiple Ceph clusters, e.g. multibackend Glance -- Configure clients quickly, e.g. 
generate the key in one place - and copy it efficiently -- This is a standalone role which is reusable to configure OpenStack - against an externally managed Ceph cluster -- Not break existing support for CephExternalMultiConfig which is used - for configuring OpenStack to work with more than one Ceph cluster - when deploying Ceph in DCN environments (Deployment of dashboard on - DCN sites is not in scope with this proposal). - - -Alternatives ------------- - -Support for clients configuration might be added in future versions -of cephadm, yet there are some reasons why we won't be able to use this -feature as-is even if it was available today: - -- it assumes the for the cephadm tool to be configured with admin privileges - for the external Ceph cluster, which we don't have when Ceph is not - managed by TripleO; -- it also assumes that each and every client node has been provisioned into - the external Ceph orchestrator inventory so that evey Ceph MON is able to - log into the client node (overcloud nodes) via SSH; -- while offering the necessary functionalities to copy the config - files and cephx keyrings over to remote client nodes, it won't be able to - configure for example Nova with the libvirtd secret for qemu-kvm, which is - a task only relevant when the client is OpenStack; - -Security Impact ---------------- - -None derived directly from the decision to create new ansible roles. The -distribution of the cephx keyrings itself though should be implemented using -a TripleO service, like the existing CephClient service, so that keyrings -are only deployed on those nodes which actually need those. - -Upgrade Impact --------------- - -The goal is to preserve and reuse any existing Heat parameter which is -currently consumed to drive ceph-ansible; from operators' perspective the -problem of configuring a Ceph client isn't changed and there shouldn't be -a need to change the existing parameters, it's just the implementation -which will change. 
- -Performance Impact ------------------- - -As described in the :ref:`proposed-change` section, the purpose of this -role is to proper configure clients and it allows OpenStack services to -connect to an internal or external Ceph cluster, as well as multiple Ceph -cluster in a DCN context. -Since both config files and keys are necessary for many OpenStack services -(Nova, Cinder, Glance, Manila) to make them able to properly interact with -the Ceph cluster, at least two actions should be performed: - -- generate keys in one place -- copy the generated keys efficiently - -The `ceph_client` role should be very small, and a first improvement -in terms of performances can be found on key generation since they are -created in one, centralized place. -The generated keys, then, just need to be distributed across the nodes -of the Ceph cluster, as well as the Ceph cluster config file. -Adding this role to tripleo-ansible avoid adding an extra calls from -a pure deployment perspective; in fact, no additional ansible playbooks -will be triggered and we expect to see performances improved since no -additional layers are involved here. - -Developer Impact ----------------- - -How Ceph is deployed could change for anyone maintaining TripleO code -for OpenStack services which use Ceph. In theory there should be no -change as the CephClient service will still configure the Ceph -configuration and Ceph key files in the same locations. Those -developers will just need to switch to the new templates when they are -stable. - - -Implementation -============== - -The new role should be enabled by a TripleO service, like it happens -today with the CephClient service. -Depending on the environment file chosen at deployment time, the -actual implementation of such a service could either be based on -ceph-ansible or on the new role. 
- -When the Ceph cluster is not external, the role will also create -pools and the cephx keyrings into the Ceph cluster; these steps -will be skipped instead when Ceph is external precisely because we won't -have admin privileges to change the cluster configuration in that case. - -TripleO Heat Templates ----------------------- - -The existing implementation which depends on ceph-ansible will remain -in-tree for at least 1 deprecation cycle. By reusing the existing Heat -input parameters we should be able to transparently make the clients -configuration happen with ceph-ansible or the new role just by -switching the environment file used at deployment time. -TripleO users who currently use -`environments/ceph-ansible/ceph-ansible-external.yaml` in order to -have their Overcloud use an existing Ceph cluster, should be able to -apply the same templates to the new template for configuring Ceph -clients, e.g. `environments/ceph-client.yaml`. This will result in -the new tripleo-ansible/roles/ceph_client role being executed. - -Assignee(s) ------------ - -- fmount -- fultonj -- gfidente -- jmolmo - -Work Items ----------- - -Proposed Schedule ------------------ - -- OpenStack W: start tripleo-ansible/roles/ceph_client as experimental - and then set it as default in scenarios 001/004. We expect to to - become stable during the W cycle. - -Dependencies -============ - -The `ceph_client` role will be added in tripleo-ansible and allow -configuring the OpenStack services as clients of an external or TripleO -managed Ceph cluster; no new dependencies are added for tripleo-ansible -project. The `ceph_client` role will work with External Ceph, Internal -Ceph deployed by ceph-ansible, and the Ceph deployment described in -[4]_. - -Testing -======= - -It should be possible to reconfigure one of the existing CI scenarios -already deploying with Ceph to use the newer `ceph_client` role, -making it non-voting until the code is stable. 
Then switch the other -existing CI scenario to it. - - -Documentation Impact -==================== - -No doc changes should be needed. - - -References -========== - -.. [1] `cephadm `_ -.. [2] `orchestrator `_ -.. [3] `ceph-ansible `_ -.. [4] `tripleo-ceph `_ diff --git a/specs/wallaby/tripleo-ceph-ganesha.rst b/specs/wallaby/tripleo-ceph-ganesha.rst deleted file mode 100644 index 19e164bd..00000000 --- a/specs/wallaby/tripleo-ceph-ganesha.rst +++ /dev/null @@ -1,158 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -=========================================== -TripleO Ceph Ganesha Integration for Manila -=========================================== - -https://blueprints.launchpad.net/tripleo/+spec/tripleo-ceph-ganesha - -Starting in the Octopus release, Ceph has its own day1 tool called cephadm and -its own day2 tool called orchestrator which will replace ceph-ansible. - -During the Wallaby cycle TripleO will no longer use ceph-ansible for Ceph -deployment and instead use cephadm [2]_ as described in [1]_. Ganesha deserves -special attention because for its deployment we will use special functionalities -in cephadm [2]_ meant to deploy the Ganesha service standalone when the Ceph -cluster is external. - -Problem Description -=================== - -In TripleO we support deployment of Ganesha both when the Ceph cluster is itself -managed by TripleO and when the Ceph cluster is itself not managed by TripleO. - -When the Ceph cluster is *not* managed by Tripleo, the Ganesha service must be -deployed standalone; that is, without any additional core Ceph daemon and it -should instead be configured to use the external Ceph MON and MDS daemons. 
- -Proposed Change -=============== - -Overview --------- - -An ansible task will trigger cephadm [2]_ with special arguments for it to stand -up a standalone Ganesha container and to it we will provide: - -- the Ceph cluster config file, generated using tripleo-ceph-client [3]_ role -- the Ceph cluster keyring to interact with MDS -- the Ganesha config file with pointers to the Ceph config/keyring to use - -The container will then be controlled by pacemaker, as it is today and reusing -the same code which today manages the ceph-nfs systemd service created by -ceph-ansible. - -Alternatives ------------- - -Forking and reusing the existing ceph-ansible role for ceph-nfs has been -discussed but ultimately discarded as that would have moved ownership of the -Ganesha deployment tasks in TripleO, while our goal remaing to keep ownership -where subject expertise is, in the Ceph deployment tool. - -Security Impact ---------------- - -None, the same code which TripleO would already use for the generation of the -Ceph cluster config and keyrings will be consumed. - -Upgrade Impact --------------- - -Some upgrade tasks which stop and remove the pre-existing ceph-nfs container -and systemd unit will be added to clean up the system from the ceph-ansible -managed resources. - -Other End User Impact ---------------------- - -None, the existing input parameters will be reused to drive the newer deployment -tool. - -Performance Impact ------------------- - -No changes. - -Other Deployer Impact ---------------------- - -No impact on users. - -Developer Impact ----------------- - -The Ganesha config file will be generated using a specific tripleo-ceph task -while previously, with ceph-ansible, this was created by ceph-ansible itself. - -Implementation -============== - -The existing implementation which depends on ceph-ansible will remain -in-tree for at least 1 deprecation cycle. 
By reusing the existing Heat -input parameters we should be able to transparently make the Ganesha -deployment happen with ceph-ansible or the new role just by switching -the environment file used at deployment time. - -Deployment Flow ---------------- - -The deployment and configuration described in this spec will -happen before `openstack overcloud deploy`, as described in -[1]_. This is consistent with how ceph-ansible used to run during -step2 to configure these services. However, parts of the Manila -configuration which use Ganesha will still happen when `openstack -overcloud deploy` is run. This is because some of the configuration -for Ganesha and Manila needs to happen during step 5. Thus, files like -`environments/manila-cephfsganesha-config.yaml` will be updated to -trigger the new required actions. - -Assignee(s) ------------ - -- fmount -- fultonj -- gfidente - -Work Items ----------- - -- Create a set of tasks to deploy on overcloud nodes the Ganesha config file -- Create a set of tasks to trigger cephadm with special arguments - -Dependencies -============ - -- The tripleo-ceph spec [1]_ - -Testing -======= - -Testing is currently impossible as we only have one network while for Ganesha -we require at least two, one which connects it to the Ceph public network and -another where the NFS proxy service is exposed to tenants. - -This is a design decision, one of the values added by the use of an NFS proxy -for CephFS is to implement network isolation in between the tenant guests and -the actual Ceph cluster. - -Such a limitation does not come from the migration to cephadm [2]_ but it has -always existed; the code which enforces the use of two isolated networks is in -fact in TripleO, not in the Ceph tool itself. We might revisit this in the -future but it is not a goal of this spec to change this. - -Documentation Impact -==================== - -No changes should be necessary to the TripleO documentation. - -References -========== - -.. 
[1] `tripleo-ceph `_ -.. [2] `cephadm `_ -.. [3] `tripleo-ceph-client `_ diff --git a/specs/wallaby/tripleo-ceph.rst b/specs/wallaby/tripleo-ceph.rst deleted file mode 100644 index c7828c01..00000000 --- a/specs/wallaby/tripleo-ceph.rst +++ /dev/null @@ -1,832 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -============ -TripleO Ceph -============ - -https://blueprints.launchpad.net/tripleo/+spec/tripleo-ceph - -A light Ansible framework for TripleO integration with Ceph clusters -deployed with cephadm_ and managed with Ceph orchestrator_. - - -Problem Description -=================== - -Starting in the Octopus release, Ceph has its own day1 tool called -cephadm_ and it's own day2 tool called orchestrator_ which will -replace ceph-ansible_. What should TripleO's Ceph integration -do about this? We currently provide the following user experience: - - Describe an OpenStack deployment, which includes Ceph, and TripleO - will "make it so" - -The above has been true for TripleO since Kilo and should -continue. TripleO should also continue hyper-converged support -(collocation of OpenStack and Ceph containers). There is sufficient -value in both of these (one tool and hyper-convergence) to justify -this project. At the same time we want to deploy Ceph in a way -consistent with the way the Ceph project is moving and decouple the -complexity of day2 management of Ceph from TripleO. 
- - -Proposed Change -=============== - -Overview --------- - -Modify tripleo-ansible, tripleo-heat-templates, and -python-tripleoclient in support of the following goals: - -- Provide Ansible roles which deploy Ceph by calling cephadm_ and Ceph - orchestrator -- Focus on the day1 problem for Ceph RBD, RGW, CephFS, and Dashboard - deployment by leveraging `cephadm bootstrap --apply-spec` as - described in Ceph issue 44873_ -- By default, day2 Ceph operations should be done directly with Ceph - orchestrator_ or Ceph Dashboard and not by running `openstack - overcloud deploy` -- TripleO stack updates do not trigger the new Ansible roles - introduced by this spec. -- Provide an opinionated Ceph installation based on parameters from - TripleO (including hardware details from Ironic) -- Configure cephx keyrings and pools for OpenStack on a deployed Ceph - cluster -- Support collocation (hyperconvergence) of OpenStack/Ceph containers - on same host - - cephadm_ reconciliation loop must not break OpenStack configuration - - TripleO configuration updates must not break Ceph configuration -- Provide Ceph integration but maximize orthogonality between - OpenStack and Ceph - -The implementation of the TripleO CephClient service during the W -cycle is covered in a different spec in review 757644_. This work will -be merged before the work described in this spec as it will be -compatible with the current Ceph deployment methods. It will also be -compatible with the future deployment methods described in this spec. - -Integration Points ------------------- - -The default deployment method of OpenStack/Ceph for TripleO Victoria -is the following 2-step-process: - -1. Deploy nodes with metalsmith_ -2. Deploy OpenStack and Ceph with `openstack overcloud deploy` - -The Ceph portion of item 2 uses external_deploy_steps_tasks to call -ceph-ansible by using the tripleo-ansible roles: tripleo_ceph_common, -tripleo_ceph_uuid, tripleo_ceph_work_dir, tripleo_ceph_run_ansible. 
- -The ultimate goal for this spec is to support the following -4-step-process: - -1. Deploy the hardware with metalsmith_ -2. Configure networking (including storage networks) -3. Deploy Ceph with the roles and interface provided by tripleo-ansible/python-tripleoclient -4. Deploy OpenStack with `openstack overcloud deploy` - -Item 2 above depends on the spec for network data v2 format described -in review 752437_ and a subsequent network-related feature which moves -port management out of Heat, and supports applying network -configuration prior to Heat stack deployment described in review -760536_. - -Item 3 above is the focus of this spec but it is not necessarily -the only integration point. If it is not possible to configure the -storage networks prior to deploying OpenStack, then the new method -of Ceph deployment will still happen via external_deploy_steps_tasks -as it currently does in Victoria via the 2-step-process. Another way -to say this is that Ceph may be deployed *during* the overcloud -deployment in the 2-step process or Ceph may be deployed *before* the -overcloud during the 4-step process; in either case we will change how -Ceph is deployed. - -The benefit of deploying Ceph before deploying the overcloud is that -the complexity of the Ceph deployment is decoupled from the complexity -of the OpenStack deployment. Even if Ceph is deployed before the -overcloud, its deployment remains a part of TripleO the same way that -the bare metal deployment remains a part of TripleO; even though a -separate tool, e.g. metalsmith_ or cephadm_ is used to deploy the -resources which are not deployed when `openstack overcloud deploy` -is run. - -Additional details on how Ceph is deployed before vs during the -overcloud deployment are covered in the implementation section. 
- -Alternatives ------------- - -We could ask deployers to do this: - -- Deploy hardware and configure networking -- Use cephadm_ and orchestrator_ directly to configure that hardware - with Ceph and create OpenStack pools accessible by CephX clients -- Use TripleO to configure OpenStack - -We have completed a POC of the above using Ussuri and config-download -tags to only run certain steps but would prefer to offer an option to -automate the Ceph deployment. The TripleO project has already ensured -that the move from one to three is automated and requires only two -commands because the tripleo python client now has an option to call -metalsmith_. The alternative is to not automate step two, but that is -user unfriendly. - -Another alternative is to continue using ceph-ansible_ as we do today. -However, even though ceph-ansible_ can deploy Octopus today and will -continue to support deployment of Luminous and Nautilus, the project -has a cephadm-adopt_ playbook for converting Ceph clusters that it has -deployed to mangement by cephadm_ orchestrator_ so seems to be moving -away from true Octopus support. ceph-ansible_ has lot of code and day2 -support; porting ceph-ansible itself to cephadm_ or orchestrator_ is -more work than completing this project with a smaller scope and looser -coupling. - -Security Impact ---------------- - -The cephadm_ tool is imperative and requires SSH access to the Ceph -cluster nodes in order to execute remote commands and deploy the -specified services. This command will need to be installed on one of -the overcloud nodes which will host the composable CephMon service. -From the cephadm_ point of view, that node will be a bootstrap node -on which the Ceph cluster is created. - -For this reason the Ceph cluster nodes must be SSH accessible and -provide a user with root privileges to perform some tasks. 
For -example, the standard way to add a new host when using cephadm_ is to -run the following: - -- `ssh-copy-id -f -i /etc/ceph/ceph.pub root@**` -- `ceph orch host add **` - -The TripleO deployment flow, and in particular config-download, -already provides the key elements to properly configure and run -the two actions described above, hence the impact from a security -point of view is unchanged compared to the previous deployment model. - -We will create a user like ceph-admin using the same process -config-download uses to create the tripleo-admin user and then -cephadm_ will use this user when it runs commands to add other -hosts. - -Upgrade Impact --------------- - -Ceph Nautilus clusters are still managed by ceph-ansible, and cephadm_ -can be enabled, as the new, default backend, once the Octopus release -is reached. Therefore, starting from Nautilus, two main steps are -identified in the upgrade process: - -- Upgrade the cluster using ceph-ansible_ `rolling_update`: - ceph-ansible_ should provide, as already done in the past, a rolling - update playbook that can be executed to upgrade all the services to - the Octopus release -- Migrate the existing cluster to cephadm/orchestrator: when all the - services are updated to Octopus cephadm-adopt_ will be executed as - an additional step - -New Ceph Octopus deployed clusters will use cephadm_ and ceph -orchestrator_ by default, and the future upgrade path will be provided -by cephadm_upgrade_, which will be able to run, stop and resume all -the Ceph upgrade phases. At that point day2 ceph operations will need -to be carried out directly with ceph orchestrator. Thus, it will no -longer be necessary to include the -`tripleo-heat-templates/environments/ceph-ansible/*` files in the -`openstack overcloud deploy` command with the exception of the Ceph -client configuration as described in review 757644_, which will have a -new environment file. - -.. 
note:: - - The Upgrade process for future releases can be subject of slight - modifications according to the OpenStack requirements. - - -Other End User Impact ---------------------- - -The main benefit from the operator perspective is the ability to take -advantage of the clear separation between the deployment phase and -day2 operations as well as the separation between the Ceph deployment -and the OpenStack deployment. At the same time TripleO can still -address all the deployment phase operations with a single tool but -leave and rely on orchestrator_ for what concerns day2 tasks. - -Many common tasks can now be performed the same way regardless of if -the Ceph cluster is internal (deployed by) or external to TripleO. -The operator can use the cephadm_ and orchestrator_ tools which will -be accessible from one of the Ceph cluster monitor nodes. - -For instance, since cephadm_ maintains the status of the cluster, the -operator is now able to perform the following tasks without interacting -with TripleO at all: - -1. Monitor replacement -2. OSD replacement (if a hardware change is necessary then Ironic - might be involved) - -.. note:: - - Even though cephadm_ standalone, when combined with Ceph - orchestrator_, should support all the commands required to the - carry out day2 operations, our plan is for tripleo-ceph to - continue to manage and orchestrate other actions that can - be taken by an operator when TripleO should be involved. E.g. - a CephStorage node is added as a scale-up operation, then - the tripleo-ceph Ansible roles should make calls to add the OSDs. - -Performance Impact ------------------- - -Stack updates will not trigger Ceph tools so "OpenStack only" changes -won't be delayed by Ceph operations. Ceph client configuration will -take less time though this benefit is covered in review 757644_. - -Other Deployer Impact ---------------------- - -Like ceph-ansible, cephadm_ is distributed as an RPM and can be -installed from Ceph repositories. 
However, since the deployment -approach is changed and cephadm_ requires a Ceph monitor node to -bootstrap a minimal cluster, we would like to install the cephadm_ -RPM on the overcloud image. As of today this RPM is approximately 46K -and we expect this to simplify the installation process. When cephadm_ -bootstraps the first Ceph monitor (on the first Controller node by -default) it will download the necessary Ceph containers. To contrast -this proposal with the current Ceph integration, ceph-ansible_ needs -to be installed on the undercloud and it then manages the download of -Ceph containers to overcloud nodes. In the case of both cephadm_ and -ceph-ansible, no other package changes are needed for the overcloud -nodes as both tools run Ceph in containers. - -This change affects all TripleO users who deploy an Overcloud which -interfaces with Ceph. Any TripleO user who does not interface with -Ceph will not be directly impacted by this project. - -TripleO users who currently use -`environments/ceph-ansible/ceph-ansible.yaml` in order to have their -overcloud deploy an internal Ceph cluster will need to migrate to the -new method when deploying W. This file and others will be deprecated as -described in more detail below. - -The proposed changes do not take immediate effect after they are -merged because both the ceph-ansible_ and cephadm_ interfaces will -exist intree concurrently. - -Developer Impact ----------------- - -How Ceph is deployed could change for anyone maintaining TripleO code -for OpenStack services which use Ceph. In theory there should be no -change as the CephClient service will still configure the Ceph -configuration and Ceph key files in the same locations. Those -developers will just need to switch to the new interfaces when they -are stable. 
- -Implementation -============== - -How configuration data is passed to the new tooling when Ceph is -deployed *before* or *during* the overcloud deployment, as described -in the Integration Points section of the beginning of this spec, will -be covered in more detail in this section. - -Deprecations ------------- - -Files in `tripleo-heat-templates/environments/ceph-ansible/*` and -`tripleo-heat-templates/deployment/ceph-ansible/*` will be deprecated -in W and removed in X. They will be obsoleted by the new THT -parameters covered in the next section with the exception of -`ceph-ansible/ceph-ansible-external.yaml` which will be replaced by -`environments/ceph-client.yaml` as described in review 757644_. - -The following tripleo-ansible roles will be deprecated at the start -of W: tripleo_ceph_common, tripleo_ceph_uuid, tripleo_ceph_work_dir, -and tripleo_ceph_run_ansible. The ceph_client role will not be -deprecated but it will be re-implemented as described in review -757644_. New roles will be introduced to tripleo-ansible to replace -them. - -Until the project described here is complete during X we will -continue to maintain the deprecated ceph-ansible_ roles and -Heat templates for the duration of W and so it is likely that during -one release we will have intree support both ceph-ansible_ and -cephadm_. - -New THT Templates ------------------ - -Not all THT configuration for Ceph can be removed. The firewall is -still configured based on THT as descrbed in the next section and THT -also controls which composable service is deployed and where. The -following new files will be created in -`tripleo-heat-templates/environments/`: - -- cephadm.yaml: triggers new cephadm Ansible roles until `openstack - overcloud ceph ...` makes it unnecessary. Contains the paths to the - files described in the Ceph End State Definition YAML Input section. 
-- ceph-rbd.yaml: RBD firewall ports, pools and cephx key defaults -- ceph-rgw.yaml: RGW firewall ports, pools and cephx key defaults -- ceph-mds.yaml: MDS firewall ports, pools and cephx key defaults -- ceph-dashboard.yaml: defaults for Ceph Dashboard firewall ports - -All of the above (except cephadm.yaml) will result in the appropriate -firewall ports being opened as well as a new idempotent Ansible role -connecting to the Ceph cluster in order to create the Ceph pools and -cephx keys to access those pools. Which ports, pools and keys are -created will depend on which files are included. E.g. if the deployer -ran `openstack overcloud deploy ... -e ceph-rbd.yaml -e cep-rgw.yaml` -then the ports, pools and cephx keys would be configured for Nova, -Cinder, and Glance to use Ceph RBD and RGW would be configured with -Keystone, but no firewall ports, pools and keys for the MDS service -would be created and the firewall would not be opened for the Ceph -dashboard. - -None of the above files, except cephadm.yaml, will result in Ceph -itself being deployed and none of the parameters needed to deploy Ceph -itself will be in the above files. E.g. PG numbers and OSD devices -will not be defined in THT anymore. Instead the parameters which are -needed to deploy Ceph itself will be in tripleo_ceph_config.yaml as -described in the Ceph End State Definition YAML Input section and -cephadm.yaml will only contain references to those files. - -The cephx keys and pools, created as described above, will result in -output data which looks like the following:: - - pools: - - volumes - - vms - - images - - backups - openstack_keys: - - caps: - mgr: allow * - mon: profile rbd - osd: 'osd: profile rbd pool=volumes, profile rbd pool=backups, - profile rbd pool=vms, profile rbd pool=images' - key: AQCwmeRcAAAAABAA6SQU/bGqFjlfLro5KxrB1Q== - mode: '0600' - name: client.openstack - -The above can be written to a file, e.g. 
ceph_client.yaml, and passed -as input to the the new ceph client role described in review 757644_ -(along with the ceph_data.yaml file produced as output as described in -Ceph End State Definition YAML Output). - -In DCN deployments this type of information is extracted from the Heat -stack with `overcloud export ceph`. When the new method of deployment -is used this information can come directly from each genereated yaml -file (e.g. ceph_data.yaml and ceph_client.yaml) per Ceph cluster. - -Firewall --------- - -Today the firewall is not configured by ceph-ansible_ and it won't be -configured by cephadm_ as its `--skip-firewalld` will be used. We -expect the default overcloud to not have firewall rules until -`openstack overcloud deploy` introduces them. The THT parameters -described in the previous section will have the same firewall ports as -the ones they will deprecate (`environments/ceph-ansible/*`) so that -the appropriate ports per service and based on composable roles will -be opened in the firewall as they are today. - -OSD Devices ------------ - -The current defaults will always be wrong for someone because the -`devices` list of available disks will always vary based on hardware. -The new default will use all available devices when creating OSDs by -running `ceph orch apply osd --all-available-devices`. It will still -be possible to override this default though the ceph-ansible_ syntax of -the `devices` list will be deprecated. In its place the OSD Service -Specification defined by cephadm_ drivegroups will be used and the tool -will apply it by running `ceph orch apply osd -i osd_spec.yml`. More -information on the `osd_spec.yaml` is covered in the Ceph End State -Definition YAML Input section. - -Ceph Placement Group Parameters -------------------------------- - -The new tool will deploy Ceph with the pg autotuner feature enabled. -Parameters to set the placement groups will be deprecated. 
Those who -wish to disable the pg autotuner may do so using Ceph CLI tools after -Ceph is deployed. - -Ceph End State Definition YAML Input ------------------------------------- - -Regardless of if Ceph is deployed *before* or *during* overcloud -deployment, a new playbook which deploys Ceph using cephadm_ will be -created and it will accept the following files as input: - -- deployed-metal.yaml: this file is generated by running a command - like `openstack overcloud node provision ... --output - deployed-metal.yaml` when using metalsmith_. - -- (Optional) "deployed-network-env": the file that is generated by - `openstack network provision` as described in review 752437_. This - file is used when deploying Ceph before the overcloud to identify - the storage networks. This will not be necessary when deploying Ceph - during overcloud deployment so it is optional and the storage - network will be identified instead as it is today. - -- (Optional) Any valid cephadm_ config.yml spec file as described in - Ceph issue 44205_ may be directly passed to the cephadm_ execution - and where applicable will override all relevant settings in the file - described at the end of this list. - -- (Optional) Any valid drivegroup_ YAML file (e.g. osd_spec.yml) may - be passed and the tooling will apply it with `ceph orch apply osd -i - osd_spec.yml`. This setting will override all relevant settings in - the file described at the end of this list. - -- tripleo_ceph_config.yaml: This file will contain configuration data - compatible with nearly all Ceph options supported today by TripleO - Heat Templates with the exception of the firewall, ceph pools and - cephx keys. A template of this file will be provided in as a default - in one of the new tripleo-ansible roles (e.g. tripleo_cephadm_common) - -Another source of data which is input into the new playbook is the -inventory which is covered next section. 
- -Ansible Inventory and Ansible User ----------------------------------- - -The current Ceph implementation uses the Ansible user tripleo-admin. -That user and the corresponding SSH keys are created by the -tripleo-ansible role tripleo_create_admin. This role uses the -heat-admin account which is the default account if `openstack -overcloud node provision` is not passed the `--overcloud-ssh-user` -option. The current implementation also uses the inventory generated -by tripleo-ansible-inventory. These resources will not be available -if Ceph is deployed *before* the overcloud and there's no reason they -are needed if Ceph is deployed *during* the overcloud deployment. - -Regardless if Ceph is deployed *before* or *during* overcloud, prior -to deploying Ceph, `openstack overcloud admin authorize` should be run -and it should pass options to enable a ceph-admin user which can be -used by cephadm_ and to allow SSH access for the ansible roles -described in this spec. - -A new command, `openstack overcloud ceph inventory` will be -implemented which creates an Ansible inventory for the new playbook -and roles described in this spec. This command will require the -following input: - -- deployed-metal.yaml: this file is generated by running a command - like `openstack overcloud node provision ... --output - deployed-metal.yaml` when using metalsmith_. - -- (Optional) roles.yaml: If this file is not passed then - /usr/share/openstack-tripleo-heat-templates/roles_data.yaml will be - used in its place. If the roles in deployed-metal.yaml do not have a - definition found in roles.yaml, then an error is thrown that a role - being used is undefined. By using this file, the TripleO composable - roles will continue to work as they to today. The services matching - "OS::TripleO::Services::Ceph*" will correspond to a new Ansible - inventory group and the hosts in that group will correspond to the - hosts found in deployed-metal.yaml. 
- -- (Optional) `-u --ssh-user `: this is not a file but an option - which defaults to "ceph-admin". This represents the user which was - created on all overcloud nodes by `openstack overcloud admin - authorize`. - -- (Optional) `-i --inventory `: this is not a file but an option - which defaults to "/home/stack/inventory.yaml". This represents the - inventory which will be created. - -If Ceph is deployed before the overcloud, users will need to run this -command to generate an Ansible inventory file. They will also need to -pass the path to the generated inventory file to `openstack overcloud -ceph provision` as input. - -If Ceph is deployed *during* overcloud deployment, users do not need -to know about this command as external_deploy_steps_tasks will run -this command directly to generate the inventory before running the new -tripleo ceph playbook with this inventory. - -Ceph End State Definition YAML Output -------------------------------------- - -The new playbook will write output data to one yaml file which -contains information about the Ceph cluster and may be used as -input to other processes. - -In the case that Ceph is deployed before the overcloud, if `openstack -overcloud ceph provision --output ceph_data.yaml` were run, then -`ceph_data.yaml` would then be passed to `openstack overcloud deploy -... -e ceph_data.yaml`. The `ceph_data.yaml` file will contain -key/value pairs such as the Ceph FSID, Name, and the Ceph monitor IPs. - -In the case that Ceph is deployed with the overcloud, if -external_deploy_steps_tasks calls the new playbook, then the same file -will be written to its default location (/home/stack/ceph_data.yaml) -and the new client role will directly read the parameters from this file. - -An example of what this file, e.g. 
`ceph_data.yaml`, looks like is:: - - cluster: ceph - fsid: af25554b-42f6-4d2b-9b9b-d08a1132d3e899 - ceph_mon_ips: - - 172.18.0.5 - - 172.18.0.6 - - 172.18.0.7 - -In DCN deployments this type of information is extracted from the Heat -stack with `overcloud export ceph`. When the new method of deployment -is used this information can come directly from the `ceph_data.yaml` -file per Ceph cluster. This file will be passed as input to the new -ceph client role described in review 757644_. - -Requirements for deploying Ceph during Overcloud deployment ------------------------------------------------------------ - -If Ceph is deployed *during* the overcloud deployment, the following -should be the case: - -- The external_deploy_steps_tasks playbook will execute the new - Ansible roles after `openstack overcloud deploy` is executed. -- If `openstack overcloud node provision .. --output - deployed-metal.yaml` were run, then `deployed-metal.yaml` would be - input to `openstack overcloud deploy`. This is the current behavior - we have in V. -- Node scale up operations for day2 Ceph should be done by running - `openstack overcloud node provision` and then `openstack overcloud - deploy`. This will include reasserting the configuration of - OpenStack services unless those operations are specifically set to - "noop". -- Creates its own Ansible inventory and user -- The path to the "Ceph End State Definition YAML Input" is referenced - via a THT parameter so that when external_deploy_steps_tasks runs it - will pass this file to the new playbook. 
- -Requirements for deploying Ceph before Overcloud deployment ------------------------------------------------------------ - -If Ceph is deployed *before* the overcloud deployment, the following -should be the case: - -- The new Ansible roles will be triggered when the user runs a command - like `openstack overcloud ceph ...`; this command is meant - to be run after running `openstack overcloud node provision` to - trigger metalsmith_ but before running `openstack overcloud deploy`. -- If `openstack overcloud node provision .. --output - deployed-metal.yaml` were run, then `deployed-metal.yaml` would be - input to `openstack overcloud ceph provision`. -- Node scale up operations for day2 Ceph should be done by running - `openstack overcloud node provision`, `openstack overcloud network - provision`, and `openstack overcloud admin authorize` to enable a - ceph-admin user. However it isn't necessary to run `openstack - overcloud ceph ...` because the operator should connect to the Ceph - cluster itself to add the extra resources, e.g. use a cephadm shell - to add the new hardware as OSDs or other Ceph resource. If the - operation includes adding hyperconverged node with both Ceph and - OpenStack services then the third step will be to run `openstack - overcloud deploy`. -- Requires the user to create an inventory (and user) before running - using new Ceph deployment tools. -- "Ceph End State Definition YAML Input" is directly passed. - -Container Registry Support --------------------------- - -It is already supported to host a container registry on the -undercloud. This registry contains Ceph and OpenStack containers -and it may be populated before deployment or during deployment. -When deploying ceph before overcloud deployment it will need to be -populated before deployment. The new integration described in this -spec will direct cephadm_ to pull the Ceph containers from the same -source identified by `ContainerCephDaemonImage`. 
For example:: - - ContainerCephDaemonImage: undercloud.ctlplane.mydomain.tld:8787/ceph-ci/daemon:v4.0.13-stable-4.0-nautilus-centos-7-x86_64 - -Network Requirements for Ceph to be deployed before the Overcloud ------------------------------------------------------------------ - -The deployment will be completed by running the following commands: - -- `openstack overcloud node provision ...` -- `openstack overcloud network provision ...` (see review 751875_) -- `openstack overcloud ceph ...` (triggers cephadm/orchestrator) -- `openstack overcloud deploy ...` - -In the past stack updates did everything, but the split for -metalsmith_ established a new pattern. As per review 752437_ and a -follow up spec to move port management out of Heat, and apply network -configuration prior to the Heat stack deployment, it will eventually -be possible for the network to be configured before `openstack -overcloud deploy` is run. This creates an opening for the larger goal -of this spec which is a looser coupling between Ceph and OpenStack -deployment while retaining full integration. After the storage and -storage management networks are configured, then Ceph can be deployed -before any OpenStack services are configured. This should be possible -regardless of if the same node hosts both Ceph and OpenStack -containers. - -Development work on for deploying Ceph before overcloud deployment -can begin before the work described in reviews 752437_ and 760536_ -is completed by either of the following methods: - -Option 1: -- `openstack overcloud deploy --skip-tags step2,step3,step4,step5` -- use tripleo-ceph development code to stand up Ceph -- `openstack overcloud deploy --tags step2,step3,step4,step5` - -The last step will also configure the ceph clients. This sequence has -been verified to work in a proof of concept of this proposal. 
- -Option 2: -- Create the storage and storage management networks from the undercloud (using review 751875_) -- Create the Ironic ports for each node as per review 760536_ -- Use instances Nics Properties to pass a list of dicts to provision the node not just on the ctlplane network but also the storage and storage-management networks when the node is provisioned with metalsmith_ -- Metalsmith/Ironic should attach the VIFs so that the nodes are connected to the Storage and Storage Management networks so that Ceph can then be deployed. - -PID1 services used by Ceph --------------------------- - -During the W cycle we will not be able to fully deploy an HA Dashboard -and HA RGW service before the overcloud is deployed. Thus, we will -deploy these services as we do today; by using a ceph tool, though -we'll use cephadm_ in place of ceph-ansible_, and then complete the -configuration of these services during overcloud deployment. Though -the work to deploy the service itself will be done before overcloud -deployment, the service won't be accessible in HA until after the -overcloud deployment. - -Why can't we fully deploy the HA RGW service before the overcloud? -Though cephadm_ can deploy an HA RGW service without TripleO its -implementation uses keepalived which cannot be collocated with -pacemaker, which is required on controller nodes. Thus, during the -W cycle we will keep using the RGW service with haproxy and revisit -making it a separate deployment with collaboration with the PID1 team -in a future cycle. - -Why can't we fully deploy the HA Dashboard service before the -overcloud? cephadm_ does not currently have a builtin HA model for -its dashboard and the HA Dashboard is only available today when it -is deployed by TripleO (unless it's configured manually). - -Ceph services which need VIPs (Dashbard and RGW) need to know what the -VIPs will be in advance but the VIPs do not need to be pingable before -those Ceph services are deployed. 
Instead we will be able to know what -the VIPs are before deploying Ceph per the work related to reviews -751875_ and 760536_. We will pass these VIPs as input to cephadm_. - -For example, if we know the Dashboard VIP in advance, we can run the -following:: - - ceph --cluster {{ cluster }} dashboard set-grafana-api-url {{ dashboard_protocol }}://{{ VIP }}:{{ grafana_port }}" - -The new automation could then save the VIP parameter in the ceph mgr -global config. A deployer could then and wait for haproxy to be -available from the overcloud deploy so that an HA dashbard similar to -the one Victoria deploys is available. - -It would be simpler if we could address the above issues before -overcloud deployment but doing so is out of the scope of this spec. -However, we can aim to offer the dashboard in HA with the new tooling -around the time of the X cycle and we hope to do so through -collaboration with the Ceph orchestrator community. - -TripleO today also supports deploying the Ceph dashboard on any -composed network. If the work included in review 760536_ allows us to -compose and deploy the overcloud networks in advance, then we plan to -pass parameters to cephadm to continue support of the dashboard on its -own private network. - -TLS-Everywhere --------------- - -If Ceph is provisioned before the overcloud, then we will not have -the certificates and keys generated by certmonger via TripleO's -tls-everywhere framework. We expect cephadm to be able to deploy the -Ceph Dashboard (with Grafana), RGW (with HA via haproxy) with TLS -enabled. For the sake of orthogonality we could require that the -certificates and keys for RGW and Dashboard be generated outside of -TripleO so that these services could be fully deployed without the -overcloud. However, because we still need to use PID1 services as -described in the previous section, we will continue to use TripleO's -TLS-e framework. 
- -Assignee(s) ------------ - -- fmount -- fultonj -- gfidente -- jmolmo - -Work Items ----------- - -- Create a set of roles matching tripleo_ansible/roles/tripleo_cephadm_* - which can coexist with the current tripleo_ceph_common, - tripleo_ceph_uuid, tripleo_ceph_work_dir, tripleo_ceph_run_ansible, - roles. -- Patch the python tripleo client to support the new command options -- Create a new external_deploy_steps_tasks interface for deploying - Ceph using the new method during overcloud deployment -- Update THT scenario001/004 to use new method of ceph deployment - -Proposed Schedule ------------------ - -- OpenStack W: merge tripleo-ansible/roles/ceph_client descrbed in - review 757644_ early as it will work with ceph-ansible_ internal - ceph deployments too. Create tripleo-ansible/roles/cephadm_* roles - and tripleo client work to deploy Octopus as experimental and then - default (only if stable). If new tripleo-ceph is not yet stable, - then Wallaby will release with Nautilus support as deployed by - ceph-ansible_ just like Victoria. Either way Nautilus support via - current THT and tripleo-ansible triggering ceph-ansible_ will be - deprecated. - -- OpenStack X: tripleo-ansible/roles/cephadm_* become the default, - tripleo-ansible/roles/ceph_* are removed except the new ceph_client, - tripleo-heat-templates/environments/ceph-ansible/* removed. Migrate - to Ceph Pacific which GAs upstream in March 2021. - -Dependencies -============ - -- The spec for tripleo-ceph-client described in review 757644_ -- The spec for network data v2 format described in review 752437_ -- The spec for node ports described in review 760536_ - -The last two items above are not required if we deploy Ceph during -overcloud deployment. - -Testing -======= - -This project will be tested against at least two different scenarios. 
-This will ensure enough coverage on different use cases and cluster -configurations, which is pretty similar to the status of the job -definition currently present in the TripleO CI. -The defined scenarios will test different features that can be enabled -at day1. -As part of the implementation plan, the definition of the -tripleo-heat-templates environment CI files, which contain the testing job -parameters, is one of the goals of this project, and we should make sure -to have: - -- a basic scenario that covers the ceph cluster deployment using cephadm_; - we will gate the tripleo-ceph project against this scenario, as well - as the related tripleo heat templates deployment flow; - -- a more advanced use case with the purpose of testing the configuration - that can be applied to the ceph cluster and are orchestrated by the - tripleo-ceph project. - -The two items described above are pretty similar to the test suite that -today is maintained in the TripleO CI, and they can be implemented -reworking the existing scenarios, adding the proper support to the -cephadm_ deployment model. -A WIP patch can be created and submitted with the purpose of testing -and gating the tripleo-ceph project, and, when it becomes stable -enough, the scenario001 will be able to be officially merged. -The same approach can be applied to the existing scenario004, which -can be seen as an improvement of the first testing job. -This is mostly used to test the Rados Gateway service deployment and -the manila pools and key configuration. -An important aspect of the job definition process is related to -standalone vs multinode. -As seen in the past, multinode can help catching issues that are not -visible in a standalone environment, but of course the job -configuration can be improved in the next cycles, and we can start -with standalone testing, which is what is present today in CI. 
Maintaining the CI jobs green will always be one of the goals
- - http://creativecommons.org/licenses/by/3.0/legalcode - -============================== -Install and Configure FRRouter -============================== - -The goal of this spec is to design and plan requirements for adding support to -TripleO to install and provide a basic configuration of Free Range Router (FRR) -on overcloud nodes in order to support BGP dynamic routing. There are multiple -reasons why an administrator might want to run FRR, including to obtain -multiple routes on multiple uplinks to northbound switches, or to advertise -routes to networks or IP addresses via dynamic routing protocols. - -Problem description -=================== - -There are several use cases for using BGP, and in fact there are separate -efforts underway to utilize BGP for the control plane and data plane. - -BGP may be used for equal-cost multipath (ECMP) load balancing of outbound -links, and bi-directional forwarding detection (BFD) for resiliency to ensure -that a path provides connectivity. For outbound connectivity BGP will learn -routes from BGP peers. - -BGP may be used for advertising routes to API endpoints. In this model HAProxy -will listen on an IP address and FRR will advertise routes to that IP to BGP -peers. High availability for HAProxy is provided via other means such as -Pacemaker, and FRR will simply advertise the virtual IP address when it is -active on an API controller. - -BGP may also be used for routing inbound traffic to provider network IPs or -floating IPs for instance connectivity. The Compute nodes will run FRR to -advertise routes to the local VM IPs or floating IPs hosted on the node. FRR -has a daemon named Zebra that is responsible for exchanging routes between -routing daemons such as BGP and the kernel. The *redistribute connected* -statement in the FRR configuration will cause local IP addresses on the host -to be advertised via BGP. 
Floating IP addresses are attached to a loopback -interface in a namespace, so they will be redistributed using this method. -Changes to OVN will be required to ensure provider network IPs assigned to VMs -will be assigned to a loopback interface in a namespace in a similar fashion. - -Proposed Change -=============== - -Overview --------- - -Create a container with FRR. The container will run the BGP daemon, BFD -daemon, and Zebra daemon (which copies routes to/from the kernel). Provide a -basic configuration that would allow BGP peering with multiple peers. In the -control plane use case the FRR container needs to be started along with the HA -components, but in the data plane use case the container will be a sidecar -container supporting Neutron. The container is defined in a change proposed -here: [1]_ - -The container will be deployed using a TripleO Deployment Service. The service -will use Ansible to template the FRR configuration file, and a simple -implementation exists in a proposed change here: [2]_ - -The current FRR Ansible module is insufficient to configure BGP parameters and -would need to be extended. At this time the Ansible Networking development -team is not interested in extending the FRR module, so the configuration will -be provided using TripleO templates for the FRR main configuration file and -daemon configuration file. Those templates are defined in a change proposed -here: [3]_ - -A user-modifiable environment file will need to be provided so the installer -can provide the configuration data needed for FRR (see User Experience below). - -OVN will need to be modified to enable the Compute node to assign VM provider -network IPs to a loopback interface inside a namespace. These IP address will -not be used for sending or receiving traffic, only for redistributing routes -to the IPs to BGP peers. Traffic which is sent to those IP addresses will be -forwarded to the VM using OVS flows on the hypervisor. 
An example agent for -OVN has been written to demonstrate how to monitor the southbound OVN DB and -create loopback IP addresses when a VM is started on a Compute node. The OVN -changes will be detailed in a separate OVN spec. Demonstration code is -available on Github: [4]_ - -User Experience -^^^^^^^^^^^^^^^ - -The installer will need to provide some basic information for the FRR -configuration, including whether to enable BFD, BGP IPv4, BGP IPv6, -and other settings. See the Example Configuration Data section below. - -Additional user-provided data may include inbound or outbound filter prefixes. -The default filter prefixes will accept only default routes via BGP, and will -export only loopback IPs, which have a /32 subnet mask for IPv4 or /128 subnet -mask for IPv6. - -Example Configuration Data -^^^^^^^^^^^^^^^^^^^^^^^^^^ - -.. code-block:: yaml - - tripleo_frr_bfd: false - tripleo_frr_bgp: false - tripleo_frr_bgp_ipv4: true - tripleo_frr_bgp_ipv4_allowas_in: false - tripleo_frr_bgp_ipv6: true - tripleo_frr_bgp_ipv6_allowas_in: false - tripleo_frr_config_basedir: "/var/lib/config-data/ansible-generated/frr" - tripleo_frr_hostname: "{{ ansible_hostname }}" - tripleo_frr_log_level: informational - tripleo_frr_watchfrr: true - tripleo_frr_zebra: false - -Alternatives -============ - -1. Routing outbound traffic via multiple uplinks - - Fault-tolerance and load-balancing for outbound traffic is typically - provided by bonding Ethernet interfaces. This works for most cases, but - is susceptible to unidirectional interface failure, a situation where - traffic works in only one direction. The LACP protocol for bonding does - provide some protection against unidirectional traffic failures, but is not - as robust as bi-directional forwarding detection (BFD) provided by FRR. - -2. 
Routing inbound traffic to highly-available API endpoints - - The most common method currently used to provide HA for API endpoints is - to use a virtual IP that fails over from active to standby nodes using a - shared Ethernet MAC address. The drawback to this method is that all - standby API controllers must reside on the same layer 2 segment (VLAN) as - the active controller. This presents a challenge if the operator wishes - to place API controllers in different failure domains for power and/or - networking. A BGP daemon avoids this limitation by advertising a route - to the shared IP address directly to the BGP peering router over a routed - layer 3 link. - - -3. Routing to Neutron IP addresses - - Data plane traffic is usually delivered to provider network or floating - IP addresses via the Ethernet MAC address associated with the IP and - determined via ARP requests on a shared VLAN. This requires that every - Compute node which may host a provider network IP or floating IP has - the appropriate VLAN trunked to a provider bridge attached to an interface - or bond. This makes it impossible to migrate VMs or floating IPs across - layer 3 boundaries in edge computing topologies or in a fully layer 3 - routed datacenter. - - -Security Impact -=============== - -There have been no direct security impacts identified with this approach. The -installer should ensure that security policy on the network as whole prevents -IP spoofing which could divert legitimate traffic to an unintended host. This -is a concern whether or not the OpenStack nodes are using BGP themselves, and -may be an issue in environments using traditional routing architecture or -static routes. - - -Upgrade Impact -============== - -When (if) we remove the capability to manage network resources in the -overcloud heat stack, we will need to evaluate whether we want to continue -to provide BGP configuration as a part of the overcloud configuration. 
- -If an operator wishes to begin using BGP routing at the same time as -upgrading the version of OpenStack used they will need to provide the -required configuration parameters if they differ from the defaults provided -in the TripleO deployment service. - - -Performance Impact -================== - -No performance impacts are expected, either positive or negative by using -this approach. Attempts have been made to minimize memory and CPU usage by -using conservative defaults in the configuration. - - -Documentation Impact -==================== - -This is a new TripleO deployment service and should be properly documented -to instruct installers in the configuration of FRR for their environment. - -The TripleO docs will need updates in many sections, including: - -* `TripleO OpenStack Deployment - `_ -* `Provisioning Baremetal Before Overcloud Deploy - `_ -* `Deploying with Custom Networks - `_ -* `Configuring Network Isolation - `_ -* `Deploying Overcloud with L3 routed networking - `_ - -The FRR daemons are documented elsewhere, and we should not need to document -usage of BGP in general, as this is a standard protocol. The configuration of -top-of-rack switches is different depending on the make and model of routing -switch used, and we should not expect to provide configuration examples for -network hardware. - -Implementation -============== - -The implementation will require a new TripleO deployment service, container -definition, and modifications to the existing role definitions. Those changes -are proposed upstream, see the References section for URL links. 
- - -Assignee(s) -=========== - -Primary assignee: - * Dan Sneddon - -Secondary assignees: - * Michele Baldessari - * Carlos Gonclaves - * Daniel Alvarez Sanchez - * Luis Tomas Bolivar - - -Work Items -========== - -* Develop the container definition -* Define the TripleO deployment service templates -* Define the TripleO Ansible role -* Modify the existing TripleO roles to support the above changes -* Merge the changes to the container, deployment service, and Ansible role -* Ensure FRR packages are available for supported OS versions - - -References -========== - -.. [1] `Review: DNR/DNM Frr support `_. -.. [2] `Review: Add tripleo_frr role `_. -.. [3] `Review: WIP/DNR/DNM FRR service `_. -.. [4] `OVN BGP Agent `_. diff --git a/specs/wallaby/triplo-network-data-v2-node-ports.rst b/specs/wallaby/triplo-network-data-v2-node-ports.rst deleted file mode 100644 index 79b1f51d..00000000 --- a/specs/wallaby/triplo-network-data-v2-node-ports.rst +++ /dev/null @@ -1,675 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -==================================================== -Network Data v2 - node ports and node network config -==================================================== - -With "Network Data v2" the goal is to move management of network resources -out of the heat stack. The schema spec [1]_ talked about the -``network_data.yaml`` format and managing networks, segments and subnets. This -spec follows up with node ports for composable networks and moving the node -network configuration action to the baremetal/network configuration workflow. - - -Problem description -=================== - -Applying a network change on day 2, currently requires a full stack update -since network resources such as ports are managed by heat. 
It has also been -problematic to create ports for large scale deployments; neutron on the single -node undercloud gets overwhelmed and it is difficult to throttle port creation -in Heat. As an early indication on the performance of port creation with the -proposed ansible module: - -Performance stats: 100 nodes x 3 networks = 300 ports - -.. code-block:: text - - 4xCPU 1.8 GHz (8GB) 8x CPU 2.6 GHz (12GB) - ------------------- -------------------------------- - Concurr: 10 20 10 4 - ........ .............. ......... ......... ......... - Create real 5m58.006s 1m48.518s 1m51.998s 1m25.022s - Delete: real 4m12.812s 0m47.475s 0m48.956s 1m19.543s - Re-run: real 0m19.386s 0m4.389s 0m4.453s 0m4.977s - - -Proposed Change -=============== - -Extend the baremetal provisioning workflow that runs before overcloud -deployment to also create ports for composable networks. The baremetal -provisioning step already create ports for the provisioning network. Moving -the management of ports for composable networks to this workflow will -consolidate all port management into one workflow. - -Also make baremetal provisioning workflow execute the tripleo-ansible -``tripleo_network_config`` role to configure node networking after -node provisioning. - -The deploy workflow would be: - -#. Operator defines composable networks in network data YAML file. -#. Operator provisions composable networks by running the - ``openstack overcloud network provision`` command, providing the network - data YAML file as input. -#. Operator defines roles and nodes in the baremetal deployment YAML file. This - YAML also defines the networks for each role. -#. Operator deploys baremetal nodes by running the - ``openstack overcloud node provision`` command. This step creates ports in - neutron, and also configures networking; including composable networks; on - the nodes using ansible role to apply network config with os-net-config - [2]_. -#. 
Operator deploys heat stack including the environment files produced by the - commands executed in the previous steps by running the - ``openstack overcloud deploy`` command. -#. Operator executes config-download to install and configure openstack on the - overcloud nodes. *(optional - only if overcloud deploy command executed with - ``-stack-only``)* - - -Implementation -============== - -Assignee(s) ------------ - -Primary assignee: - Harald Jensås - -Approver(s) ------------ - -Primary approver: - TODO - - -Implementation Details ----------------------- - -The baremetal YAML definition will be extended, adding the ``networks`` and the -``network_config`` keys in role ``defaults`` as well as per-instance to support -``fixed_ip`` addressing, manually pre-created port resource and per-node -network configuration template. - -The ``networks`` will replace the current ``nic`` key, until the ``nic`` key is -deprecated either can be used but not both at the same time. Networks in -``networks`` will support a boolean key ``vif`` which indicate if the port -should be attached in Ironic or not. If no network with ``vif: true`` is -specified an implicit one for the control plane will be appended: - -.. code-block:: yaml - - - network: ctlplane - vif: true - -For networks with ``vif: true``, ports will be created by metalsmith. For -networks with ``vif: false`` (or ``vif`` not specified) the workflow will -create neutron ports based on the YAML definition. - -The neutron ports will initially be tagged with the *stack name* and the -instance *hostname*, these tags are used for idempotency. The ansible module -managing ports will get all ports with the relevant tags and then add/remove -ports based on the expanded roles defined in the Baremetal YAML definition. -(The *hostname* and *stack_name* tags are also added to ports created with heat -in this tripleo-heat-templates change [4]_, to enable *adoption* of neutron -ports created by heat for the upgrade scenario.) 
- -Additionally the ports will be tagged with the ironic node uuid when this is -available. Full set of tags are shown in the example below. - -.. code-block:: json - - { - "port": { - "name": "controller-1-External", - "tags": ["tripleo_ironic_uuid=", - "tripleo_hostname=controller-1", - "tripleo_stack_name=overcloud"], - } - } - -.. Note:: In deployments where baremetal nodes have multiple physical NIC's - multiple networks can have ``vif: true``, so that VIF attach - in ironic and proper neutron port binding happens. In a scenario - where neutron on the Undercloud is managing the switch this would - enable automation of the Top-of-Rack switch configuration. - -Mapping of the port data for overcloud nodes will go into a ``NodePortMap`` -parameter in tripleo-heat-tempaltes. The map will contain submaps for each -node, keyed by the node name. Initially the ``NodePortMap`` will be consumed by -alternative *fake-port* -``OS::TripleO::{{role.name}}::Ports::{{network.name}}Port`` resource templates. -In the final implementation the environment file created can be extended and -the entire ``OS::TripleO::{{role.name}}`` resource can be replaced with a -template that references parameter in the generated environment directly, i.e a -re-implemented ``puppet/role.role.j2.yaml`` without the server and port -resources. The ``NodePortMap`` will be added to the -*overcloud-baremetal-deployed.yaml* created by the workflow creating the -overcloud node port resources. - -Network ports for ``vif: false`` networks, will be managed by a new ansible -module ``tripleo_overcloud_network_ports``, the input for this role will be a -list of instance definitions as generated by the -``tripleo_baremetal_expand_roles`` ansible module. The -``tripleo_baremetal_expand_roles`` ansible module will be extended to add -network/subnet information from the baremetal deployment YAML definition. 
The baremetal provision workflow will be extended to write an Ansible inventory; -we should try to extend tripleo-ansible-inventory so that the baremetal -provisioning workflow can re-use existing code to create the inventory. -The inventory will be used to configure networking on the provisioned nodes -using the **tripleo-ansible** ``tripleo_network_config`` ansible role.
code-block:: yaml - - - name: Controller - count: 1 - hostname_format: controller-%index% - defaults: - profile: control - network_config: - template: templates/multiple_nics/multiple_nics.j2 - physical_bridge_name: br-ex - public_interface_name: nic1 - network_deployment_actions: ['CREATE'] - net_config_data_lookup: {} - networks: - - network: ctlplane - vif: true - - network: external - subnet: external_subnet - - network: internal_api - subnet: internal_api_subnet - - network: storage - subnet: storage_subnet - - network: storage_mgmt - subnet: storage_mgmt_subnet - - network: Tenant - subnet: tenant_subnet - - name: Compute - count: 1 - hostname_format: compute-%index% - defaults: - profile: compute - network_config: - template: templates/multiple_nics/multiple_nics.j2 - physical_bridge_name: br-ex - public_interface_name: nic1 - network_deployment_actions: ['CREATE'] - net_config_data_lookup: {} - networks: - - network: ctlplane - vif: true - - network: internal_api - subnet: internal_api_subnet - - network: tenant - subnet: tenant_subnet - - network: storage - subnet: storage_subnet - -Example: Baremetal YAML definition with per-instance overrides -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -.. 
code-block:: yaml - - - name: Controller - count: 1 - hostname_format: controller-%index% - defaults: - profile: control - network_config: - template: templates/multiple_nics/multiple_nics.j2 - physical_bridge_name: br-ex - public_interface_name: nic1 - network_deployment_actions: ['CREATE'] - net_config_data_lookup: {} - bond_interface_ovs_options: - networks: - - network: ctlplane - vif: true - - network: external - subnet: external_subnet - - network: internal_api - subnet: internal_api_subnet - - network: storage - subnet: storage_subnet - - network: storage_mgmt - subnet: storage_mgmt_subnet - - network: tenant - subnet: tenant_subnet - instances: - - hostname: controller-0 - name: node00 - networks: - - network: ctlplane - vif: true - - network: internal_api: - fixed_ip: 172.21.11.100 - - hostname: controller-1 - name: node01 - networks: - External: - port: controller-1-external - - hostname: controller-2 - name: node02 - - name: ComputeLeaf1 - count: 1 - hostname_format: compute-leaf1-%index% - defaults: - profile: compute-leaf1 - networks: - - network: internal_api - subnet: internal_api_subnet - - network: tenant - subnet: tenant_subnet - - network: storage - subnet: storage_subnet - instances: - - hostname: compute-leaf1-0 - name: node03 - network_config: - template: templates/multiple_nics/multiple_nics_dpdk.j2 - physical_bridge_name: br-ex - public_interface_name: nic1 - network_deployment_actions: ['CREATE'] - net_config_data_lookup: {} - num_dpdk_interface_rx_queues: 1 - networks: - - network: ctlplane - vif: true - - network: internal_api - fixed_ip: 172.21.12.105 - - network: tenant - port: compute-leaf1-0-tenant - - network: storage - subnet: storage_subnet - - -.. _baremetal_yaml_pre_provsioned: - -Example: Baremetal YAML for Already Deployed Servers -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -.. 
code-block:: yaml - - - name: Controller - count: 3 - hostname_format: controller-%index% - defaults: - profile: control - network_config: - template: templates/multiple_nics/multiple_nics.j2 - networks: - - network: ctlplane - - network: external - subnet: external_subnet - - network: internal_api - subnet: internal_api_subnet - - network: storage - subnet: storage_subnet - - network: storage_mgmt - subnet: storage_mgmt_subnet - - network: tenant - subnet: tenant_subnet - managed: false - instances: - - hostname: controller-0 - networks: - - network: ctlplane - fixed_ip: 192.168.24.10 - - hostname: controller-1 - networks: - - network: ctlplane - fixed_ip: 192.168.24.11 - - hostname: controller-2 - networks: - - network: ctlplane - fixed_ip: 192.168.24.12 - - name: Compute - count: 2 - hostname_format: compute-%index% - defaults: - profile: compute - network_config: - template: templates/multiple_nics/multiple_nics.j2 - networks: - - network: ctlplane - - network: internal_api - subnet: internal_api_subnet - - network: tenant - subnet: tenant_subnet - - network: storage - subnet: storage_subnet - instances: - - hostname: compute-0 - managed: false - networks: - - network: ctlplane - fixed_ip: 192.168.24.100 - - hostname: compute-1 - managed: false - networks: - - network: ctlplane - fixed_ip: 192.168.24.101 - -Example: NodeNetworkDataMappings -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -.. 
code-block:: yaml - - NodePortMap: - controller-0: - ctlplane: - ip_address: 192.168.24.9 (2001:DB8:24::9) - ip_subnet: 192.168.24.9/24 (2001:DB8:24::9/64) - ip_address_uri: 192.168.24.9 ([2001:DB8:24::9]) - internal_api: - ip_address: 172.18.0.9 (2001:DB8:18::9) - ip_subnet: 172.18.0.9/24 (2001:DB8:18::9/64) - ip_address_uri: 172.18.0.9 ([2001:DB8:18::9]) - tenant: - ip_address: 172.19.0.9 (2001:DB8:19::9) - ip_subnet: 172.19.0.9/24 (2001:DB8:19::9/64) - ip_address_uri: 172.19.0.9 ([2001:DB8:19::9]) - compute-0: - ctlplane: - ip_address: 192.168.24.15 (2001:DB8:24::15) - ip_subnet: 192.168.24.15/24 (2001:DB8:24::15/64) - ip_address_uri: 192.168.24.15 ([2001:DB8:24::15]) - internal_api: - ip_address: 172.18.0.15 (2001:DB8:18::1) - ip_subnet: 172.18.0.15/24 (2001:DB8:18::1/64) - ip_address_uri: 172.18.0.15 ([2001:DB8:18::1]) - tenant: - ip_address: 172.19.0.15 (2001:DB8:19::15) - ip_subnet: 172.19.0.15/24 (2001:DB8:19::15/64) - ip_address_uri: 172.19.0.15 ([2001:DB8:19::15]) - -Example: Ansible inventory -^^^^^^^^^^^^^^^^^^^^^^^^^^ - -.. 
code-block:: yaml - - Controller: - vars: - role_networks: - - External - - InternalApi - - Tenant - role_networks_lower: - External: external - InternalApi: internal_api - Tenant: tenant - networks_all: - - External - - InternalApi - - Tenant - neutron_physical_bridge_name: br-ex - neutron_public_interface_name: nic1 - tripleo_network_config_os_net_config_mappings: {} - network_deployment_actions: ['CREATE', 'UPDATE'] - ctlplane_subnet_cidr: 24 - ctlplane_mtu: 1500 - ctlplane_gateway_ip: 192.168.24.254 - ctlplane_dns_nameservers: [] - dns_search_domains: [] - ctlplane_host_routes: {} - internal_api_cidr: 24 - internal_api_gateway_ip: 172.18.0.254 - internal_api_host_routes: [] - internal_api_mtu: 1500 - internal_api_vlan_id: 20 - tenant_cidr: 24 - tenant_api_gateway_ip: 172.19.0.254 - tenant_host_routes: [] - tenant_mtu: 1500 - hosts: - controller-0: - ansible_host: 192.168.24.9 - ctlplane_ip: 192.168.24.9 - internal_api_ip: 172.18.0.9 - tenant_ip: 172.19.0.9 - Compute: - vars: - role_networks: - - InternalApi - - Tenant - role_networks_lower: - InternalApi: internal_api - Tenant: tenant - networks_all: - - External - - InternalApi - - Tenant - neutron_physical_bridge_name: br-ex - neutron_public_interface_name: nic1 - tripleo_network_config_os_net_config_mappings: {} - network_deployment_actions: ['CREATE', 'UPDATE'] - ctlplane_subnet_cidr: 24 - ctlplane_mtu: 1500 - ctlplane_gateway_ip: 192.168.25.254 - ctlplane_dns_nameservers: [] - dns_search_domains: [] - ctlplane_host_routes: {} - internal_api_cidr: 24 - internal_api_gateway_ip: 172.18.1.254 - internal_api_host_routes: [] - internal_api_mtu: 1500 - internal_api_vlan_id: 20 - tenant_cidr: 24 - tenant_api_gateway_ip: 172.19.1.254 - tenant_host_routes: [] - tenant_mtu: 1500 - hosts: - compute-0: - ansible_host: 192.168.25.15 - ctlplane_ip: 192.168.25.15 - internal_ip: 172.18.1.15 - tenant_ip: 172.19.1.15 - - -TODO ----- - -* Constraint validation, for example ``BondInterfaceOvsOptions`` uses - ``allowed_pattern: 
^((?!balance.tcp).)*$`` to ensure balance-tcp bond mode is - not used, as it is known to cause packet loss. - -Work Items ----------- - -#. Write ansible inventory after baremetal provisioning - - Create an ansible inventory, similar to the inventory created by config- - download. The ansible inventory is required to apply network - configuration to the deployed nodes. - - We should try to extend tripleo-ansible-inventory so that the baremetal - provisioning workflow can re-use existing code to create the inventory. - - It is likely that it makes sense for the workflow to also run the - tripleo-ansible role tripleo_create_admin to create the *tripleo-admin* - ansible user. - -#. Extend baremetal provisioning workflow to create neutron ports and - update the ironic node ``extra`` field with the ``tripleo_networks`` map. - -#. The baremetal provisioning workflow needs a *pre-deployed-server* option - that cause it to not deploy baremetal nodes, only create network ports. - When this option is used the baremetal deployment YAML file will also - describe the already provisioned nodes. - -#. Apply and validate network configuration using the **triple-ansible** - ``tripleo_network_config`` ansible role. This step will be integrated in - the provisioning command. - -#. Disable and remove management of composable network ports in - tripleo-heat-templates. - -#. Change the Undercloud and Standalone deploy to apply network configuration - prior to the creating the ephemeral heat stack using the - ``tripleo_network_config`` ansible role. - -Testing -======= - -Multinode OVB CI job's with network-isolation will be updated to test the new -workflow. 
- -Upgrade Impact -============== - -During upgrade switching to use network ports managed outside of the heat stack -the ``PortDeletionPolicy`` must be set to ``retain`` during the update/upgrade -*prepare* step, so that the existing neutron ports (which will be adopted by -the pre-heat port management workflow) are not deleted when running the update/ -upgrade *converge* step. - -Moving node network configuration out of tripleo-heat-templates will require -manual (or scripted) migration of settings controlled by heat template -parameters to the input file used for baremetal/network provisioning. At least -the following parameters are affected: - -* NeutronPhysicalBridge -* NeutronPublicInterface -* NetConfigDataLookup -* NetworkDeploymentActions - -Parameters that will be deprecated: - -* NetworkConfigWithAnsible -* {{role.name}}NetworkConfigTemplate -* NetworkDeploymentActions -* {{role.name}}NetworkDeploymentActions -* BondInterfaceOvsOptions -* NumDpdkInterfaceRxQueues -* {{role.name}}LocalMtu -* NetConfigDataLookup -* DnsServers -* DnsSearchDomains -* ControlPlaneSubnetCidr -* HypervisorNeutronPublicInterface -* HypervisorNeutronPhysicalBridge - -The environment files used to select one of the pre-defined nic config -templates will no longer work. The template to use must be set in the YAML -defining the baremetal/network deployment. 
This affects the following -environment files: - -* environments/net-2-linux-bonds-with-vlans.j2.yaml -* environments/net-bond-with-vlans.j2.yaml -* environments/net-bond-with-vlans-no-external.j2.yaml -* environments/net-dpdkbond-with-vlans.j2.yaml -* environments/net-multiple-nics.j2.yaml -* environments/net-multiple-nics-vlans.j2.yaml -* environments/net-noop.j2.yaml -* environments/net-single-nic-linux-bridge-with-vlans.j2.yaml -* environments/net-single-nic-with-vlans.j2.yaml -* environments/net-single-nic-with-vlans-no-external.j2.yaml - -Documentation Impact -==================== - -The documentation effort is **heavy** and will need to be incrementally -updated. As a minimum, a separate page explaining the new process must be -created. - -The TripleO docs will need updates in many sections, including: - -* `TripleO OpenStack Deployment - `_ -* `Provisioning Baremetal Before Overcloud Deploy - `_ -* `Deploying with Custom Networks - `_ -* `Configuring Network Isolation - `_ -* `Deploying Overcloud with L3 routed networking - `_ - - -Alternatives -============ - -#. **Not changing how ports are created** - - In this case we keep creating the ports with heat, the do nothing - alternative. - -#. **Create a completely separate workflow for composable network ports** - - A separate workflow that can run before/after node provisioning. It can read - the same YAML format as baremetal provisioning, or it can have its own YAML - format. - - The problem with this approach is that we lose the possibility to store - relations between neutron-port and baremetal node in a database. As in, we'd - need our own database (a file) maintaining the relationships. - - .. Note:: We need to implement this workflow anyway for a pre-deployed - server scenario, but instead of a completely separate workflow - the baremetal deploy workflow can take an option to not - provision nodes. - -#. 
**Create ports in ironic and bind neutron ports** - - Instead of creating ports unknown to ironic, create ports for the ironic - nodes in the baremetal service. - - The issue is that ironic does not have a concept of virtual port's, so we - would have to either add this support in ironic, switch TripleO to use - neutron trunk ports or create *fake* ironic ports that don't actually - reflect NICs on the baremetal node. (This abandoned ironic spec [3]_ discuss - one approach for virtual port support, but it was abandoned in favor of - neutron trunk ports.) - - With each PTG there is a re-occurring suggestion to replace neutron with a - more light weight IPAM solution. However, the effort to actually integrate - it properly with ironic and neutron for composable networks probably isn't - time well spent. - - -References -========== - -.. [1] `Review: Spec for network data v2 format `_. -.. [2] `os-net-config `_. -.. [3] `Abandoned spec for VLAN Aware Baremetal Instances `_. -.. [4] `Review: Add hostname and stack_name tags to ports `_. diff --git a/specs/wallaby/triplo-network-data-v2.rst b/specs/wallaby/triplo-network-data-v2.rst deleted file mode 100644 index 04d64d5c..00000000 --- a/specs/wallaby/triplo-network-data-v2.rst +++ /dev/null @@ -1,348 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -=============================== -Network Data format/schema (v2) -=============================== - -The network data schema (``network_data.yaml``) used to define composable -networks in TripleO has had several additions since it was first introduced. -Due to legacy compatibility some additions make the schema somewhat non- -intuitive. Such as adding support for routed networks, where the ``subnets`` -map was introduced. 
- -The goal of this spec is to get discussion and settle on a new network data -(v2) format that will be used once management of network resources such -as networks, segments and subnets are moved out of the heat stack. - -Problem description -=================== - -The current schema is somewhat inconsistent, and not as precise as it could -be. For example the ``base`` subnet being at level-0, while additional -subnets are in the ``subnets`` map. It would be more intuitive to define -all subnets in the ``subnets`` map. - -Currently the network resource properties are configured via a mix of -parameters in the heat environment and network data. For example -``dns_domain``, ``admin_state_up``, ``enable_dhcp``, ``ipv6_address_mode``, -``ipv6_ra_mode`` and ``shared`` properties are configured via Heat parameters, -while other properties such as ``cidr``, ``gateway_ip``, ``host_routes`` etc. -are defined in network data. - -Proposed Change -=============== - -Overview --------- - -Change the network data format so that all network properties are managed in -network data, so that network resources can be managed outside of the heat -stack. - -.. note:: Network data v2 format will only be used with the new tooling that - will manage networks outside of the heat stack. - -Network data v2 format should stay compatible with tripleo-heat-templates -jinja2 rendering outside of the ``OS::TripleO::Network`` resource and its -subresources ``OS::TripleO::Network::{{network.name}}``. - -User Experience -^^^^^^^^^^^^^^^ - -Tooling will be provided for users to export the network information from -an existing deployment. This tooling will output a network data file in -v2 format, which from then on can be used to manage the network resources -using tripleoclient commands or tripleo-ansible cli playbooks. - -The command line tool to manage the network resources will output the -environment file that must be included when deploying the heat stack. 
(Similar -to the environment file produced when provisioning baremetal nodes without -nova.) - -CLI Commands -^^^^^^^^^^^^ - -Command to export provisioned overcloud network information to network data v2 -format. - -.. code-block:: shell - - openstack overcloud network export \ - --stack \ - --output - -Command to create/update overcloud networks outside of heat. - -.. code-block:: shell - - openstack overcloud network provision \ - --networks-file \ - --output - - -Main difference between current network data schema and the v2 schema proposed -here: - -* Base subnet is moved to the ``subnets`` map, aligning configuration for - non-routed and routed deployments (spine-and-leaf, DCN/Edge) -* The ``enabled`` (bool) is no longer used. Disabled networks should be - excluded from the file, removed or commented. -* The ``compat_name`` option is no longer required. This was used to change - the name of the heat resource internally. Since the heat resource will be a - thing of the past with network data v2, we don't need it. -* The keys ``ip_subnet``, ``gateway_ip``, ``allocation_pools``, ``routes``, - ``ipv6_subnet``, ``gateway_ipv6``, ``ipv6_allocation_pools`` and - ``routes_ipv6`` are no longer valid at the network level. -* New key ``physical_network``, our current physical_network names for base and - non-base segments are not quite compatible. Adding logic in code to - compensate is complex. (This field may come in handy when creating ironic - ports in metalsmith as well.) -* New keys ``network_type`` and ``segmentation_id`` since we could have users - that used ``{{network.name}}NetValueSpecs`` to set network_type vlan. - -.. note:: The new tooling should validate that none of the keys previously - valid in network data v1 are used in network data v2. - -Example network data v2 file for IPv4 -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -.. 
code-block:: yaml - - - name: Storage - name_lower: storage (optional, default: name.lower()) - admin_state_up: false (optional, default: false) - dns_domain: storage.localdomain. (optional, default: undef) - mtu: 1442 (optional, default: 1500) - shared: false (optional, default: false) - service_net_map_replace: storage (optional, default: undef) - ipv6: true (optional, default: false) - vip: true (optional, default: false) - subnets: - subnet01: - ip_subnet: 172.18.1.0/24 - gateway_ip: 172.18.1.254 (optional, default: undef) - allocation_pools: (optional, default: []) - - start: 172.18.1.10 - end: 172.18.1.250 - enable_dhcp: false (optional, default: false) - routes: (optional, default: []) - - destination: 172.18.0.0/24 - nexthop: 172.18.1.254 - vlan: 21 (optional, default: undef) - physical_network: storage_subnet01 (optional, default: {{name.lower}}_{{subnet name}}) - network_type: flat (optional, default: flat) - segmentation_id: 21 (optional, default: undef) - subnet02: - ip_subnet: 172.18.0.0/24 - gateway_ip: 172.18.0.254 (optional, default: undef) - allocation_pools: (optional, default: []) - - start: 172.18.0.10 - end: 172.18.0.250 - enable_dhcp: false (optional, default: false) - routes: (optional, default: []) - - destination: 172.18.1.0/24 - nexthop: 172.18.0.254 - vlan: 20 (optional, default: undef) - physical_network: storage_subnet02 (optional, default: {{name.lower}}_{{subnet name}}) - network_type: flat (optional, default: flat) - segmentation_id: 20 (optional, default: undef) - -Example network data v2 file for IPv6 -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -.. code-block:: yaml - - - name: Storage - name_lower: storage - admin_state_up: false - dns_domain: storage.localdomain. 
- mtu: 1442 - shared: false - vip: true - subnets: - subnet01: - ipv6_subnet: 2001:db8:a::/64 - gateway_ipv6: 2001:db8:a::1 - ipv6_allocation_pools: - - start: 2001:db8:a::0010 - end: 2001:db8:a::fff9 - enable_dhcp: false - routes_ipv6: - - destination: 2001:db8:b::/64 - nexthop: 2001:db8:a::1 - ipv6_address_mode: null - ipv6_ra_mode: null - vlan: 21 - physical_network: storage_subnet01 (optional, default: {{name.lower}}_{{subnet name}}) - network_type: flat (optional, default: flat) - segmentation_id: 21 (optional, default: undef) - subnet02: - ipv6_subnet: 2001:db8:b::/64 - gateway_ipv6: 2001:db8:b::1 - ipv6_allocation_pools: - - start: 2001:db8:b::0010 - end: 2001:db8:b::fff9 - enable_dhcp: false - routes_ipv6: - - destination: 2001:db8:a::/64 - nexthop: 2001:db8:b::1 - ipv6_address_mode: null - ipv6_ra_mode: null - vlan: 20 - physical_network: storage_subnet02 (optional, default: {{name.lower}}_{{subnet name}}) - network_type: flat (optional, default: flat) - segmentation_id: 20 (optional, default: undef) - -Example network data v2 file for dual stack -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Dual IPv4/IPv6 with two subnets per-segment, one for IPv4 and the other for -IPv6. A single neutron port with an IP address in each subnet can be created. - -In this case ``ipv6`` key will control weather services are configured to -bind to IPv6 or IPv4. (default ipv6: false) - -.. code-block:: yaml - - - name: Storage - name_lower: storage - admin_state_up: false - dns_domain: storage.localdomain. 
- mtu: 1442 - shared: false - ipv6: true (default ipv6: false) - vip: true - subnets: - subnet01: - ip_subnet: 172.18.1.0/24 - gateway_ip: 172.18.1.254 - allocation_pools: - - start: 172.18.1.10 - end: 172.18.1.250 - routes: - - destination: 172.18.0.0/24 - nexthop: 172.18.1.254 - ipv6_subnet: 2001:db8:a::/64 - gateway_ipv6: 2001:db8:a::1 - ipv6_allocation_pools: - - start: 2001:db8:a::0010 - end: 2001:db8:a::fff9 - routes_ipv6: - - destination: 2001:db8:b::/64 - nexthop: 2001:db8:a::1 - vlan: 21 - subnet02: - ip_subnet: 172.18.0.0/24 - gateway_ip: 172.18.0.254 - allocation_pools: - - start: 172.18.0.10 - end: 172.18.0.250 - routes: - - destination: 172.18.1.0/24 - nexthop: 172.18.0.254 - ipv6_subnet: 2001:db8:b::/64 - gateway_ipv6: 2001:db8:b::1 - ipv6_allocation_pools: - - start: 2001:db8:b::0010 - end: 2001:db8:b::fff9 - routes_ipv6: - - destination: 2001:db8:a::/64 - nexthop: 2001:db8:b::1 - vlan: 20 - -Alternatives ------------- - -#. Not changing the network data format - - In this case we need an alternative to provide the values for resource - properties currently managed using heat parameters, when moving - management of the network resources outside the heat stack. - -#. Only add new keys for properties - - Keep the concept of the ``base`` subnet at level-0, and only add keys - for properties currently managed using heat parameters. - - -Security Impact -=============== - -N/A - - -Upgrade Impact -============== - -When (if) we remove the capability to manage network resources in the -overcloud heat stack, the user must run the export command to generate -a new network data v2 file. Use this file as input to the ``openstack -overcloud network provision`` command, to generate the environment file -required for heat stack without network resources. - - -Performance Impact -================== - -N/A - - -Documentation Impact -==================== - -The network data v2 format must be documented. 
Procedures to use the commands -to export network information from existing deployments as well as -procedures to provision/update/adopt network resources with the non-heat stack -tooling must be provided. - -Heat parameters which will be deprecated/removed: - -* ``{{network.name}}NetValueSpecs``: Deprecated, Removed. - This was used to set ``provider:physical_network`` and - ``provider:network_type``, or actually **any** network property. -* ``{network.name}}NetShared``: Deprecated, replaced by network level - ``shared`` (bool) -* ``{{network.name}}NetAdminStateUp``: Deprecated, replaced by network - level ``admin_state_up`` (bool) -* ``{{network.name}}NetEnableDHCP``: Deprecated, replaced by subnet - level ``enable_dhcp`` (bool) -* ``IPv6AddressMode``: Deprecated, replaced by subnet level - ``ipv6_address_mode`` -* ``IPv6RAMode``: Deprecated, replaced by subnet level ``ipv6_ra_mode`` - -Once deployed_networks.yaml (https://review.opendev.org/751876) is used the -following parameters are Deprecated, since they will no longer be used: - -* {{network.name}}NetCidr -* {{network.name}}SubnetName -* {{network.name}}Network -* {{network.name}}AllocationPools -* {{network.name}}Routes -* {{network.name}}SubnetCidr_{{subnet}} -* {{network.name}}AllocationPools_{{subnet}} -* {{network.name}}Routes_{{subnet}} - - -Implementation -============== - -Assignee(s) ------------ - -Primary assignee: - * Harald Jensås - - -Work Items ----------- - -* Add tags to resources using heat stack - https://review.opendev.org/750666 -* Tools to extract provisioned networks from existing deployment - https://review.opendev.org/750671, https://review.opendev.org/750672 -* New tooling to provision/update/adopt networks - https://review.opendev.org/751739, https://review.opendev.org/751875 -* Deployed networks template in THT - https://review.opendev.org/751876 diff --git a/specs/xena/ansible-logging-tripleoclient.rst b/specs/xena/ansible-logging-tripleoclient.rst deleted file mode 100644 index 
e5713db3..00000000 --- a/specs/xena/ansible-logging-tripleoclient.rst +++ /dev/null @@ -1,304 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -================================================== -Improve logging for ansible calls in tripleoclient -================================================== - -Launchpad blueprint: - -https://blueprints.launchpad.net/tripleo/+spec/ansible-logging-tripleoclient - -Problem description -=================== -Currently, the ansible playbooks logging as shown during a deploy or day-2 -operations such us upgrade, update, scaling is either too verbose, or not -enough. - -Furthermore, since we're moving to ephemeral services on the Undercloud (see -`ephemeral heat`_ for instance), getting information about the state, content -and related things is a bit less intuitive. A proper logging, with associated -CLI, can really improve that situation and provide a better user experience. - - -Requirements for the solution -============================= -No new service addition ------------------------ -We are already trying to remove things from the Undercloud, such as Mistral, -it's not in order to add new services. - -No increase in deployment and day-2 operations time ---------------------------------------------------- -The solution must not increase the time taken for deploy, update, upgrades, -scaling and any other day-2 operations. It must be 100% transparent to the -operator. - -Use existing tools ------------------- -In the same way we don't want to have new services, we don't want to reinvent -the wheel once more, and we must check the already huge catalog of existing -solutions. - -KISS ----- -Keep It Simple Stupid is a key element - code must be easy to understand and -maintain. - -Proposed Change -=============== - -Introduction ------------- -While working on the `Validation Framework`_, a big part was about the logging. 
-There, we found a way to get an actual computable output, and store it in a -defined location, allowing to provide a nice interface in order to list and -show validation runs. - -This heavily relies on an ansible callback plugin with specific libs, which are -shipped in `python-validations-libs`_ package. - -Since the approach is modular, those libs can be re-used pretty easily in other -projects. - -In addition, python-tripleoclient already depends on `python-validations-libs`_ -(via a dependency on validations-common), meaning we already have the needed -bits. - -The Idea --------- -Since we have the mandatory code already present on the system (provided by the -new `python-validations-libs`_ package), we can modify how ansible-runner is -configured in order to inject a callback, and get the output we need in both -the shell (direct feedback to the operator) and in a dedicated file. - -Since callback aren't cheap (but, hopefully not expensive either), proper PoC -must be conducted in order to gather metrics about CPU, RAM and time. Please -see Performance Impact section. - -Direct feedback ---------------- -The direct feedback will tell the operator about the current task being done -and, when it ends, if it's a success or not. - -Using a callback might provide a "human suited" output. - -File logging ------------- -Here, we must define multiple things, and take into account we're running -multiple playbooks, with multiple calls to ansible-runner. - -File location -............. -Nowadays, most if not all of the deploy related files are located in the -user home directory (i.e. ~/overcloud-deploy//). -It therefore sounds reasonable to get the log in the same location, or a -subdirectory in that location. - -Keeping this location also solves the potential access right issue, since a -standard home directory has a 0700 mode, preventing any other user to access -its content. - -We might even go a bit deeper, and enforce a 0600 mode, just to be sure. 
- -Remember, logs might include sensitive data, especially when we're running with -extra debugging. - -File format convention -...................... -In order to make the logs easily usable by automated tools, and since we -already heavily rely on JSON, the log output should be formatted as JSON. This -would allow to add some new CLI commands such as "history list", "history show" -and so on. - -Also, JSON being well known by logging services such as ElasticSearch, using it -makes sending them to some central logging service really easy and convenient. - -While JSON is nice, it will more than probably prevent a straight read by the -operator - but with a working CLI, we might get something closer to what we -have in the `Validation Framework`_, for instance (see `this example`_). We -might even consider a CLI that will allow to convert from JSON to whatever -the operator might want, including but not limited to XML, plain text or JUnit -(Jenkins). - -There should be a new parameter allowing to switch the format, from "plain" to -"json" - the default value is still subject to discussion, but providing this -parameter will ensure Operators can do whatever they want with the default -format. A consensus seems to indicate "default to plain". - -Filename convention -................... -As said, we're running multiple playbooks during the actions, and we also want -to have some kind of history. - -In order to do that, the easiest way to get a name is to concatenate the time -and the playbook name, something like: - -* *timestamp*-*playbookname*.json - -Use systemd/journald instead of files -..................................... -One might want to use systemd/journald instead of plain files. While this -sounds appealing, there are multiple potential issues: - -#. Sensitive data will be shown in the system's journald, at hand of any other - user -#. 
Journald has rate limitations and threshold, meaning we might hit them, and - therefore lose logs, or prevent other services to use journald for their - own logging -#. While we can configure a log service (rsyslog, syslog-ng, etc) in order to - output specific content to specific files, we will face access issues on - them - -Therefore, we shouldn't use journald. - -Does it meet the requirements? ------------------------------- -* No service addition: yes - it's only a change in the CLI, no new dependency is - needed (tripleoclient already depends on validations-common, which depends on - validations-libs) -* No increase in operation time: this has to be proven with proper PoC and - metrics gathering/comparison. -* Existing Tool: yes -* Actively maintained: so far, yes - expected to be extended outside of TripleO -* KISS: yes, based on the validations-libs and simple Ansible callback - -Alternatives -============ - -ARA ---- -`ARA Records Ansible`_ provides some of the functionalities we implemented in -the Validation Framework logging, but it lacks some of the wanted features, -such as - -* CLI integration within tripleoclient -* Third-party service independence -* plain file logging in order to scrap them with SOSReport or other tools - -ARA needs a DB backend - we could inject results in the existing galera DB, but -that might create some issues with the concurrent accesses happening during a -deploy for instance. Using sqlite is also an option, but it means new packages, -new file location to save, binary format and so on. - -It also needs some web server in order to show the reporting, meaning yet -another httpd configuration, and the need to access to it on the undercloud. - -Also, ARA being a whole service, it would require to deploy it, configure it, -and maintain it - plus ensure it is properly running before each action in -order to ensure it gets the logs. 
- -By default, ARA doesn't affect the actual playbook output, while the goal of -this spec is mostly about it: provide a concise feedback to the operator, while -keeping the logs on disk, in files, with the ability to interact with them -through the CLI directly. - -In the end, ARA might be a solution, but it will require more work to get it -integrated, and, since the Triple UI has been deprecated, there isn't real way -to integrate it in an existing UI tool. - -Would it meet the requirements? -............................... -* No service addition: no, due to the "REST API" aspect. A service must answer - API calls -* No increase in operation time: probably yes, depending on the way ARA can - manage inputs queues. Since it's also using a callback, we have to account - for the potential resources used by it. -* Existing tool: yes -* Actively maintained: yes -* KISS: yes, but it adds new dependencies (DB backend, Web server, ARA service, - and so on) - -Note on the "new dependencies": while ARA can be launched -`without any service`_, it seems to be only for devel purpose, according to the -informative note we can read on the documentation page:: - - Good for small scale usage but inefficient and contains a lot of small files - at a large scale. - -Therefore, we shouldn't use ARA. - -Proposed Roadmap -================ -In Xena: - -* Ensure we have all the ABI capabilities within validations-libs in order to - set needed/wanted parameters for a different log location and file naming -* Start to work on the ansible-runner calls so that it uses a tweaked callback, - using the validations-libs capabilities in order to get the direct feedback - as well as the formatted file in the right location - -Security Impact -=============== -As we're going to store full ansible output on the disk, we must ensure log -location accesses are closed to any non-wanted user. 
As stated while talking -about the file location, the directory mode and ownership must be set so that -only the needed users can access its content (root + stack user) - -Once this is sorted out, no other security impact is to be expected - -furthermore, it will even make things more secure than now, since the current way -ansible is launched within tripleoclient puts an "ansible.log" file in the -operator home directory without any specific rights. - -Upgrade Impact -============== -Apart from ensuring the log location exists, there isn't any major upgrade -impact. A doc update must be done in order to point to the log location, as -well as some messages within the CLI. - -End User Impact -=============== -There are two impacts to the End User: - -* CLI output will be reworked in order to provide useful information (see - Direct Feedback above) -* Log location will change a bit for the ansible part (see File Logging above) - -Performance Impact -================== -A limited impact is to be expected - but proper PoC with metrics must be -conducted to assess the actual change. - -Multiple deploys must be done, with different Overcloud design, in order to -see the actual impact alongside the number of nodes. - -Deployer Impact -=============== -Same as End User Impact: CLI output will be changed, and the log location will -be updated. - -Developer Impact -================ -The callback is enabled by default, but the Developer might want to disable it. -Proper doc should reflect this. No real impact in the end. 
- -Implementation -============== -Contributors ------------- -* Cédric Jeanneret -* Mathieu Bultel - -Work Items ----------- -* Modify validations-libs in order to provide the needed interface (shouldn't - be really needed, the libs are already modular and should expose the wanted - interfaces and parameters) -* Create a new callback in tripleo-ansible -* Ensure the log directory is created with the correct rights -* Update the ansible-runner calls to enable the callback by default -* Ensure tripleoclient outputs status update on a regular basis while the logs - are being written in the right location -* Update/create the needed documentation about the new logging location and - management - -.. _ephemeral heat: https://specs.openstack.org/openstack/tripleo-specs/specs/wallaby/ephemeral-heat-overcloud.html -.. _Validation Framework: https://specs.openstack.org/openstack/tripleo-specs/specs/stein/validation-framework.html -.. _this example: https://asciinema.org/a/283645 -.. _python-validations-libs: https://opendev.org/openstack/validations-libs -.. _ARA Records Ansible: https://ara.recordsansible.org/ -.. _without any service: https://ara.readthedocs.io/en/latest/cli.html#ara-manage-generate -.. _ansible "acl": https://docs.ansible.com/ansible/latest/modules/acl_module.html diff --git a/specs/xena/healthcheck-cleanup.rst b/specs/xena/healthcheck-cleanup.rst deleted file mode 100644 index c9e311b1..00000000 --- a/specs/xena/healthcheck-cleanup.rst +++ /dev/null @@ -1,217 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -=============================== -Cleaning container healthchecks -=============================== - -https://blueprints.launchpad.net/tripleo/+spec/clean-container-healthchecks - -We don't rely on the `container healthcheck`_ results for anything in the -infrastructure.
They are time and resource consuming, and their maintenance is -mostly random. We can at least remove the ones that aren't hitting an actual -API healthcheck endpoint. - -This proposal was discussed during a `session at the Xena PTG`_ - -Problem Description -=================== - -Since we moved the services to container, first with the docker engine, then -with podman, container healthchecks have been implemented and used. - -While the very idea of healthchecks isn't bad, the way we (TripleO) are -making and using them is mostly wrong: - -* no action is taken upon healthcheck failure -* some (most) aren't actually checking if the service is working, but merely - that the service container is running - -The healthchecks such as `healthcheck_port`_, `healthcheck_listen`_, -`healthcheck_socket`_ as well as most of the scripts calling -`healthcheck_curl`_ are mostly NOT doing anything more than ensuring a -service is running - and we already have this info when the container is -"running" (good), "restarting" (not good) or "exited" (with a non-0 code -- bad). - -Also, the way podman implements healthchecks is relying on systemd and its -transient service and `timers`_. Basically, for each container, a new systemd -unit is created and injected, as well as a new timer - meaning systemd calls -podman. This isn't really good for the hosts, especially the ones having -heavy load due to their usage. - -Proposed Change -=============== - -Overview --------- - -A deep cleaning of the current healthcheck is needed, such as the -`healthcheck_socket`_, `healthcheck_port`_, and `healthcheck_curl`_ -that aren't calling an actual API healthcheck endpoint. This list isn't -exhaustive. - -This will drastically reduce the amount of "podman" calls, leading -to less resource issues, and provide a better comprehension when we list -the processes or services. 
- -In case an Operator wants to get some status information, they can leverage -an existing validation:: - - openstack tripleo validator run --validation service-status - -This validation can be launched from the Undercloud directly, and will gather -remote status for every OC nodes, then provide a clear summary. - -Such a validation could also be launched from a third-party monitoring -instance, provided it has the needed info (mostly the inventory). - -Alternatives ------------- - -There are multiple alternatives we can even implement as a step-by-step -solution, though any of them would more than probably require their own -specifications and discussions: - -Replace the listed healthchecks by actual service healthchecks -.............................................................. - -Doing so would allow to get a better understanding of the stack health, but -will not solve the issue with podman calls (hence resource eating and related -things). -Such healchecks can be launched from an external tool, for instance based -on a host's cron, or an actual service. - -Call the healthchecks from an external tool -........................................... - -Doing so would prevent the potential resource issues with the "podman exec" -calls we're currently seeing, while allowing a centralization for the results, -providing a better way to get metrics and stats. - -Keep things as-is -................. - -Because we have to list this one, but there are hints this isn't the right -thing to do (hence the current spec). - -Security Impact ---------------- - -No real Security impact. Less services/calls might lead to smaller attack -surface, and it might prevent some *denial of service* situations. - -Upgrade Impact --------------- - -No Upgrade impact. - -Other End User Impact ---------------------- - -The End User doesn't have access to the healthcheck anyway - that's more for -the operator. 
- -Performance Impact ------------------- - -The systems will be less stressed, and this can improve the current situation -regarding performances and stability. - -Other Deployer Impact ---------------------- - -There is no "deployer impact" if we don't consider they are the operator. - -For the latter, there's a direct impact: ``podman ps`` won't be able to show -the health status anymore or, at least, not for the containers without such -checks. - -But the operator is able to leverage the service-status validation instead - -this validation will even provide more information since it takes into account -the failed containers, a thing ``podman ps`` doesn't show without the proper -option, and even with it, it's not that easy to filter. - -Developer Impact ----------------- - -In order to improve the healthchecks, especially for the API endpoints, service -developers will need to implement specific tests in the app. - -Once it's existing, working and reliable, they can push it to any healthcheck -tooling at disposition - being the embedded container healthcheck, or some -dedicated service as described in the third step. - - -Implementation -============== - -Assignee(s) ------------ - -Primary assignee: - cjeanner - -Work Items ----------- - -#. Triage existing healthcheck, and if they aren't calling an actual endpoint, - deactivate them in tripleo-heat-templates -#. Ensure the stack stability isn't degraded by this change, and properly - document the "service-status" validation with the Validation Framework Team - -The second work item is more an empirical data on the long term - we currently -don't have actual data, apart from a `Launchpad issue`_ pointing to a problem -maybe caused by the way healthchecks are launched. - -Possible future work items -.......................... - -#. Initiate a discussion with CloudOps (metrics team) regarding a dedicated - healthcheck service, and how to integrate it properly within TripleO -#.
Initiate a cross-Team work toward actual healthcheck endpoints for the - services in need - -Those are just here for the sake of evolution. Proper specs will be needed -in order to frame the work. - -Dependencies -============ - -For step 1 and 2, no real dependencies are needed. - -Testing -======= - -Testing will require different things: - -* Proper metrics in order to ensure there's no negative impact - and that any - impact is measurable -* Proper insurance the removal of the healthcheck doesn't affect the services - in a negative way -* Proper testing of the validations, especially "service-status" in order to - ensure it's reliable enough to be considered as a replacement at some point - -Documentation Impact -==================== - -A documentation update will be needed regarding the overall healthcheck topic. - -References -========== - -* `Podman Healthcheck implementation and usage`_ - - -.. _container healthcheck: https://opendev.org/openstack/tripleo-common/src/branch/master/healthcheck -.. _healthcheck_port: https://opendev.org/openstack/tripleo-common/src/commit/a072a7f07ea75933a2372b1a95ae960095a3250e/healthcheck/common.sh#L49 -.. _healthcheck_listen: https://opendev.org/openstack/tripleo-common/src/commit/a072a7f07ea75933a2372b1a95ae960095a3250e/healthcheck/common.sh#L85 -.. _healthcheck_socket: https://opendev.org/openstack/tripleo-common/src/commit/a072a7f07ea75933a2372b1a95ae960095a3250e/healthcheck/common.sh#L95 -.. _healthcheck_curl: https://opendev.org/openstack/tripleo-common/src/commit/a072a7f07ea75933a2372b1a95ae960095a3250e/healthcheck/common.sh#L28 -.. _session at the Xena PTG: https://etherpad.opendev.org/p/tripleo-xena-drop-healthchecks -.. _timers: https://www.freedesktop.org/software/systemd/man/systemd.timer.html -.. _Podman Healthcheck implementation and usage: https://developers.redhat.com/blog/2019/04/18/monitoring-container-vitality-and-availability-with-podman/ -.. 
_Launchpad issue: https://bugs.launchpad.net/tripleo/+bug/1923607 diff --git a/specs/xena/keystoneless-undercloud.rst b/specs/xena/keystoneless-undercloud.rst deleted file mode 100644 index 0fba8852..00000000 --- a/specs/xena/keystoneless-undercloud.rst +++ /dev/null @@ -1,196 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -====================================================== -Support Keystoneless Undercloud (basic auth or noauth) -====================================================== - -The goal of this proposal is to introduce the community to the idea of -removing Keystone from TripleO undercloud and run the remaining OpenStack -services either with basic authentication or noauth (i.e. Standalone mode). - - -Problem Description -=================== - -With the goal of having a thin undercloud we've been simplifying the -undercloud architecture since a few cycles and have removed a number -of OpenStack services. After moving to use `network_data_v2`_ and -`ephemeral_heat`_ by default, we are left only with neutron, ironic -and ironic-inspector services. - -Keystone authentication and authorization does not add lot of value to the -undercloud. We use `admin` and `admin` project for everything. There are -also few service users (one per service) for communication between services. -Most of the overcloud deployment and configuration is done as the os user. -Also, for large deployments we increase token expiration time to a large -value which is orthogonal to keystone security. - - -Proposed Change -=============== - -Overview --------- - -At present, we have keystone running in the undercloud providing catalog, -authentication/authorization services to the remaining deployed services -neutron, ironic and ironic-inspector. Ephemeral heat uses a fake keystone -client which does not talk to keystone. 
- -All these remaining services are capable of running standalone using either -`http_basic` or `noauth` auth_strategy and clients using openstacksdk and -keystoneauth can use `HTTPBasicAuth` or `NoAuth` identity plugins with the -standalone services. - -The proposal is to deploy these OpenStack services either with basic auth or -noauth and remove keystone from the undercloud by default. - -- Deploy ironic/ironic-inspector/neutron with `http_basic` (default) or `noauth` - -This would also allow us to remove some additional services like `memcached` -from the undercloud mainly used for authtoken caching. - - -Alternatives ------------- - -- Keep keystone in the undercloud as before. - - -Security Impact ---------------- - -There should not be any significant security implications by disabling keystone -on the undercloud as there are no multi-tenancy and RBAC requirements for -undercloud users/operators. Deploying baremetal and networking services with `http_basic` authentication would protect against any possible intrusion as before. - - -Upgrade Impact --------------- - -There will be no upgrade impact; this change will be transparent to the -end-user. - - -Other End User Impact ---------------------- - -None. - - -Performance Impact ------------------- - -Disabling authentication and authorization would make the API calls faster and -the overall resource requirements of undercloud would reduce. - - -Other Deployer Impact ---------------------- - -None - -Developer Impact ----------------- - -None. - - -Implementation -============== - -- Add THT support for configuring `auth_strategy` for ironic and neutron - services and manage htpasswd files used for basic authentication by the - ironic services. - -.. code-block:: yaml - - IronicAuthStrategy: http_basic - NeutronAuthStrategy: http_basic - -- Normally, Identity service middleware provides an X-Project-Id header based on - the authentication token submitted by the service client.
However when keystone - is not available neutron expects `project_id` in the `POST` requests (i.e create - API). Also, metalsmith communicates with `neutron` to create `ctlplane` ports for - instances. - - Add a middleware for neutron API `http_basic` pipeline to inject a fake project_id - in the context. - -- Add basic authentication middleware to oslo.middleware and use it for undercloud - neutron. - -- Create/Update clouds.yaml to use `auth_type: http_basic` and use endpoint overrides - for the public endpoints with `_endpoint_override` entries. We - would leverage the `EndpointMap` and change `extraconfig/post_deploy` to create - and update clouds.yaml. - -.. code-block:: yaml - - clouds: - undercloud: - auth: - password: piJsuvz3lKUtCInsiaQd4GZ1w - username: admin - auth_type: http_basic - baremetal_api_version: '1' - baremetal_endpoint_override: https://192.168.24.2:13385 - baremetal_introspection_endpoint_override: https://192.168.24.2:13050 - network_api_version: '2' - network_endpoint_override: https://192.168.24.2:13696 - -Assignee(s) ------------ - -Primary assignee: - ramishra - -Other contributors: - - -Work Items ----------- - -- Add basic authentication middleware in oslo.middleware - https://review.opendev.org/c/openstack/oslo.middleware/+/802234 -- Support `auth_strategy` with ironic and neutron services - https://review.opendev.org/c/openstack/tripleo-heat-templates/+/798241 -- Neutron middleware to add fake project_id to noauth pipleline - https://review.opendev.org/c/openstack/neutron/+/799162 -- Configure neutron paste deploy for basic authentication - https://review.opendev.org/c/openstack/tripleo-heat-templates/+/804598 -- Disable keystone by default - https://review.opendev.org/c/openstack/tripleo-heat-templates/+/794912 -- Add option to enable keystone if required - https://review.opendev.org/c/openstack/python-tripleoclient/+/799409 -- Other patches: - https://review.opendev.org/c/openstack/tripleo-ansible/+/796991 - 
https://review.opendev.org/c/openstack/tripleo-common/+/796825 - https://review.opendev.org/c/openstack/tripleo-ansible/+/797381 - https://review.opendev.org/c/openstack/tripleo-heat-templates/+/799408 - - -Dependencies -============ - -Ephemeral heat and network-data-v2 are used as defaults. - - -Documentation Impact -==================== - -Update the undercloud installation and upgrade guides. - - -References -========== - -* `network_data_v2`_ specification -* `ephemeral_heat`_ specification - -.. _network_data_v2: https://specs.openstack.org/openstack/tripleo-specs/specs/wallaby/triplo-network-data-v2-node-ports.html -.. _ephemeral_heat: https://specs.openstack.org/openstack/tripleo-specs/specs/wallaby/ephemeral-heat-overcloud.html diff --git a/specs/xena/tripleo-independent-release.rst b/specs/xena/tripleo-independent-release.rst deleted file mode 100644 index a9d116bb..00000000 --- a/specs/xena/tripleo-independent-release.rst +++ /dev/null @@ -1,191 +0,0 @@ - -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -================================================= -Moving TripleO repos to independent release model -================================================= - -Include the URL of your launchpad blueprint: - -https://blueprints.launchpad.net/tripleo - -This spec proposes that we move all tripleo repos to the independent release -model. The proposal was first raised during tripleo irc meetings [1]_ and then -also on the openstack-discuss mailing list [2]_. - -Problem Description -=================== - -The TripleO repos [3]_ mostly follow the cycle-with-intermediary release -model, for example tripleo-heat-templates at [4]_. Mostly because some of -tripleo repos use the independent release model, for example tripleo-upgrade -at [5]_. A description of the different release models can be found at [6]_. 
- -By following the cycle-with-intermediary release model, TripleO is bound to -produce a release for each OpenStack development cycle and a corresponding -stable/branch in the tripleo repos. However as we have seen this causes an -ongoing maintenance burden; consider that currently TripleO supports 5 -active branches - Train, Ussuri, Victoria, Wallaby and Xena (current master). -In fact until very recently that list contained 7 branches, including Stein -and Queens (currently moving to End Of Life [7]_). - -This creates an ongoing maintenance and resource burden where for each -branch we are backporting changes, implementing, running and maintaining -upstream CI and ensuring compatibility with the rest of OpenStack with 3rd -party CI and the component and integration promotion pipelines [8]_, on an -ongoing bases. - -Finally, changes in the underlying OS between branches means that for some -branches we maintain two "types" of CI job; for stable/train we have to support -both Centos 7 and Centos 8. With the coming stable/xena, we would likely have -to support Centos-Stream-8 as well as Centos-Stream-9 in the event that -Stream-9 is not fully available by the xena release, which further compounds -the resource burden. By adopting the proposal laid out here we can choose to -skip the Xena branch thus avoiding this increased CI and maintenance cost. - -Proposed Change -=============== - -Overview --------- - -The proposal is for all TripleO repos that are currently using the -cycle-with-intermediary release model to switch to independent. This will -allow us to choose to skip a particular release and more importantly skip -the creation of the given stable/branch on those repos. - -This would allow the TripleO community to focus our resources on those branches -that are most 'important' to us, namely the 'FFU branches'. That is, the -branches that are part of the TripleO Fast Forward Upgrade chain (currently -these are Train -> Wallaby -> Z?). 
For example it is highly likely that we -would not create a Xena branch. - -Developers will be freed from having to backport changes across stable/branches -and this will have a dramatic effect on our upstream CI resource consumption -and maintenance burden. - -Alternatives ------------- - -We can continue to create all the stable/branches and use the same release -model we currently have. This would mean we would continue to have an increased -maintenance burden and would have to address that with increased resources. - -Security Impact ---------------- - -None - -Upgrade Impact --------------- - -For upgrades it would mean that TripleO would no longer directly support all -OpenStack stable branches. So if we decide not to create stable/xena for example -then you cannot upgrade from wallaby to xena using TripleO. In some respects -this would more closely match reality since the focus of the active tripleo -developer community has typically been on ensuring the Fast Forward Upgrade -(e.g. train to wallaby) and less so on ensuring the point to point upgrade -between 2 branches. - -Other End User Impact ---------------------- - -TripleO would no longer be able to deploy all versions of OpenStack. One idea -that was brough forth in the discussions around this topic thus far, is that -we can attempt to address this by designating a range of git tags as compatible -with a particular OpenStack stable branch. - -For example if TripleO doesn't create a stable/xena, but during the xena cycle -makes releases for the various Tripleo repos then *those* releases will be -compatible for deploying OpenStack stable/xena. We can maintain and publicise -a set of compatible tags for each of the affected repos (e.g., -tripleo-heat-templates versions 15.0.0 to 15.999.999 are compatible with -OpenStack stable/xena). - -Some rules around tagging will help us. Generally we can keep doing what we -currently do with respect to tagging; For major.minor.patch (e.g. 
15.1.1) in -the release tag, we will always bump major to signal a new stable branch. - -One problem with this solution is that there is no place to backport fixes to. -For example if you are using tripleo-heat-templates 15.99.99 to deploy -OpenStack Xena (and there is no stable/xena for tht) then you'd have to apply -any fixes to the top of the 15.99.99 tag and use it. There would be no way -to commit these fixes into the code repo. - -Performance Impact ------------------- - -None - -Other Deployer Impact ---------------------- - -There were concerns raised in the openstack-discuss thread [2] about RDO -packaging and how it would be affected by this proposal. As was discussed -there are no plans for RDO to stop building packages for any branch. For the -building of tripleo repos we would have to rely on the latest compatible -git tag, as outlined above in `Other End User Impact`_. - -Developer Impact ----------------- - -Will have less stable/branches to backport fixes to. It is important to note -however that by skipping some branches, resulting backports across multiple -branches will result in a larger code diff and so be harder for developers to -implement. That is, there will be increased complexity in resulting backports if -we skip intermediate branches. - -As noted in the `Other End User Impact`_ section above, for those branches that -tripleo decides not to create, there will be no place for developers to commit -any branch specific fixes to. They can consume particular tagged releases of -TripleO repos that are compatible with the given branch, but will not be able -to commit those changes to the upstream repo since the branch will not exist. - -Implementation -============== - -Assignee(s) ------------ - -Wesley Hayutin -Marios Andreou - -Work Items ----------- - -Besides posting the review against the releases repo [9]_ we will need to -update documentation to reflect and inform about this change. 
- -Dependencies -============ - -None - -Testing -======= - -None - -Documentation Impact -==================== - -Yes we will at least need to add some section to the docs to explain this. -We may also add some landing page to show the currently 'active' or supported -TripleO branches. - -References -========== - -.. [1] `Tripleo IRC meeting logs 25 May 2021 `_ -.. [2] `openstack-discuss thread '[tripleo] Changing TripleO's release model' `_ -.. [3] `TripleO section in governance projects.yaml `_ -.. [4] `tripleo-heat-templates wallaby release file `_ -.. [5] `tripleo-upgrade independent release file `_ -.. [6] `OpenStack project release models `_ -.. [7] `openstack-discuss [TripleO] moving stable/stein and stable/queens to End of Life `_ -.. [8] `TripleO Docs - TripleO CI Promotions `_ -.. [9] `opendev.org openstack/releases git repo `_ diff --git a/specs/xena/tripleo-repos-single-source.rst b/specs/xena/tripleo-repos-single-source.rst deleted file mode 100644 index f67c781e..00000000 --- a/specs/xena/tripleo-repos-single-source.rst +++ /dev/null @@ -1,339 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -=========================== -TripleO Repos Single Source -=========================== - -This proposal lays out the plan to use tripleo-repos as a single source -to install and configure non-base OS repos for TripleO - including -setting the required DLRN hashes. - -https://blueprints.launchpad.net/tripleo/+spec/tripleo-repos-single-source - -Problem Description -=================== - -Reviewing the code base, there are multiple places where repos are -specified. For example,in the release files we are setting the -configuration that is applied by `repo setup role`_. 
-Some of the other repo/version configurations are included in: - -* `tripleo repos`_ -* `repo setup role`_ -* `release config files`_ -* `container tooling (base tcib file)`_ -* `tripleo-ansible`_ -* `rdo config`_ (example) -* `tripleo-heat-templates`_ -* `tripleo-operator-ansible`_ - -.. _`tripleo repos`: https://opendev.org/openstack/tripleo-repos -.. _`repo setup role`: https://opendev.org/openstack/tripleo-quickstart/src/commit/d14d81204036a02562c3f4efd7acb3b38cb6ae95/roles/repo-setup/templates/repo_setup.sh.j2#L72 -.. _`release config files`: https://opendev.org/openstack/tripleo-quickstart/src/commit/d14d81204036a02562c3f4efd7acb3b38cb6ae95/config/release/tripleo-ci/CentOS-8/master.yml#L93 -.. _`container tooling (base tcib file)`: https://opendev.org/openstack/tripleo-common/src/commit/d3286377132ee6b0689a39e52858c07954711d13/container-images/tcib/base/base.yaml#L59 -.. _`tripleo-ansible`: https://opendev.org/openstack/tripleo-ansible/src/commit/509e630baa92673e72e641635d5742da01b4dc3b/tripleo_ansible/roles/tripleo_podman/vars/redhat-8.2.yml -.. _`rdo config`: https://review.rdoproject.org/r/31439 -.. _`tripleo-heat-templates`: https://opendev.org/openstack/tripleo-heat-templates/src/commit/125f45820255efe370af1024080bafc695892faa/environments/lifecycle/undercloud-upgrade-prepare.yaml -.. _`tripleo-operator-ansible`: https://opendev.org/openstack/tripleo-operator-ansible/src/commit/14a601a47be217386df83512fae3a2e5aa5444a3/roles/tripleo_container_image_build/molecule/default/converge.yml#L172 - - -The process of setting repo versions requires getting and -transforming DLRN hashes, for example resolving 'current-tripleo' -to a particular DLRN build ID and specifying the correct proxies. -Currently a large portion of this work is done in the release files -resulting in sections of complicated and fragile Bash scripts - -duplicated across numerous release files. 
- -This duplication, coupled with the various locations in use -for setting repo configurations, modules and supported versions -is confusing and error prone. - -There should be one source of truth for which repos are installed -within a tripleo deployment and how they are installed. -Single-sourcing all these functions will avoid the current -problems of duplication, over-writing settings and version confusion. - -Proposed Change -=============== - -Overview --------- - -This proposal puts forward using tripleo-repos as the 'source of truth' -for setting repo configurations, modules and supported versions - -including setting the DLRN hashes required to specify exact repo -versions to install for upstream development/CI workflows. - -Having a single source of truth for repo config, modules, etc. will make -development and testing more consistent, reliable and easier to debug. - -The intent is to use the existing tripleo-repos repo for this work and -not to create a new repo. It is as yet to be determined if we will add -a v2/versioned api or how we will handle the integration with the -existing functionality there. - -We aim to modularize the design and implementation of the proposed tripleo-repos -work. Two sub systems in particular have been identified that can be -implemented independently of, and ultimately to be consumed by, tripleo-repos; -the resolution of delorean build hashes from known tags (i.e. resolving -'current-tripleo' to a particular DLRN build ID) and the configuration of dnf -repos and modules will be implemented as independent python modules, with -their own unit tests, clis, ansible modules etc. 
- -Integration Points ------------------- - -The new work in tripleo-repos will have to support with all -the cases currently in use and will have to integrate with: - -* DLRN Repos -* release files -* container and overcloud image builds -* rdo config -* yum/dnf repos and modules -* Ansible (Ansible module) -* promotion pipeline - ensuring the correct DLRN hashes - -Incorporating the DLRN hash functionality makes the tool -more complex. Unit tests will be required to guard -against frequent breakages. This is one of the reasons that we decided to split -this DLRN hash resolution into its own dedicated python module -'tripleo-get-hash' for which we can have independent unit tests. - -The scope of the new tripleo-repos tool will be limited to upstream -development/CI workflows. - -Alternatives ------------- - -Functionality to set repos, modules and versions is already available today. -It would be possible to leave the status quo or: - -* Use rdo config to set one version per release - however, this would not - address the issue of changing DLRN hashes -* Create an rpm that lays down /etc/tripleo-release where container-tools could - be meta data in with that, similar to /etc/os-release - -Security Impact ---------------- - -No security impact is anticipated. The work is currently in the tripleo -open-source repos and will remain there - just in a consolidated -place and format. - -Upgrade Impact --------------- - -Currently there will be no upgrade impact. The new CLI will support -all release versions under support and in use. At a later date, -when the old CLI is deprecated there may be some update -implications. - -However,there may be work to make the emit_releases_file -https://opendev.org/openstack/tripleo-ci/src/branch/master/scripts/emit_releases_file/emit_releases_file.py -functionality compatible with the new CLI. - -Other End User Impact ---------------------- - -Work done on the new project branch will offer a different version of CLI, v2. 
-End users would be able to select which version of the CLI to use - until -the old CLI is deprecated. - - -Performance Impact ------------------- - -No performance impact is expected. Possible performance improvements could -result from ensuring that proxy handling (release file, mirrors, rdoproject) -is done correctly and consistently. - -Other Deployer Impact ---------------------- - - -Developer Impact ----------------- - -See ```Other End User Impact``` section. - -Implementation -============== - -The functionality added to tripleo-repos will be writen as a Python module -with a CLI and will be able to perform the following primary functions: - -* Single source the installation of all TripleO related repos -* Include the functionality current available in the repo-setup role - including creating repos from templates and files -* Perform proxy handling such as is done in the release files - (mirrors, using rdoproject for DLRN repos) -* Get and transform human-readable DLRN hashes - to be implemented as an - independent module. -* Support setting yum modules such as container-tools - to be implemented - as an independent module. -* Support enabling and disabling repos and setting their priorities - -The repo-setup role shall remain but it will invoke tripleo-repos. -All options required to be passed to tripleo-repos should be in the -release file. - -Work done on the new project branch will offer a different version of CLI, v2. -Unit tests will be added on this branch to test the new CLI directly. -CI would be flipped to run in the new branch when approved by TripleO teams. -All current unit tests should pass with the new code. - -An Ansible module will be added to call the tripleo-repos -options from Ansible directly without requiring the end -user to invoke the Python CLI from within Ansible. - -The aim is for tripleo-repos to be the single source for all repo related -configuration. 
In particular the goal is to serve the following 3 personas: - -* Upstream/OpenStack CI jobs -* Downstream/OSP/RHEL jobs -* Customer installations - -The configuration required to serve each of these use cases is slightly -different. In upstream CI jobs we need to configure the latest current-tripleo -promoted content repos. In downstream/OSP jobs we need to use rhos-release -and in customer installations we need to use subscription manager. - -Because of these differing requirements we are leaning towards storing the -configuration for each in their intended locations, with the upstream config -being the 'base' and the downstream config building ontop of that (the -implication is that some form of inheritance will be used to avoid duplication). -This was discussed during the `Xena PTG session`_ - -.. _`Xena PTG session`: https://etherpad.opendev.org/p/ci-tripleo-repos - -Assignee(s) ------------ - -* sshnaidm (DF and CI) -* marios (CI and W-release PTL) -* weshay -* chandankumar -* ysandeep -* arxcruz -* rlandy -* other DF members (cloudnull) - -Work Items ----------- - -Proposed Schedule -================= - -Investigative work will be begin in the W-release cycle on a project branch -in tripleo-repos. The spec will be put forward for approval in the X-release -cycle and impactful and integration work will be visible once the spec -is approved. - -Dependencies -============ - -This work has a dependency on the `DLRN API`_ and on yum/dnf. - -.. _`DLRN API`: https://dlrn.readthedocs.io/en/latest/api.html - -Testing -======= - -Specific unit tests will be added with the python-based code built. -All current CI tests will run through this work and will -test it on all releases and in various aspects such as: - -* container build -* overcloud image build -* TripleO deployments (standalone, multinode, scenarios, OVB) -* updates and upgrades - -CLI Design -========== - -Here is an abstract sketch of the intended cli design for the -new tripleo-repos. 
- -It covers most of the needs discussed at multiple places. - -Scenario 1 ----------- - -The goal is to construct a repo with the correct hash for an integration -or a component pipeline. - -For this scenario: - -* Any combination of `hash, distro, commit, release, promotion, url` parameters can passed -* Use the `tripleo-get-hash`_ module to determine the DLRN build ID -* Use the calculated DLRN build ID to create and add a repo - -.. _`tripleo-get-hash`: https://opendev.org/openstack/tripleo-repos/src/branch/master/tripleo-get-hash - - -Scenario 2 ----------- - -The goal is to construct any type of yum/dnf repo. - -For this scenario: - -* Construct and add a yum/dnf repo using a combination of the following parameters -* filename - filename for saving the resulting repo (mandatory) -* reponame - name of repository (mandatory) -* baseurl - base URL of the repository (mandatory) -* down_url - URL to download repo file from (mandatory/multually exclusive to baseurl) -* priority - priority of resulting repo (optional) -* enabled - 0/1 whether the repo is enabled or not (default: 1 - enabled) -* gpgcheck - whether to check GPG keys for repo (default: 0 - don't check) -* module_hotfixes - whether to make all RPMs from the repository available (default: 0) -* sslverify - whether to use a cert to use repo metadata (default: 1) -* type - type of the repo(default: generic, others: custom and file) - - -Scenario 3 ----------- - -The goal is to enable or disable specific dnf module and also install or -remove a specific package. - -For this scenario: - -* Specify -* module name -* which version to disable -* which version to enable -* which specific package from the module to install (optional) - - -Scenario 4 ----------- - -The goal is to enable or disable some repos, -remove any associated repo files no longer needed, -and then perform a system update. 
- -For this scenario: - -* Specify -* repo names to be disabled -* repo names to be enabled -* the files to be removed -* whether to perform the system update - - -Documentation Impact -==================== - -tripleo-docs will be updated to point to the new supported -repo/modules/versions setting workflow in tripleo-repos. - -References to old sources of settings such as tripleo-ansible, -release files in tripleo-quickstart and the repo-setup role -will have to be removed and replaced to point to the new -workflow. diff --git a/specs/xena/whole-disk-default.rst b/specs/xena/whole-disk-default.rst deleted file mode 100644 index 7b7f67fd..00000000 --- a/specs/xena/whole-disk-default.rst +++ /dev/null @@ -1,307 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -=================================== -Deploy whole disk images by default -=================================== - -https://blueprints.launchpad.net/tripleo/+spec/whole-disk-default - -This blueprint tracks the tasks required to switch to whole-disk overcloud -images by default instead of the current overcloud-full partition image. - -Whole disk images vs partition images -===================================== - -The current overcloud-full partition image consists of the following: - -* A compressed qcow2 image file which contains a single root partition with - all the image contents - -* A kernel image file for the kernel to boot - -* A ramdisk file to boot with the kernel - -Whereas the overcloud-hardened-uefi-full whole-disk image consists of a single -compressed qcow2 image containing the following: - -* A partition layout containing UEFI boot, legacy boot, and a root partition - -* The root partition contains a single lvm group with a number of logical - volumes of different sizes which are mounted at /, /tmp, /var, /var/log, etc. 
- -When a partition image is deployed, ironic-python-agent does the following on -the baremetal disk being deployed to: - -* Creates the boot and root partitions on the disk - -* Copies the partition image contents to the root partition - -* Populates the empty boot partition with everything required to boot, including - the kernel image, ramdisk file, a generated grub config, and an installed - grub binary - -When a whole-disk image is deployed, ironic-python-agent simply copies the whole -image to the disk. - -When the partition image deploy boots for the first time, the root partition -grows to take up all of the available disk space. This mechanism is provided -by the base cloud image. There is no equivalent partition growing mechanism -for a multi-volume LVM whole-disk image. - -Problem Description -=================== - -The capability to build and deploy a whole-disk overcloud image has been -available for many releases, but it is time to switch to this as the default. -Doing this will avoid the following issues and bring the following benefits: - -* As of CentOS-8.4, grub will stop support for installing the bootloader on a - UEFI system. ironic-python-agent depends on grub installs to set up EFI boot - with partition images, so UEFI boot will stop working when CentOS 8.4 is - used. - -* Other than this new grub behaviour, keeping partition boot working in - ironic-python-agent has been a development burden and involves code - complexity which is avoided for whole-disk deployments. - -* TripleO users are increasingly wanting to deploy with UEFI Secure Boot - enabled, this is only possible with whole-disk images that use the signed - shim bootloader. - -* Partition images need to be distributed with kernel and ramdisk files, adding - complexity to file management of deployed images compared to a single - whole-disk image file. - -* The `requirements for a hardened image`_ includes having separate volumes for - root, data etc. 
All TripleO users get the security benefit of hardened images - when a whole-disk image is used. - -* We currently need dedicated CI jobs both in the upstream check/gate (when the - relevant files changed) but also in periodic integration lines, to build and - publish the latest 'current-tripleo' version of the hardened images. In the long - term, only a single hardend UEFI whole-disk image needs to be built and - published, reducing the CI footprint. (in the short term, CI footprint may go up - so the whole-disk image can be published, and while hardened vs hardened-uefi - jobs are refactored. - -Proposed Change -=============== - -Overview --------- - -Wherever the partition image overcloud-full.qcow2 is built, published, or used -needs to be updated to use overcloud-hardened-uefi-full.qcow2 by default. - -This blueprint will be considered complete when it is possible to follow the -default path in the documentation and the result is an overcloud deployed -with whole-disk images. - -Image upload tool -+++++++++++++++++ - -The default behaviour of ``openstack overcloud image upload`` needs to be -aware that overcloud-hardened-uefi-full.qcow2 should be uploaded by default -when it is detected in the local directory. - -Reviewing image build YAML -++++++++++++++++++++++++++ - -Once the periodic jobs are updated, image YAML defining -overcloud-hardened-full can be deleted, leaving only -overcloud-hardened-uefi-full. Other refactoring can be done such as renaming --python3.yaml back to -base.yaml. - -Reviewing partition layout -++++++++++++++++++++++++++ - -Swift data is stored in ``/srv`` and according to the criteria of hardened -images this should be in its own partition. This will need to be added to the -existing partition layout for whole-disk UEFI images. - -Partition growing -+++++++++++++++++ - -On node first boot, a replacement mechanism for growing the root partition is -required. 
This is a harder problem for the multiple LVM volumes which the -whole-disk image creates. Generally the ``/var`` volume should grow to take -available disk space because this is where TripleO and OpenStack services store -their state, but sometimes ``/srv`` will need to grow for Swift storage, and -sometimes there may need to be a proportional split of multiple volumes. This -suggests that there will be new tripleo-heat-templates variables which will -specify the volume/proportion growth behaviour on a per-role basis. - -A new utility is required which automates this LVM volume growing -requirement. It could be implemented a number of ways: - -1. A new project/package containing the utility, installed on the image and - run by first-boot or early tripleo-ansible. - -2. A utility script installed by a diskimage-builder/tripleo-image-elements - element and run by first-boot or as a first-boot ansible task (post-provisioning - or early deploy). - -3. Implement entirely in an ansible role, either in its own repository, or as - part of tripleo-ansible. It would be run by early tripleo-ansible. - -This utility will also be useful to other cloud workloads which use LVM based -images, so some consideration is needed for making it a general purpose tool -which can be used outside an overcloud image. Because of this, option 2. is -proposed initially as the preferred way to install this utility, and it will -be proposed as a new element in diskimage-builder. Being coupled with -diskimage-builder means the utility can make assumptions about the partition -layout: - -* a single Volume Group that defaults to name ``vg`` - -* volume partitions are formatted with XFS, which can be resized while mounted - -Alternatives ------------- - -Because of the grub situation, the only real alternative is dropping support -for UEFI boot, which means only supporting legacy BIOS boot indefinitely. -This would likely have negative feedback from end-users. 
- -Security Impact ---------------- - -* All deployments will use images that comply with the hardened-image - requirements, so deployments will gain these security benefits - -* Whole disk images are UEFI Secure Boot enabled, so this blueprint brings us - closer to recommending that Secure Boot be switched on always. This will - validate to users that they have deployed boot/kernel binaries signed by Red - Hat. - -Upgrade Impact --------------- - -Nodes upgraded in-place will continue to be partition image based, and -new/replaced nodes will be deployed with whole-disk images. This doesn't have -a specific upgrade implication, unless we document an option for replacing -every node in order to ensure all nodes are deployed with whole-disk images. - -Other End User Impact ---------------------- - -There is little end-user impact other than: - -* The change of habit required to use overcloud-hardened-uefi-full.qcow2 - instead of overcloud-full.qcow2 - -* The need to set the heat variable if custom partition growing behaviour is - required - -Performance Impact ------------------- - -There is no known performance impact with this change. - -Other Deployer Impact ---------------------- - -All deployer impacts have already been mentioned elsewhere. - -Developer Impact ----------------- - -There are no developer impacts beyond the already mentioned deployer impacts. 
- -Implementation -============== - -Assignee(s) ------------ - -Primary assignee: - Steve Baker - -Work Items ----------- - -* python-tripleoclient: image upload command, handle - overcloud-hardened-uefi-full.qcow2 as the default if it exists locally - -* tripleo-ansible/cli-overcloud-node-provision.yaml: detect - overcloud-hardened-uefi-full.(qcow2|raw) as the default if it exists in - /var/lib/ironic/images - -* RDO jobs: - * add periodic job for overcloud-hardened-uefi-full - * remove periodic job for overcloud-hardened-full - * modify image publishing jobs to publish overcloud-hardened-uefi-full.qcow2 - -* tripleo-image-elements/overcloud-partition-uefi: add ``/srv`` logical volume - for swift data - -* tripleo-quickstart-extras: Use the whole_disk_images=True variable to switch to - downloading/uploading/deploying overcloud-hardened-uefi-full.qcow2 - -* tripleo-ci/featureset001/002: Enable whole_disk_images=True - -* diskimage-builder: Add new element which installs utility for growing LVM - volumes based on specific volume/proportion mappings - -* tripleo-common/image-yaml: - * refactor to remove non-uefi hardened image - * rename -python3.yaml back to -base.yaml - * add the element which installs the grow partition utility - -* tripleo-heat-templates: Define variables for driving partition growth - volume/proportion mappings - -* tripleo-ansible: Consume the volume/proportion mapping and run the volume - growing utility on every node in early boot. - -* tripleo-docs: - * Update the documentation for deploying whole-disk images by default - * Document variables for controlling partition growth - -Dependencies -============ - -Unless diskimage-builder require separate tracking to add the partition -growth utility, all tasks can be tracked under this blueprint. 
- -Testing -======= - -Image building and publishing ------------------------------ - -Periodic jobs which build images, and jobs which build and publish images to -downloadable locations need to be updated to build and publish -overcloud-hardened-uefi-full.qcow2. Initially this can be in parallel with -the existing overcloud-full.qcow2 publishing, but eventually that can be -switched off. - -overcloud-hardened-full.qcow2 is the same as -overcloud-hardened-uefi-full.qcow2 except that it only supports legacy BIOS -booting. Since overcloud-hardened-uefi-full.qcow2 supports both legacy BIOS -and UEFI boot, the periodic jobs which build overcloud-hardened-full.qcow2 -can be switched off from Wallaby onwards (assuming these changes are backported -as far back as Wallaby). - -CI support ----------- - -CI jobs which consume published images need to be modified so they can -download overcloud-hardened-uefi-full.qcow2 and deploy it as a whole-disk -image. - -Documentation Impact -==================== - -The TripleO Deployment Guide needs to be modified so that -overcloud-hardened-uefi-full.qcow2 is referred to throughout, and so that it -correctly documents deploying a whole-disk image based overcloud. - -References -========== - -.. _requirements for a hardened image: https://teknoarticles.blogspot.com/2017/07/build-and-use-security-hardened-images.html diff --git a/specs/yoga/taskcore-directord.rst b/specs/yoga/taskcore-directord.rst deleted file mode 100644 index 86d914f9..00000000 --- a/specs/yoga/taskcore-directord.rst +++ /dev/null @@ -1,514 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. 
- - http://creativecommons.org/licenses/by/3.0/legalcode - -=========================================================== -Unifying TripleO Orchestration with Task-Core and Directord -=========================================================== - -Include the URL of your launchpad blueprint: -https://blueprints.launchpad.net/tripleo/+spec/unified-orchestration - -The purpose of this spec is to introduce core concepts around Task-Core and -Directord, explain their benefits, and cover why the project should migrate -from using Ansible to using Directord and Task-Core. - -TripleO has long been established as an enterprise deployment solution for -OpenStack. Different task executions have been used at different times. -Originally, os-collect-config was used, then the switch to Ansible was -completed. A new task execution environment will enable moving forward -with a solution designed around the specific needs of TripleO. - -The tools being introduced are Task-Core and Directord. - -Task-Core_: - A dependency management and inventory graph solution which allows operators - to define tasks in simple terms with robust dominion over a given - environment. Declarative dependencies will ensure that if a container/config - is changed, only the necessary services are reloaded/restarted. Task-Core - provides access to the right tools for a given job with provenance, allowing - operators and developers to define outcomes confidently. - -Directord_: - A deployment framework built to manage the data center life cycle, which is - both modular and fast. Directord focuses on consistently maintaining - deployment expectations with a near real-time level of performance_ at almost - any scale. - - -Problem Description -=================== - -Task execution in TripleO is: - -* Slow -* Resource intensive -* Complex -* Defined in a static and sequential order -* Not optimized for scale - -TripleO presently uses Ansible to achieve its task execution orchestration -goals. 
While the TripleO tooling around Ansible (playbooks, roles, modules, -plugins) has worked and is likely to continue working should maintainers bear -an increased burden, future changes around direction due to `Ansible Execution -Environments`_ provide an inflection point. These upstream changes within -Ansible, where it is fundamentally moving away from the TripleO use case, force -TripleO maintainers to take on more ownership for no additional benefit. The -TripleO use case is actively working against the future direction of Ansible. - -Further, the Ansible lifecycle has never matched that of TripleO. A single -consistent and backwards compatible Ansible version can not be used across a -single version of TripleO without the tripleo-core team committing to maintain -that version of Ansible, or commit to updating the Ansible version in a stable -TripleO release. The cost to maintain a tool such as Ansible that the core team -does not own is high vs switching to custom tools designed specifically for the -TripleO use case. - -The additional cost of maintaining Ansible as the task execution engine for -TripleO, has a high likelihood of causing a significant disruption to the -TripleO project; this is especially true as the project looks to support future -OS versions. - -Presently, there are diminishing benefits that can be realized from any -meaningful performance, scale, or configurability improvments. The -simplification efforts and work around custom Ansible strategies and plugins -have reached a conclusion in terms of returns. - -While other framework changes to expose scaling mechanisms, such as using -``--limit`` or partitioning of the ansible execution across multiple stacks or -roles do help with the scaling problem, they are however in the category of -work arounds as they do not directly address the inherent scaling issues with -task executions. 
- -Proposed Change -=============== - -To make meaningful task execution orchestration improvements, TripleO must -simplify the framework with new tools, enable developers to build intelligent -tasks, and provide meaningful performance enhancements that scale to meet -operators' expectations. If TripleO can capitalize on this moment, it will -improve the quality of life for day one deployers and day two operations and -upgrades. - -The proposal is to replace all usage of Ansible with Directord for task -execution, and add the usage of Task-Core for dynamic task dependencies. - -In some ways, the move toward Task-Core and Directord creates a -General-Problem_, as it's proposing the replacement of many bespoke tools, which -are well known, with two new homegrown ones. Be that as it may, much attention -has been given to the user experience, addressing many well-known pain points -commonly associated with TripleO environments, including: scale, barrier to -entry, execution times, and the complex step process. - -Overview --------- - -This specification consists of two parts that work together to achieve the -project goals. - -Task-Core: - Task-Core builds upon native OpenStack libraries to create a dependency graph - and executes a compiled solution. With Task-Core, TripleO will be able to - define a deployment with dependencies instead of brute-forcing one. While - powerful, Task-Core keeps development easy and consistent, reducing the time - to deliver and allowing developers to focus on their actual deliverable, not - the orchestration details. Task-Core also guarantees reproducible builds, - runtime awareness, and the ability to resume when issues are encountered. - -* Templates containing step-logic and ad-hoc tasks will be refactored into - Task-Core definitions. - -* Each component can have its own Task-Core purpose, providing resources and - allowing other resources to depend on it. 
- -* The invocation of Task-Core will be baked into the TripleO client, it will - not have to be invoked as a separate deployment step. - -* Advanced users will be able to use Task-Core to meet their environment - expectations without fully understanding the deployment nuance of multiple - bespoke systems. - -* Employs a validation system around inputs to ensure they are correct before - starting the deployment. While the validation wont ensure an operational - deployment, it will remove some issues caused by incorrect user input, such - as missing dependent services or duplicate services; providing early feedback - to deployers so they're able to make corrections before running longer - operations. - -Directord: - Directord provides a modular execution platform that is aware of managed - nodes. Because Directord leverages messaging, the platform can guarantee - availability, transport, and performance. Directord has been built from the - ground up, making use of industry-standard messaging protocols which ensure - pseudo-real-time performance and limited resource utilization. The built-in - DSL provides most of what the TripleO project will require out of the box. - Because no solution is perfect, Directord utilizes a plugin system that will - allow developers to create new functionality without compromise or needing to - modify core components. Additionally, plugins are handled the same, allowing - Directord to ensure the delivery and execution performance remain consistent. - -* Directord is a single application that is ideally suited for containers while - also providing native hooks into systems; this allows Directord to operate in - heterogeneous environments. Because Directord is a simplified application, - operators can choose how they want to run it and are not forced into a one size - fits all solution. 
- -* Directord is platform-agnostic, allowing it to run across systems, versions, - and network topologies while simultaneously guaranteeing it maintains the - smallest possible footprint. - -* Directord is built upon messaging, giving it the unique ability to span - network topologies with varying latencies; messaging protocols compensate for - high latency environments and will finally give TripleO the ability to address - multiple data-centers and fully embrace "the edge." - -* Directord client/server communication is secured (TLS, etc) and encrypted. - -* Directord node management to address unreachable or flapping clients. - -With Task-Core and Directord, TripleO will have an intelligent dependency graph -that is both easy to understand and extend. TripleO will now be aware of things -like service dependencies, making it possible to run day two operations quickly -and more efficiently (e.g, update and restart only dependent services). -Finally, TripleO will shrink its maintenance burden by eliminating Ansible. - - -Alternatives ------------- - -Stay the course with Ansible - -Continuing with Ansible for task execution means that the TripleO core team -embraces maintaining Ansible for the specific TripleO use case. Additionally, -the TripleO project begins documenting the scale limitations and the boundaries -that exist due to the nature of task execution. Focus needs to shift to the -required maintenance necessary for functional expectations TripleO. Specific -Ansible versions also need to be maintained beyond their upstream lifecycle. -This maintenance would likely include maintaining an Ansible branch where -security and bug fixes could be backported, with our own project CI to validate -functionality. - -TripleO could also embrace the use of `Ansible Execution Environments`_ through -continued investigative efforts. Although, if TripleO is already maintaining -Ansible, this would not be strictly required. 
- - -Security Impact ---------------- - -Task-Core and Directord are two new tools and attack surfaces, which will -require a new security assessment to be performed to ensure the tooling -exceeds the standard already set. That said, steps have already been taken to -ensure the new proposed architecture is FIPS_ compatible, and enforces -`transport encryption`_. - -Directord also uses `ssh-python`_ for bootstrapping tasks. - -Ansible will be removed, and will no longer have a security impact within -TripleO. - - -Upgrade Impact --------------- - -The undercloud can be upgraded in place to use Directord and Task-Core. There -will be upgrade tasks that will migrate the undercloud as necessary to use the -new tools. - -The overcloud can also be upgraded in place with the new tools. Upgrade tasks -will be migrated to use the Directord DSL just like deployment tasks. This spec -proposes no changes to the overcloud architecture itself. - -As part of the upgrade task migration, the tasks can be rewritten to take -advantage of the new features exposed by these tools. With the introduction of -Task-Core, upgrade tasks can use well-defined dependencies for dynamic -ordering. Just like deployment, update/upgrade times will be decreased due to -the aniticipated performance increases. - - -Other End User Impact ---------------------- - -When following the `happy path`_, the end-user, deployers, and operators will -not interact with this change as the user interface will effectively remain the -same. However the user experience will change. Operators accustomed to Ansible -tasks, logging, and output, will instead need to become familiar with those -same aspects of Directord and Task-Core. - -If an operator wishes to leverage the advanced capabilities of either -Task-Core or Directord, the tooling will have documented end user interfaces -available for interfaces such as custom components and orchestrations. 
- -It should be noted that there's a change in deployment architecture in that -Directord follows a server/client model; albeit an ephemeral one. This change -aims to be fully transparent, however, it is something that end users, -deployers, will need to be aware of. - - -Performance Impact ------------------- - -This specification will have a positive impact on performance. Due to the -messaging architecture of Directord, near-realtime task execution will be -possible in parallel across all nodes. - -* Performance_ analysis has been done comparing configurability and runtime of - Directord vs. Ansible, the TripleO default orchestration tool. This analysis - highlights some of the performance gains this specification will provide; - initial testing suggests that Task-Core and Directord is more than 10x - faster than our current tool chain, representing a potential 90% time savings - in just the task execution overhead. - -* One of the goals of this specification is to remove impediments in the time - to work. Deployers should not be spending exorbitant time waiting for tools to - do work; in some cases, waiting longer for a worker to be available than it - would take to perform a task manually. - -* Improvements from being able to execute more efficiently in parallel. The - Ansible strategy work allowed us to run tasks from a given Ansible play in - parallel accoss the nodes. However this was limited to a effectively a single - play per node in terms of execution. The granularity was limited to a play - such that an Ansible play that with 100 items of work for one role and 10 - items of work would be run in parallel on the nodes. The role with 10 items - of work would likely finish first and the overall execution would have to - wait until the entire play was completed everywhere. The long pole for a - play's execution is the node with the most set of tasks. 
With the transition - to task-core and directord, the overall unit of work is an orchestration - which may have 5 tasks. If we take the same 100 tasks for one role and split - them up into 20 orchestrations that can be run in parallel, and the 10 items - of work into two orchestrations for the other roles. We are able to better - execute the work in parallel when there are no specific ordering - requirements. Improvements are expected around host prep tasks and other - services where we do not have specific ordering requirements. Today these - tasks get put in a random spot within a play and have to wait on other - unrelated tasks to complete before being run. We expect there to be less - execution overhead time per the other items in this section, however the - overall improvements are limited based on how well we can remove unnecessary - ordering requirements. - -* Deployers will no longer be required to run a massive server for medium-scale - deployment. Regardless of size, the memory footprint and compute cores needed - to execute a deployment will be significantly reduced. - - -Other Deployer Impact ---------------------- - -Task-Core and Directord represent an unknown factor; as such, they are -**not** battle-tested and will create uncertainty in an otherwise "stable_" -project. - -Deployers will experience the time savings of doing deployments. Deployers who -implement new services will need to do so with Directord and Task-Core. - -Extensive testing has been done; -all known use-cases, from system-level configuration to container pod -orchestration, have been covered, and automated tests have been created to -ensure nothing breaks unexpectedly. Additionally, for the first time, these -projects have expectations on performance, with tests backing up those claims, -even at a large scale. - -At present, TripleO assumes SSH access between the Undercloud and -Overcloud is always present. 
Additionally, TripleO believes the infrastructure -is relatively static, making day two operations risky and potentially painful. -Task-Core will reduce the computational burden when crafting action plans, and -Directord will ensure actions are always performed against the functional -hosts. - -Another improvement this specification will enhance is in the area of vendor -integrations. Vendors will be able to provide meaningful task definitions which -leverage an intelligent inventory and dependency system. No longer will TripleO -require vendors have in-depth knowledge of every deployment detail, even those -outside of the scope of their deliverable. By easing the job definitions, -simplifying the development process, and speeding up the execution of tasks are -all positive impacts on deployers. - -Test clouds are still highly recommended sources of information; however, -system requirements on the Undercloud will reduce. By reducing the resources -required to operate the Undercloud, the cost of test environments, in terms of -both hardware and time, will be significantly lowered. With a lower barrier to -entry developers and operators alike will be able to more easily contribute to -the overall project. - - -Developer Impact ----------------- - -To fully realize the benefits of this specification Ansible tasks will need to -be refactored into the Task-Core scheme. While Task-Core can run Ansible and -Directord has a plugin system which easily allows developers to port legacy -modules into Directord plugins, there will be a developer impact as the TripleO -development methodology will change. It's fair to say that the potential -developer impact will be huge, yet, the shift isn't monumental. Much of the -Ansible presently in TripleO is shell-oriented, and as such, it is easily -portable and as stated, compatibility layers exist allowing the TripleO project -to make the required shift gradually. 
Once the Ansible tasks are -ported, the time saved in execution will be significant. - -Example `Task-Core and Directord implementation for Keystone`_: - While this implementation example is fairly basic, it does result in a - functional Keystone environment in roughly 5 minutes and includes - services like MySQL, RabbitMQ, Keystone as well as ensuring that the - operating system is set up and configured for a cloud execution environment. - The most powerful aspect of this example is the inclusion of the graph - dependency system which will allow us to easily externalize services. - -* The use of advanced messaging protocols instead of SSH means TripleO can more - efficiently address deployments in local data centers or at the edge - -* The Directord server and storage can be easily offloaded, making it possible - for the TripleO Client to be executed from simple environments without access - to the overcloud network; imagine running a massive deployment from a laptop. - - -Implementation -============== - -In terms of essential TripleO integration, most of the work will occur within -the tripleoclient_, with the following new workflow. - -`Execution Workflow`_:: - - ┌────┐ ┌─────────────┐ ┌────┐ ┌─────────┐ ┌─────────┬──────┐ ??????????? - │USER├──►│TripleOclient├──►│Heat├──►│Task-Core├──►│Directord│Server├──►? Network ? - └────┘ └─────────────┘ └────┘ └─────────┘ └─────────┴──────┘ ??????????? - ▲ ▲ ▲ - │ ┌─────────┬───────┐ | | - └──────────────────────►│Directord│Storage│◄──┘ | - └─────────┴───────┘ | - | - ┌─────────┬──────┐ | - │Directord│Client│◄───────┘ - └─────────┴──────┘ - -* Directord|Server - Task executor connecting to client. - -* Directord|Client - Client program running on remote hosts connecting back to - the Directord|Server. - -* Directord|Storage - An optional component, when not externalized, Directord will - maintain the runtime storage internally. In this configuration Directord is - ephemeral. 
- -To enable a gradual transition, ansible-runner_ has been implemented within -Task-Core, allowing the TripleO project to convert playbooks into tasks that -rely upon strongly typed dependencies without requiring a complete rewrite. The -initial implementation should be transparent. Once the Task-Core hooks are set -within tripleoclient_ functional groups can then convert their tripleo-ansible_ -roles or ad-hoc Ansible tasks into Directord orchestrations. Teams will have -the flexibility to transition code over time and are incentivized by a -significantly improved user experience and shorter time to delivery. - - -Assignee(s) ------------ - -Primary assignee: - * Cloudnull - Kevin Carter - * Mwhahaha - Alex Schultz - * Slagle - James Slagle - - -Other contributors: - * ??? - - -Work Items ----------- - -#. Migrate Directord and Task-Core to the OpenStack namespace. -#. Package all of Task-Core, Directord, and dependencies for pypi -#. RPM Package all of Task-Core, Directord, and dependencies for RDO -#. Directord container image build integration within TripleO / tcib -#. Converge on a Directord deployment model (container, system, hybrid). -#. Implement the Task-Core code path within TripleO client. -#. Port in template Ansible tasks to Directord orchestrations. -#. Port Ansible roles into Directord orchestrations. -#. Port Ansible modules and actions into pure Python or Directord components -#. Port Ansible workflows in tripleoclient into pure Python or Directord - orchestrations. -#. Migration tooling for Heat templates, Ansible roles/modules/actions. -#. Port Ansible playbook workflows in tripleoclient to pure Python or - Directord orchestrations. -#. Undercloud upgrade tasks to migrate to Directord + Task-Core architecture -#. Overcloud upgrade tasks to migrate to enable Directord client bootstrapping - - -Dependencies -============ - -Both Task-Core and Directord are dependencies, as they're new projects. 
These -dependencies may or may not be brought into the OpenStack namespace; -regardless, both of these projects, and their associated dependencies, will -need to be packaged and provided for by RDO. - - -Testing -======= - -If successful, the implementation of Task-Core and Directord will leave the -existing testing infrastructure unchanged. TripleO will continue to function as -it currently does through the use of the tripleoclient_. - -New tests will be created to ensure the Task-Core and Directord components -remain functional and provide an SLA around performance and configurability -expectations. - - -Documentation Impact -==================== - -Documentation around Ansible will need to be refactored. - -New documentation will need to be created to describe the advanced -usage of Task-Core and Directord. Much of the client interactions from the -"`happy path`_" will remain unchanged. - - -References -========== - -* Directord official documentation https://directord.com - -* Ansible's decision to pivot to execution environments: - https://ansible-runner.readthedocs.io/en/latest/execution_environments.html - -.. _Task-Core: https://github.com/mwhahaha/task-core - -.. _Directord: https://github.com/cloudnull/directord - -.. _General-Problem: https://xkcd.com/974 - -.. _`legacy tooling`: https://xkcd.com/1822 - -.. _`transport encryption`: https://directord.com/drivers.html - -.. _FIPS: https://en.wikipedia.org/wiki/Federal_Information_Processing_Standards - -.. _Performance: https://directord.com/overview.html#comparative-analysis - -.. _practical: https://xkcd.com/382 - -.. _stable: https://xkcd.com/1343 - -.. _validation: https://xkcd.com/327 - -.. _scheme: https://github.com/mwhahaha/task-core/tree/main/schema - -.. _`Task-Core and Directord implementation for Keystone`: https://raw.githubusercontent.com/mwhahaha/task-core/main/examples/directord/services/openstack-keystone.yaml - -.. _`happy path`: https://xkcd.com/85 - -.. 
_tripleoclient: https://github.com/openstack/python-tripleoclient - -.. _`Execution Workflow`: https://review.opendev.org/c/openstack/tripleo-heat-templates/+/798747 - -.. _ansible-runner: https://github.com/ansible/ansible-runner - -.. _tripleo-ansible: https://github.com/openstack/tripleo-ansible - -.. _`Ansible Execution Environments`: https://ansible-runner.readthedocs.io/en/latest/execution_environments.html - -.. _`ssh-python`: https://pypi.org/project/ssh-python diff --git a/specs/yoga/tripleo_ceph_ingress.rst b/specs/yoga/tripleo_ceph_ingress.rst deleted file mode 100644 index 725894b9..00000000 --- a/specs/yoga/tripleo_ceph_ingress.rst +++ /dev/null @@ -1,259 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -=========================================== -TripleO Ceph Ingress Daemon Integration -=========================================== - -Starting in the Octopus release, Ceph introduced its own day1 tool called -cephadm and its own day2 tool called orchestrator which replaced ceph-ansible. -During the Wallaby and Xena cycles TripleO moved away from ceph-ansible and -adopted cephadm [1]_ as described in [2]_. -During Xena cycle a new approach of deploying Ceph in a TripleO context has -been established and now a Ceph cluster can be provisioned before the overcloud -is created, leaving to the overcloud deployment phase the final configuration -of the Ceph cluster which depends on the OpenStack enabled services defined by -the tripleo-heat-templates interface. -The next goal is to deploy as many Ceph services as possible using the deployed -ceph interface instead of during overcloud deployment. -As part of this effort, we should pay attention to the high-availability aspect, -how it's implemented in the current release and how it should be changed for -Ceph. 
-This spec represents a follow-up of [3]_; it defines the requirements to rely -on the Ceph provided HA daemons and describes the changes required in TripleO -to meet this goal. - -Problem Description -=================== - -In the following description we are referring to the Ganesha daemon and the -need of the related Ceph Ingress daemon deployment, but the same applies to -all the existing daemons that require a high-availability configuration -(e.g., RGW and the Ceph dashboard for the next Ceph release). -In TripleO we support deployment of Ganesha both when the Ceph cluster is -itself managed by TripleO and when the Ceph cluster is itself not managed by -TripleO. -When the cluster is managed by TripleO, as per spec [3]_, it is preferable to -have cephadm manage the lifecycle of the NFS container instead of deploying it -with tripleo-ansible, and this is broadly covered and solved by allowing the -tripleo Ceph mkspec module to support the new Ceph daemon [4]_. -The ceph-nfs daemon deployed by cephadm has its own HA mechanism, called -ingress, which is based on haproxy and keepalived [5]_ so we would no longer -use pcmk as the VIP owner. -This means we would run pcmk and keepalived in addition to haproxy (deployed by -tripleo) and another haproxy (deployed by cephadm) on the same server (though -with listeners on different ports). -This approach only relies on Ceph components, and both external and internal -scenarios are covered. -However, adopting the ingress daemon for a TripleO deployed Ceph cluster means -that we need to make the overcloud aware of the new running services: for -this reason the proposed change is meant to introduce a new TripleO resource -that properly handles the interface with the Ceph services and is consistent -with the tripleo-heat-templates roles. 
- -Proposed Change -=============== - -Overview -------- - -The change proposed by this spec requires the introduction of a new TripleO -Ceph Ingress resource that describes the ingress service that provides load -balancing and HA. -The impact of adding a new `OS::TripleO::Services::CephIngress` resource can -be seen on the following projects. - - -tripleo-common -------------- - -As described in Container Image Preparation [6]_ the undercloud may be used as -a container registry for all the ceph related containers and a new, supported -syntax, has been introduced to `deployed ceph` to download containers from -authenticated registries. -However, as per [7]_, the Ceph ingress daemons won’t be baked into the Ceph -daemon container, hence `tripleo container image prepare` should be executed to -pull the new container images/tags in the undercloud as done for the Ceph -Dashboard and the regular Ceph image. -Once the ingress containers are available, it's possible to deploy the daemon -on top of ceph-nfs or ceph-rgw. -In particular, if this spec is going to be implemented, `deployed ceph` will be -the only way of setting up this daemon through cephadm for ceph-nfs, resulting -in a simplified tripleo-heat-templates interface and a smaller number of tripleo -ansible task executions because part of the configuration is moved before the -overcloud is deployed. -As part of this effort, considering that the Ceph related container images have -grown over time, a new condition will be added to the tripleo-container jinja -template [8]_ to avoid pulling additional ceph images if Ceph is not deployed by -TripleO [10]_. -This will result in a new optimization for all the Ceph external cluster use cases, -as well as the existing CI jobs without Ceph. - -tripleo-heat-templates ---------------------- -A Heat resource will be created within the cephadm space. 
The new resource will -also be added to the existing Controller roles and all the relevant environment -files will be updated with the new reference. -In addition, as described in the spec [3]_, pacemaker constraints for ceph-nfs -and the related vip will be removed. -The tripleo-common ceph_spec library is already able to generate the spec for -this kind of daemon and it will trigger cephadm [4]_ to deploy an ingress daemon -provided that the NFS Ceph spec is applied against an existing cluster and the -backend daemon is up and running. -As mentioned before, the ingress daemon can also be deployed on top of an RGW -instance, therefore the proposed change is valid for all the Ceph services that -require an HA configuration. - - -Security Impact ---------------- - -The ingress daemon applied to an existing ceph-nfs instance is managed by -cephadm, resulting in a simplified model in terms of lifecycle. A Ceph spec for -the ingress daemon is generated right after the ceph-nfs instance is applied, -and as per [5]_ it requires two additional options: - -* frontend_port -* monitoring_port - -The two ports are required by haproxy to accept incoming requests and for -monitoring purposes, hence we need to make TripleO aware of this new service -and properly set up the firewall rules. As long as the ports defined by the spec -are passed to the overcloud deployment process and defined in the -tripleo-heat-templates CephIngress daemon resource, the `firewall_rules` -tripleo ansible role is run and rules are applied for both the frontend and -monitoring port. The usual network used by this daemon (and affected by the new -applied rules) is the `StorageNFS`, but we might have cases where an operator -overrides it. -The lifecycle, builds and security aspects for the container images associated -to the CephIngress resource are not managed by TripleO, and the Ceph -organization takes care of maintenance and updates. 
- - - -Upgrade Impact --------------- - -The problem of an existing Ceph cluster is covered by the spec [8]_. - - -Performance Impact ------------------- - -Since two new images (and the equivalent tripleo-heat-templates services) have -been introduced, some time is required to pull these new additional containers -in the undercloud. However, the tripleo_containers jinja template has been -updated, splitting off the Ceph related container images. In particular, during -the containers image prepare phase, a new boolean option has been added and -pulling the Ceph images can be avoided by setting the `ceph_images` boolean to -false. By doing this we can improve performances when Ceph is not required. - -Developer Impact ----------------- -This effort can be easily extended to move the RGW service to deployed ceph, -which is out of scope of this spec. - -Implementation -============== - -Deployment Flow ---------------- - -The deployment and configuration described in this spec will happen during -`openstack overcloud ceph deploy`, as described in [8]_. -The current implementation of `openstack overcloud network vip provision` -allows to provision 1 vip per network, which means that using the new Ceph -Ingress daemon (that requires 1 vip per service) can break components that -are still using the VIP provisioned on the storage network (or any other -network depending on the tripleo-heat-templates override specified) and -are managed by pacemaker. -A new option `--ceph-vip` for `openstack overcloud ceph deploy` command -will be added [11]_. This option may be used to reserve VIP(s) for each -Ceph service specified by the 'service/network' mapping defined as input. 
-For instance, a generic ceph service mapping can be something like the -following:: - - --- - ceph_services: - - service: ceph_nfs - network: storage - - service: ceph_rgw - network: storage - -For each service added to the list above, a virtual ip on the specified -network (that can be a composable network) will be created and used as -frontend_vip of the ingress daemon. -As described in the overview section, an ingress object will be defined -and deployed and this is supposed to manage both the VIP and the HA for -this component. - -Assignee(s) ------------ - -- fmount -- fultonj -- gfidente - -Work Items ----------- - -- Create a new Ceph prefixed Heat resource that describes the Ingress daemon - in the TripleO context. -- Add both haproxy and keepalived containers to the Ceph container list so that - they can be pulled during the `Container Image preparation` phase. -- Create a set of tasks to deploy both the nfs and the related ingress - daemon -- Deprecate the pacemaker related configuration for ceph-nfs, including - pacemaker constraints between the manila-share service and ceph-nfs -- Create upgrade playbooks to transition from TripleO/pcmk managed nfs - ganesha to nfs/ingress daemons deployed by cephadm and managed by ceph - orch - -Depending on the state of the directord/task-core migration we might skip the -ansible part, though we could POC with it to get started, extending the existing -tripleo-ansible cephadm role. - -Dependencies -============ - -This work depends on the tripleo_ceph_nfs spec [3]_ that moves from tripleo -deployed ganesha to the cephadm approach. - -Testing -======= - -The NFS daemon feature can be enabled at day1 and it will be tested against -the existing TripleO scenario004 [9]_. -As part of the implementation plan, the update of the existing heat templates -environment CI files, which contain both the Heat resources and the testing -job parameters, is one of the goals of this spec. 
- - -Documentation Impact -==================== - -The documentation will describe the new parameters introduced to the `deployed -ceph` cli to give the ability to deploy additional daemons (ceph-nfs and the -related ingress daemon) as part of deployed ceph. -However, we should provide upgrade instructions for pre-existing environments -that need to transition from TripleO/pcmk managed nfs ganesha to nfs daemons -deployed by cephadm and managed by ceph orch. - - -References -========== - -.. [1] `cephadm `_ -.. [2] `tripleo-ceph `_ -.. [3] `tripleo-nfs-spec `_ -.. [4] `tripleo-ceph-mkspec `_ -.. [5] `cephadm-nfs-ingress `_ -.. [6] `container-image-preparation `_ -.. [7] `ceph-ingress-containers `_ -.. [8] `tripleo-common-j2 `_ -.. [9] `tripleo-scenario004 `_ -.. [10] `tripleo-common-split-off `_ -.. [11] `tripleo-ceph-vip `_ diff --git a/specs/yoga/tripleo_ceph_manila.rst b/specs/yoga/tripleo_ceph_manila.rst deleted file mode 100644 index 7f31fff0..00000000 --- a/specs/yoga/tripleo_ceph_manila.rst +++ /dev/null @@ -1,231 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -=========================================== -TripleO Ceph Ganesha Integration for Manila -=========================================== - -Starting in the Octopus release, Ceph introduced its own day1 tool called -cephadm and its own day2 tool called orchestrator which replaced ceph-ansible. -During the Wallaby and Xena cycles TripleO moved away from ceph-ansible and -adopted cephadm [1]_ as described in [2]_. -However, the ganesha daemon deployment remained under the tripleo-ansible -control, with a set of tasks that are supposed to replicate the relevant part -of the ceph-nfs ceph-ansible role [3]_. -This choice ensured backward compatibility with the older releases. 
- -Problem Description -=================== - -In TripleO we support deployment of Ganesha both when the Ceph cluster is -itself managed by TripleO and when the Ceph cluster is itself not managed -by TripleO. -When the cluster is managed by TripleO, an NFS daemon can be deployed as a -regular TripleO service via the tripleo-ansible module [4]_. -It is preferable to have cephadm manage the lifecycle of the NFS container -instead of deploying it with tripleo-ansible. -In order to do this we will require the following changes on both TripleO -and Manila: - -- the orchestrator provides an interface that should be used by Manila to - interact with the ganesha instances. The nfs orchestrator interface is - described in [5]_ and can be used to manipulate the nfs daemon, as well - as create and delete exports. - In the past the ganesha configuration file was fully customized by - ceph-ansible; the orchestrator is going to have a set of overrides to - preserve backwards compatibility. This result is achieved by setting a - userconfig object that lives within the Ceph cluster [5]_. It's going - to be possible to check, change and reset the nfs daemon config using - the same interface provided by the orchestrator [11]_. - -- The deployed NFS daemon is based on the watch_url mechanism [6]_: - adopting a cephadm deployed ganesha instance requires the Manila driver - be updated to support this new approach. This work is described in [10]_. - -- The ceph-nfs daemon deployed by cephadm has its own HA mechanism, called - ingress, which is based on haproxy and keepalived [7]_ so we would no - longer use pcmk as the VIP owner. - Note this means we would run pcmk and keepalived in addition to haproxy - (deployed by tripleo) and another haproxy (deployed by cephadm) on the - same server (though with listeners on different ports). 
- Because cephadm is controlling the ganesha life cycle, the pcs cli will - no longer be used to interact with the ganesha daemon and we will change - where the ingress daemon is used. - -When the Ceph cluster is *not* managed by TripleO, the Ganesha service is -currently deployed standalone on the overcloud and it's configured to use -the external Ceph MON and MDS daemons. -However, if this spec is implemented, then the standalone ganesha service -will no longer be deployed by TripleO. Instead, we will require that the -admin of the external ceph cluster add the ceph-nfs service to that cluster. -Though TripleO will still configure Manila to use that service. - -Thus in the external case, Ganesha won't be deployed and details about the -external Ganesha must be provided as input during overcloud deployment. We -will also provide tools to help someone who has deployed Ganesha on the -overcloud transition the service to their external Ceph cluster. From a high -level the process will be the following: - -1. Generate a cephadm spec so that after the external ceph cluster becomes - managed by cephadm the spec can be used to add the ceph-nfs service - with the required properties. -2. Disable the VIP PCS uses and provide a documented method for it to be - moved to the external ceph cluster. - -Proposed Change -=============== - -Overview -------- - -An ansible task will generate the Ceph NFS daemon spec and it will trigger -cephadm [2]_ to deploy the Ganesha container. - -- the NFS spec should be rendered and applied against the existing Ceph - cluster -- the ingress spec should be rendered (as part of the NFS deployment) - and applied against the cluster - -The container will no longer be controlled by pacemaker. - - -Security Impact ---------------- - -None, the same code which TripleO would already use for the generation of -the Ceph cluster config and keyrings will be consumed. 
- -Upgrade Impact --------------- - -- We will deprecate the ganesha managed by PCS so that it will still work - up until Z. -- We will provide playbooks which migrate from the old NFS service to the - new one. -- We will assume these playbooks will be available in Z and run prior to - the upgrade to the next release. - -Other End User Impact ---------------------- - -For fresh deployments, the existing input parameters will be reused to -drive the newer deployment tool. -For an existing environment, after the Ceph upgrade, the TripleO deployed -NFS instance will be stopped and removed by the migration playbook provided, -as well as the related pacemaker resources and constraints; cephadm will -be able to deploy and manage the new NFS instances, and the end user will -see a disruption in the NFS service. - -Performance Impact ------------------- - -No changes. - -Other Deployer Impact ---------------------- - -* "deployed ceph": For the first implementation of this spec we'll deploy - during overcloud deployment but we will aim to deliver this so that it - is compatible with "deployed ceph". VIPs are provisioned with - `openstack overcloud network vip provision` before - `openstack overcloud network provision` and before - `openstack overcloud node provision` so we would have an ingress VIP in - advance so we could do this with "deployed ceph". - -* directord/task-core: We will ultimately need this implemented for the - directord/task-core tool but could start with ansible tasks added to - the tripleo_ceph role. Depending on the state of the directord/task-core - migration when we implement we might skip the ansible part, though we - could POC with it to get started. - -Developer Impact ----------------- - -Assuming the manila services are able to interact with Ganesha using the -watch_url mechanism, the NFS daemon can be generated as a regular Ceph -daemon using the spec approach provided by the tripleo-ansible module [4]_. 
- -Implementation -============== - -Deployment Flow ---------------- - -The deployment and configuration described in this spec will happen during -`openstack overcloud deploy`, as described in [8]_. -This is consistent with how tripleo-ansible used to run during step2 to -configure these services. The tripleo-ansible tasks should be moved from a -pure ansible templating approach that generates the systemd unit according -to the input provided to a cephadm based daemon that can be configured with -the usual Ceph mgr config-key mechanism. -As described in the overview section, an ingress object will be defined and -deployed and this is supposed to manage both the VIP and the HA for this -component. - -Assignee(s) ------------ - -- fmount -- fultonj -- gfidente - -Work Items ----------- - -- Change the tripleo-ansible module to support the Ceph ingress daemon - type -- Create a set of tasks to deploy both the nfs and the related ingress - daemons -- Deprecate the pacemaker related configuration for ceph-nfs, including - pacemaker constraints between the manila-share service and ceph-nfs -- Create upgrade playbooks to transition from TripleO/pcmk managed nfs - ganesha to nfs daemons deployed by cephadm and managed by ceph orch - -Dependencies -============ - -- This work depends on the manila spec [10]_ that moves from dbus to the - watch_url approach - -Testing -======= - -The NFS daemon feature can be enabled at day1 and it will be tested against -the existing TripleO scenario004 [9]_. -As part of the implementation plan, the update of the existing heat templates -environment CI files, which contain the testing job parameters, is one of the -goals of this spec. -An important aspect of the job definition process is related to standalone vs -multinode. 
-As seen in the past, multinode can help catch issues that are not visible -in a standalone environment, but of course the job configuration can be improved -in the next cycles, and we can start with standalone testing, which is what is -present today in CI. - -Documentation Impact -==================== - -No changes should be necessary to the TripleO documentation, as the described -interface remains unchanged. -However, we should provide upgrade instructions for pre-existing environments -that need to transition from TripleO/pcmk managed nfs ganesha to nfs daemons -deployed by cephadm and managed by ceph orch. - -References -========== - -.. [1] `cephadm `_ -.. [2] `tripleo-ceph `_ -.. [3] `tripleo-ceph-ganesha `_ -.. [4] `tripleo-ceph-mkspec `_ -.. [5] `tripleo-ceph-nfs `_ -.. [6] `ganesha-watch_url `_ -.. [7] `cephadm-nfs-ingress `_ -.. [8] `tripleo-cephadm `_ -.. [9] `tripleo-scenario004 `_ -.. [10] `cephfs-nfs-drop-dbus `_ -.. [11] `cephfs-get-config `_ - diff --git a/specs/zed/decouple-tripleo-tasks.rst b/specs/zed/decouple-tripleo-tasks.rst deleted file mode 100644 index 712a662c..00000000 --- a/specs/zed/decouple-tripleo-tasks.rst +++ /dev/null @@ -1,253 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -====================== -Decouple TripleO Tasks -====================== - -https://blueprints.launchpad.net/tripleo/+spec/decouple-tripleo-tasks - -This spec proposes decoupling tasks across TripleO by organizing tasks in a way -that they are grouped as a function of what they manage. The desire is to be -able to better isolate and minimize what tasks need to be run for specific -management operations. The process of decoupling tasks is implemented through -moving tasks into standalone native ansible roles and playbooks in tripleo-ansible. 
- - -Problem Description -=================== - -TripleO presently manages the entire software configuration of the overcloud at -once each time ``openstack overcloud deploy`` is executed. Regardless of -whether nodes were already deployed, require a full redeploy for some reason, -or are new nodes (scale up, replacement) all tasks are executed. The -functionality of only executing needed tasks lies within Ansible. - -The problem with relying entirely on Ansible to determine if any changes are -needed is that it results in long deploy times. Even if nothing needs to be -done, it can take hours just to have Ansible check each task in order to make -that determination. - -Additionally, TripleO's reliance on external tooling (Puppet, container config -scripts, bootstrap scripts, etc) means that tasks executing those tools -**must** be executed by Ansible as Ansible does not have the necessary data -needed in order to determine if those tasks need to be executed or not. These -tasks often have cascading effects in determining what other tasks need to be -run. This is a general problem across TripleO, and is why the model of just -executing all tasks on each deploy has been the accepted pattern. - - -Proposed Change -=============== - -The spec proposes decoupling tasks and separating them out as needed to manage -different functionality within TripleO. Depending on the desired management -operation, tripleoclient will contain the necessary functionality to trigger -the right tasks. Decoupling and refactoring tasks will be done by migrating to -standalone ansible role and playbooks within tripleo-ansible. This will allow -for reusing the standalone ansible artifacts from tripleo-ansible to be used -natively with just ``ansible-playbook``. At the same time, the -``tripleo-heat-templates`` interfaces are maintained by consuming the new roles -and playbooks from ``tripleo-ansible``. - -Overview --------- - -There are 3 main changes proposed to implement this spec: - -#. 
Refactor ansible tasks from ``tripleo-heat-templates`` into standalone roles - in tripleo-ansible. -#. Develop standalone playbooks within tripleo-ansible to consume the - tripleo-ansible roles. -#. Update tripleo-heat-templates to use the standalone roles and playbooks from - ``tripleo-ansible`` with new ``role_data`` interfaces to drive specific - functionality with new ``openstack overcloud`` commands. - -Writing standalone roles in ``tripleo-ansible`` will largely be an exercise of -copy/paste from tasks lists in ``tripleo-heat-templates``. As tasks are moved -into standalone roles, tripleo-heat-templates can be directly updated to run -tasks from the those roles using ``include_role``. This pattern is already well -established in tripleo-heat-templates with composable services that use -existing standalone roles. - -New playbooks will be developed within tripleo-ansible to drive the standalone -roles using pure ``ansible-playbook``. These playbooks will offer a native -ansible experience for deploying with tripleo-ansible. - -The design principles behind the standalone role and playbooks are: - -#. Native execution with ansible-playbook, an inventory, and variable files. -#. No Heat. While Heat remains part of the TripleO architecture, it has no - bearing on how the native ansible is developed in tripleo-ansible. - tripleo-heat-templates can consume the standalone ansible playbooks and - roles from tripleo-ansible, but it does not dictate the interface. The - interface should be defined for native ansible best practices. -#. No puppet. As the standalone roles are developed, they will not rely on - puppet for configuration or any other tasks. To allow integration with - tripleo-heat-templates and existing TripleO interfaces (Hiera, Heat - parameters), the roles will allow skipping config generation and other parts - that use puppet so that pieces can be overridden by - ``tripleo-heat-templates`` specific tasks. 
When using native Ansible, - templated config files and native ansible tasks will be used instead of - Puppet. -#. While the decoupled tasks will allow for cleaner interfaces for executing - just specific management operations, all tasks will remain idempotent. A - full deployment that re-runs all tasks will still work, and result in no - effective changes for an already deployed cloud with the same set of inputs. - -The standalone roles will use separated task files for each decoupled -management interface exposed. The playbooks will be separated by management -interface as well to allow for executing just specific management functionality. - -The decoupled management interfaces are defined as: - -* bootstrap -* install -* pre-network -* network -* configure -* container-config -* service-bootstrap - -New task interfaces in ``tripleo-heat-templates`` will be added under -``role_data`` to correspond with the new management interfaces, and consume the -standalone ansible from tripleo-ansible. This will allow executing just -specific management interfaces and using the standalone playbooks from -tripleo-ansible directly. - -New subcommands will be added to tripleoclient to trigger the new management -interface operations, ``openstack overcloud install``, ``openstack overcloud -configure``, etc. - -``openstack overcloud deploy`` would continue to function as it presently does -by doing a full assert of the system state with all tasks. The underlying -playbook, ``deploy-steps-playbook.yaml`` would be updated as necessary to -include the other playbooks so that all tasks can be executed. - -Alternatives ------------- - -:Alternative 1 - Use --tags/--skip-tags: - -With ``--tags`` / ``--skip-tags``, tasks could be selectively executed. In the -past this has posed other problems within TripleO. Using tags does not allow -for composing tasks to the level needed, and often results in running tasks -when not needed or forgetting to tag needed tasks. 
Having to add the special -cased ``always`` tag becomes necessary so that certain tasks are run when -needed. The tags become difficult to maintain as it is not apparent what tasks -are tagged when looking at the entire execution. Additionally, not all -operations within TripleO map to Ansible tasks one to one. Container startup -are declared in a custom YAML format, and that format is then used as input to -a task. It is not possible to tag individual container startups unless tag -handling logic was added to the custom modules used for container startup. - -:Alternative 2 - Use --start-at-task: - -Using ``--start-at-task`` is likewise problematic, and it does not truly -partition the full set of tasks. Tasks would need to be reordered anyway across -much of TripleO so that ``--start-at-task`` would work. It would be more -straightforward to separate by playbook if a significant number of tasks need -to be reordered. - -Security Impact ---------------- - -Special consideration should be given to security related tasks to ensure that -the critical tasks are executed when needed. - -Upgrade Impact --------------- - -Upgrade and update tasks are already separated out into their own playbooks. -There is an understanding that the full ``deploy_steps_playbook.yaml`` is -executed after an update or upgrade however. This full set of tasks could end -up being reduced if tasks are sufficiently decoupled in order to run the -necessary pieces in isolation (config, bootstrap, etc). - -Other End User Impact ---------------------- - -Users will need to be aware of the limitations of using the new management -commands and playbooks. The expectation within TripleO has always been the -entire state of the system is re-asserted on scale up and configure operations. - -While the ability to still do a full assert would be present, it would no -longer be required. 
Operators and users will need to understand that only -running certain management operations may not fully apply a desired change. If -only a reconfiguration is done, it may not imply restarting containers. With -the move to standalone and native ansible components, with less -``config-download`` based generation, it should be more obvious what each -playbooks is responsible for managing. The native ansible interfaces will help -operators reason about what needs to be run and when. - -Performance Impact ------------------- - -Performance should be improved for the affected management operations due to -having to run less tasks, and being able to run only the tasks needed for a -given operation. - -There should be no impact when running all tasks. Tasks must be refactored in -such a way that the overall deploy process when all tasks are run is not made -slower. - -Other Deployer Impact ---------------------- - -Discuss things that will affect how you deploy and configure OpenStack -that have not already been mentioned, such as: - -* What config options are being added? Should they be more generic than - proposed (for example a flag that other hypervisor drivers might want to - implement as well)? Are the default values ones which will work well in - real deployments? - -* Is this a change that takes immediate effect after its merged, or is it - something that has to be explicitly enabled? - -Developer Impact ----------------- - -TripleO developers will be responsible for updating the service templates that -they maintain in order to refactor the tasks. - -Implementation -============== - -Assignee(s) ------------ - -Primary assignee: - James Slagle - -Work Items ----------- - -Work items or tasks -- break the feature up into the things that need to be -done to implement it. Those parts might end up being done by different people, -but we're mostly trying to understand the timeline for implementation. - - -Dependencies -============ - -None. 
- -Testing -======= - -Existing CI jobs would cover changes to task refactorings. -New CI jobs could be added for the new isolated management operations. - -Documentation Impact -==================== - -New commands and playbooks must be documented. - - -References -========== -`standalone-roles POC `_ diff --git a/specs/zed/placeholder.rst b/specs/zed/placeholder.rst deleted file mode 100644 index 58b0d0ce..00000000 --- a/specs/zed/placeholder.rst +++ /dev/null @@ -1,9 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -=============== -Zed placeholder -=============== diff --git a/tests/__init__.py b/tests/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/tests/test_titles.py b/tests/test_titles.py deleted file mode 100644 index 2439a44c..00000000 --- a/tests/test_titles.py +++ /dev/null @@ -1,108 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import glob -import re - -import docutils.core -import testtools - - -class TestTitles(testtools.TestCase): - def _get_title(self, section_tree): - section = { - 'subtitles': [], - } - for node in section_tree: - if node.tagname == 'title': - section['name'] = node.rawsource - elif node.tagname == 'section': - subsection = self._get_title(node) - section['subtitles'].append(subsection['name']) - return section - - def _get_titles(self, spec): - titles = {} - for node in spec: - if node.tagname == 'section': - # Note subsection subtitles are thrown away - section = self._get_title(node) - titles[section['name']] = section['subtitles'] - return titles - - def _check_titles(self, filename, expect, actual): - missing_sections = [x for x in expect.keys() if x not in actual.keys()] - extra_sections = [x for x in actual.keys() if x not in expect.keys()] - - msgs = [] - if len(missing_sections) > 0: - msgs.append("Missing sections: %s" % missing_sections) - if len(extra_sections) > 0: - msgs.append("Extra sections: %s" % extra_sections) - - for section in expect.keys(): - missing_subsections = [x for x in expect[section] - if x not in actual[section]] - # extra subsections are allowed - if len(missing_subsections) > 0: - msgs.append("Section '%s' is missing subsections: %s" - % (section, missing_subsections)) - - if len(msgs) > 0: - self.fail("While checking '%s':\n %s" - % (filename, "\n ".join(msgs))) - - def _check_lines_wrapping(self, tpl, raw): - for i, line in enumerate(raw.split("\n")): - if "http://" in line or "https://" in line: - continue - self.assertTrue( - len(line) < 80, - msg="%s:%d: Line limited to a maximum of 79 characters." 
% - (tpl, i+1)) - - def _check_no_cr(self, tpl, raw): - matches = re.findall('\r', raw) - self.assertEqual( - len(matches), 0, - "Found %s literal carriage returns in file %s" % - (len(matches), tpl)) - - - def _check_trailing_spaces(self, tpl, raw): - for i, line in enumerate(raw.split("\n")): - trailing_spaces = re.findall(" +$", line) - self.assertEqual(len(trailing_spaces),0, - "Found trailing spaces on line %s of %s" % (i+1, tpl)) - - - def test_template(self): - with open("specs/template.rst") as f: - template = f.read() - spec = docutils.core.publish_doctree(template) - template_titles = self._get_titles(spec) - - releases = [x.split('/')[1] for x in glob.glob('specs/*/')] - for release in releases: - files = glob.glob("specs/%s/*/*" % release) - for filename in files: - self.assertTrue(filename.endswith(".rst"), - "spec filenames must use 'rst' extension.") - with open(filename) as f: - data = f.read() - - spec = docutils.core.publish_doctree(data) - titles = self._get_titles(spec) - self._check_titles(filename, template_titles, titles) - self._check_lines_wrapping(filename, data) - self._check_no_cr(filename, data) - self._check_trailing_spaces(filename, data) diff --git a/tools/abandon_old_reviews.sh b/tools/abandon_old_reviews.sh deleted file mode 100644 index 36a07c4e..00000000 --- a/tools/abandon_old_reviews.sh +++ /dev/null @@ -1,169 +0,0 @@ -#!/usr/bin/env bash -# Copyright 2019 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# WARNING! 
-# Please do not run this script without talking to the TripleO PTL. Auto -# abandoning people's changes is a good thing, but must be done with care. -# -# before you run this modify your .ssh/config to create a -# review.opendev.org entry: -# -# Host review.opendev.org -# User -# Port 29418 -# - -DRY_RUN=0 -CLEAN_PROJECT="" - -function print_help { - echo "Script to abandon patches without activity for more than 4 weeks." - echo "Usage:" - echo " ./abandon_old_reviews.sh [--dry-run] [--project ] [--help]" - echo " --dry-run In dry-run mode it will only print what patches would be abandoned " - echo " but will not take any real actions in gerrit" - echo " --project Only check patches from if passed." - echo " It must be one of the projects which are a part of the TripleO-group." - echo " If project is not provided, all projects from the TripleO-group will be checked" - echo " --help Print help message" -} - -while [ $# -gt 0 ]; do - key="${1}" - - case $key in - --dry-run) - echo "Enabling dry run mode" - DRY_RUN=1 - shift # past argument - ;; - --project) - CLEAN_PROJECT="project:openstack/${2}" - shift # past argument - shift # past value - ;; - --help) - print_help - exit 2 - esac -done - -set -o errexit -DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" - -function abandon_review { - local gitid=$1 - shift - local msg=$@ - unassign_and_new_bug $gitid - if [ $DRY_RUN -eq 1 ]; then - echo "Would abandon $gitid" - else - echo "Abandoning $gitid" - ssh review.opendev.org gerrit review $gitid --abandon --message \"$msg\" - fi -} - -function unassign_and_new_bug { - # unassign current assignee and set bug to 'new' status - local gitid=$1 - cm=$(ssh review.opendev.org "gerrit query $gitid --current-patch-set --format json" | jq .commitMessage) - for closes in $(echo -e $cm | awk '/[cC]loses-[bB]ug/ {match($0,/[0-9]+/); bug=substr($0,RSTART,RLENGTH); print bug}'); do - if [ $DRY_RUN -eq 1 ]; then - echo "Would unassign and tag 'timeout-abandon' $closes" - else - 
echo "Attempting to change status of bug $closes to New" - python "$DIR/unassign_bug.py" $closes - fi - done -} - -PROJECTS="($( -python - < 90 days without comment and currently blocked by a -core reviewer with a -2. We are abandoning this for now. -Feel free to reactivate the review by pressing the restore button and -contacting the reviewer with the -2 on this review to ensure you -address their concerns. For more details check policy -https://specs.openstack.org/openstack/tripleo-specs/specs/policy/patch-abandonment.html -EOF -) - -for review in $blocked_reviews; do - echo "Blocked review $review" - abandon_review $review $blocked_msg -done - -# then purge all the reviews that are > 90d with no changes and Zuul has -1ed - -failing_reviews=$(ssh review.opendev.org "gerrit query --current-patch-set --format json $PROJECTS status:open age:90d NOT label:Verified>=1,Zuul" | jq .currentPatchSet.revision | grep -v null | sed 's/"//g') - -failing_msg=$(cat < 90 days without comment, and failed Zuul the last -time it was checked. We are abandoning this for now. -Feel free to reactivate the review by pressing the restore button and -leaving a 'recheck' comment to get fresh test results. -For more details check policy -https://specs.openstack.org/openstack/tripleo-specs/specs/policy/patch-abandonment.html -EOF -) - -for review in $failing_reviews; do - echo "Failing review $review" - abandon_review $review $failing_msg -done - -# then purge all the reviews that are > 180 days with WIP -1 - -very_old_reviews=$(ssh review.opendev.org "gerrit query --current-patch-set --format json $PROJECTS status:open age:180d Workflow<=-1" | jq .currentPatchSet.revision | grep -v null | sed 's/"//g') - -very_old_msg=$(cat < 180 days without comment and WIP -1. We are abandoning this for now. -Feel free to reactivate the review by pressing the restore button and -contacting the reviewers. 
-For more details check policy -https://specs.openstack.org/openstack/tripleo-specs/specs/policy/patch-abandonment.html -EOF -) - -for review in $very_old_reviews; do - echo "Workflow -1 review $review" - abandon_review $review $very_old_msg -done \ No newline at end of file diff --git a/tools/unassign_bug.py b/tools/unassign_bug.py deleted file mode 100644 index da9043fa..00000000 --- a/tools/unassign_bug.py +++ /dev/null @@ -1,56 +0,0 @@ -# -# Copyright 2019 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Unassigns assignee from tripleobug, adds message and tag. -If you get the following exception, you need X11 and python-dbus installed: -'RuntimeError: No recommended backend was available. Install -the keyrings.alt package if you want to use the non-recommended -backends. See README.rst for details.' -or check solutions from: -https://github.com/jaraco/keyring/issues/258 -""" - -import os -import sys - -from launchpadlib.launchpad import Launchpad - - -MSG_BODY = "\ -This bug has had a related patch abandoned and has been automatically \ -un-assigned due to inactivity. Please re-assign yourself if you are \ -continuing work or adjust the state as appropriate if it is no longer valid." 
- - -def unassign(bug_num): - login = os.environ.get('LAUNCHPAD_LOGIN', 'tripleo') - password = os.environ.get('LAUNCHPAD_PASSWORD', 'production') - launchpad = Launchpad.login_with(login, password) - b = launchpad.bugs[bug_num] - for task in b.bug_tasks: - for tag in task.bug_target_name: - if 'tripleo' not in tag: - # try not to interfere with non-tripleo projects too much - continue - task.assignee = None - if task.status == "In Progress": - task.status = 'New' - task.lp_save() - b.tags = b.tags + ['timeout-abandon'] - b.newMessage(content=MSG_BODY, subject='auto-abandon-script') - b.lp_save() - - -if __name__ == '__main__': - unassign(int(sys.argv[1])) diff --git a/tox.ini b/tox.ini deleted file mode 100644 index b629604e..00000000 --- a/tox.ini +++ /dev/null @@ -1,18 +0,0 @@ -[tox] -minversion = 2.0 -envlist = docs,py36 -skipsdist = True - -[testenv] -basepython = python3 -usedevelop = True -setenv = VIRTUAL_ENV={envdir} -deps = -r{toxinidir}/requirements.txt -commands = stestr run --slowest {posargs} - -[testenv:venv] -commands = {posargs} - -[testenv:docs] -commands = - sphinx-build -W -b html doc/source doc/build/html