diff --git a/README.md b/README.md index 09ecc69..5874b52 100644 --- a/README.md +++ b/README.md @@ -18,9 +18,7 @@ Valet responds to the challenges outlined above by enhancing OpenStack Nova sche * [valet-openstack](https://github.com/att-comdev/valet/blob/master/doc/valet_os.md): a set of OpenStack plugins used to interact with Valet * [valet-api](https://github.com/att-comdev/valet/blob/master/doc/valet_api.md): an API engine used to interact with Valet * [Ostro](https://github.com/att-comdev/valet/blob/master/doc/ostro.md): a placement optimization engine -* Music: a data storage and persistence service * [ostro-listener](https://github.com/att-comdev/valet/blob/master/doc/ostro_listener.md): a message bus listener used in conjunction with Ostro and Music -* [havalet](https://github.com/att-comdev/valet/blob/master/doc/ha.md): a service that assists in providing high availability for Valet ## Additional documents: diff --git a/doc/.idea/doc.iml b/doc/.idea/doc.iml new file mode 100644 index 0000000..6711606 --- /dev/null +++ b/doc/.idea/doc.iml @@ -0,0 +1,11 @@ + + + + + + + + + + \ No newline at end of file diff --git a/doc/.idea/misc.xml b/doc/.idea/misc.xml new file mode 100644 index 0000000..ed7ebf7 --- /dev/null +++ b/doc/.idea/misc.xml @@ -0,0 +1,4 @@ + + + + \ No newline at end of file diff --git a/doc/.idea/modules.xml b/doc/.idea/modules.xml new file mode 100644 index 0000000..989d897 --- /dev/null +++ b/doc/.idea/modules.xml @@ -0,0 +1,8 @@ + + + + + + + + \ No newline at end of file diff --git a/doc/.idea/workspace.xml b/doc/.idea/workspace.xml new file mode 100644 index 0000000..305f06f --- /dev/null +++ b/doc/.idea/workspace.xml @@ -0,0 +1,250 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 1485886196137 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/doc/LICENSE b/doc/LICENSE deleted 
file mode 100644 index 68c771a..0000000 --- a/doc/LICENSE +++ /dev/null @@ -1,176 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. 
Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative 
Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- diff --git a/doc/ha.md b/doc/ha.md deleted file mode 100644 index 242d50d..0000000 --- a/doc/ha.md +++ /dev/null @@ -1,111 +0,0 @@ -High Availability Valet Tools -============================= - -This tool monitors one or more configured processes to maintain high -availability. - -~~~~ {.bash} -$ python ./ha_valet.py [-p name] -~~~~ - -ha\_valet.cfg -------------- - -The ha\_valet configuration file contains a list of dictionaries. List -keys are logical process names. List values are dictionaries -representing a monitored Valet-related process. - -Each dictionary **must** contain the following properties: - - host - user - port - protocol - start_command - stop_command - test_command - -Optional properties include: - - order - priority - standy_by_list - -### Notes - -- The return value of `test_command` **must not** be 0 and should - reflect the monitored process priority (see next section). - -- `stand_by_list` is an optional comma-delimited list of hosts used in - conjunction with active/stand-by scenarios. ha\_valet will attempt - to restart the instance with the lower priority. If that instance - fails to start, ha\_valet will try restarting the process of the - next host in the list. - -- `priority` is used to establish the primary/secondary hierarchy. It - **must** be greater than 0. The lower the number, the higher the - priority. - -### Monitored Process Priority - -Monitored process priority is used in conjunction with active/stand-by -scenarios. Unless a process is down, its priority **must** be greater -than 0. The lower the number, the higher the priority. - -For example, an instance returning `1` (in response to `test_command`) -will take precedence over an instance returning `2`. A priority of 0 -means the process is down. 
- -Examples --------- - -### Host A - - :Ostro - host = Host_A - stand_by_list = Host_A,Host_B - user = stack - port = 8091 - protocol = http - priority = 1 - start_command="ssh %s@%s 'cd @OSTRO_SERVER_DIR@ ; sudo python ./ostro_daemon.py start'" % (user, host) - stop_command="ssh %s@%s 'cd @OSTRO_SERVER_DIR@ ; sudo python ./ostro_daemon.py stop'" % (user, host) - test_command="ssh %s@%s 'exit $(@OSTRO_SERVER_DIR@ ; sudo python ./ostro_daemon.py status ; echo $?)'" % (user, host) - - :Allegro - host = Host_A - user = stack - port = 8090 - protocol = http - priority = 1 - start_command="sudo python @ALLEGRO_WSGI_DIR@/wsgi.py &" - stop_command="sudo pkill -f wsgi" - test_command="netstat -nap | grep %s | grep LISTEN | wc -l | exit $(awk \'{print $1}\')" % (port) - -### Host B (172.20.90.130) - - :Ostro - host = Host_B - stand_by_list = Host_A,Host_B - user = stack - port = 8091 - protocol = http - priority = 2 - start_command="ssh %s@%s 'cd @OSTRO_SERVER_DIR@ ; sudo python ./ostro_daemon.py start'" % (user, host) - stop_command="ssh %s@%s 'cd @OSTRO_SERVER_DIR@ ; sudo python ./ostro_daemon.py stop'" % (user, host) - test_command="ssh %s@%s 'exit $(@OSTRO_SERVER_DIR@ ; sudo python ./ostro_daemon.py status ; echo $?)'" % (user, host) - - :Allegro - host = Host_B - user = stack - port = 8090 - protocol = http - priority = 1 - start_command="sudo python @ALLEGRO_WSGI_DIR@/wsgi.py &" - stop_command="sudo pkill -f wsgi" - test_command="netstat -nap | grep %s | grep LISTEN | wc -l | exit $(awk \'{print $1}\')" % (port) - -Contact -------- - -Joe D'Andrea \ No newline at end of file diff --git a/doc/ostro_listener.md b/doc/ostro_listener.md index 11ec580..e6a6929 100644 --- a/doc/ostro_listener.md +++ b/doc/ostro_listener.md @@ -23,18 +23,19 @@ Throughout this document, the following installation-specific items are required. Have values for these prepared and ready before continuing. Suggestions for values are provided in this document where applicable. 
- Name Description Example - ----------------------------- --------------------------------------------------- ------------------------------------------- - `$USER` User id `user1234` - `$VENV` Python virtual environment path (if any) `/etc/ostro-listener/venv` - `$OSTRO_LISTENER_PATH` Local git repository's `ostro_listener` directory `/home/user1234/git/allegro/ostro_listener` - `$CONFIG_FILE` Event Listener configuration file `/etc/ostro-listener/ostro-listener.conf` - `$RABBITMQ_HOST` RabbitMQ hostname or IP address `localhost` - `$RABBITMQ_USERNAME` RabbitMQ username `guest` - `$RABBITMQ_PASSWORD_FILE` Full path to RabbitMQ password file `/etc/ostro-listener/passwd` - `$MUSIC_URL` Music API endpoints and port in URL format `http://127.0.0.1:8080/` - `$MUSIC_KEYSPACE` Music keyspace `valet` - `$MUSIC_REPLICATION_FACTOR` Music replication factor `1` +| Name | Description | Example | +|------|-------------|---------| +| `$USER` | User id | `user1234` | +| `$VENV` | Python virtual environment path (if any) | `/etc/ostro-listener/venv` | +| `$OSTRO_LISTENER_PATH` | Local git repository's `ostro_listener` directory | `/home/user1234/git/allegro/ostro_listener` | +| `$CONFIG_FILE` | Event Listener configuration file | `/etc/ostro-listener/ostro-listener.conf` | +| `$RABBITMQ_HOST` | RabbitMQ hostname or IP address | `localhost` | +| `$RABBITMQ_USERNAME` | RabbitMQ username | `guest` | +| `$RABBITMQ_PASSWORD_FILE` | Full path to RabbitMQ password file | `/etc/ostro-listener/passwd` | +| `$MUSIC_URL` | Music API endpoints and port in URL format | `http://127.0.0.1:8080/` | +| `$MUSIC_KEYSPACE` | Music keyspace | `valet` | +| `$MUSIC_REPLICATION_FACTOR` | Music replication factor | `1` | + Root or sufficient sudo privileges are required for some steps. @@ -250,8 +251,3 @@ $ sudo pip uninstall ostro-listener Remove previously made configuration file changes, files, and other settings as needed. 
- -Contact -------- - -Joe D'Andrea diff --git a/doc/ostro_release.md b/doc/ostro_release.md index ae04014..adcac90 100644 --- a/doc/ostro_release.md +++ b/doc/ostro_release.md @@ -28,15 +28,6 @@ Valet1.0/Ostro features load spikes of tenant applications. Later, we will deploy more dynamic mechanism in the future version of Ostro. -- High availability Ostro replicas run as active-passive way. When - active Ostro fails, automatically the passive one is activated via - HAValet. All data is updated in MUSIC database at runtime whenever - it is changed. When the passive Ostro is activated, it gets data - from MUSIC to initialize its status rather than from OpenStack. - Ostro also takes ping messages to show if it is alive or not. - - Runtime update via the Oslo message bus or RO Working on this. - Migration tip Working on this. - - diff --git a/doc/valet.md b/doc/valet.md deleted file mode 100644 index fb1520d..0000000 --- a/doc/valet.md +++ /dev/null @@ -1,34 +0,0 @@ -# Valet - -Valet gives OpenStack the ability to optimize cloud resources while simultaneously meeting a cloud application's QoS requirements. Valet provides an api service, a placement optimizer (Ostro), a high availability data storage and persistence layer (Music), and a set of OpenStack plugins. - -**IMPORTANT**: [Overall Installation of Valet is covered in a separate document](https://github.com/att-comdev/valet/blob/master/doc/valet_os.md). 
- -Learn more about Valet: - -* [OpenStack Newton Summit Presentation](https://www.openstack.org/videos/video/valet-holistic-data-center-optimization-for-openstack) (Austin, TX, 27 April 2016) -* [Presentation Slides](http://www.research.att.com/export/sites/att_labs/techdocs/TD_101806.pdf) (PDF) - -Valet consists of the following components: - -* [valet-openstack](https://github.com/att-comdev/valet/blob/master/doc/valet_os.md): a set of OpenStack plugins used to interact with Valet -* [valet-api](https://github.com/att-comdev/valet/blob/master/doc/valet_api.md): an API engine used to interact with Valet -* [Ostro](https://github.com/att-comdev/valet/blob/master/doc/ostro.md): a placement optimization engine -* Music: a data storage and persistence service -* [ostro-listener](https://github.com/att-comdev/valet/blob/master/doc/ostro_listener.md): a message bus listener used in conjunction with Ostro and Music -* [havalet](https://github.com/att-comdev/valet/blob/master/doc/ha.md): a service that assists in providing high availability for Valet - -Additional documents: - -* [OpenStack Heat Resource Plugins](https://github.com/att-comdev/valet/blob/master/valet_plugins/valet_plugins/heat/README.md): Heat resources -* [Placement API](https://github.com/att-comdev/valet/blob/master/doc/valet_api.md): API requests/responses -* [Using Postman with valet-api](https://github.com/att-comdev/valet/blob/master/valet/tests/api/README.md): Postman support - -## Thank You - -Alicia Abella, Saar Alaluf, Bharath Balasubramanian, Roy Ben Hai, Shimon Benattar, Yael Ben Shalom, Benny Bustan, Rachel Cohen, Joe D'Andrea, Harel Dorfman, Boaz Elitzur, P.K. 
Esrawan, Inbal Harduf, Matti Hiltunen, Doron Honigsberg, Kaustubh Joshi, Gueyoung Jung, Gerald Karam, David Khanin, Israel Kliger, Erez Korn, Max Osipov, Chris Rice, Amnon Sagiv, Gideon Shafran, Galit Shemesh, Anna Yefimov; AT&T Advanced Technology and Architecture, AT&T Technology Development - AIC, Additional partners in AT&T Domain 2.0. Apologies if we missed anyone (please advise via email!). - -## Contact - -Joe D'Andrea - diff --git a/doc/valet_api.md b/doc/valet_api.md index 5ce0df8..2a4f44f 100644 --- a/doc/valet_api.md +++ b/doc/valet_api.md @@ -4,7 +4,7 @@ Valet gives OpenStack the ability to optimize cloud resources while simultaneous This document covers installation of valet-api, the API engine used to interact with Valet. -**IMPORTANT**: [Overall Installation of Valet is covered in a separate document](https://github.com/att-comdev/valet/blob/master/doc/valet_os.md). These instructions are to be used by the Bedminster and Tel Aviv development teams. +**IMPORTANT**: [Overall Installation of Valet is covered in a separate document](https://github.com/att-comdev/valet/blob/master/doc/valet_os.md). ## Prerequisites @@ -13,7 +13,7 @@ Prior to installation: * Ubuntu 14.04 LTS * Python 2.7.6 with pip * An OpenStack Kilo cloud -* Music 6.0 +* [Music](https://github.com/att-comdev/valet) 6.0 * [Ostro](https://github.com/att-comdev/valet/blob/master/doc/ostro.md) 2.0 Throughout this document, the following installation-specific items are required. Have values for these prepared and ready before continuing. Suggestions for values are provided in this document where applicable. @@ -249,7 +249,3 @@ $ sudo pip uninstall valet-api ``` Remove previously made configuration file changes, OpenStack user accounts, and other settings as needed. 
- -## Contact - -Joe D'Andrea diff --git a/doc/valet_os.md b/doc/valet_os.md index c9ee1e5..42f24b6 100644 --- a/doc/valet_os.md +++ b/doc/valet_os.md @@ -173,7 +173,3 @@ $ sudo pip uninstall valet-openstack ``` Remove previously made configuration file changes, OpenStack user accounts, and other settings as needed. - -## Contact - -Joe D'Andrea diff --git a/etc/valet/api/config.py b/etc/valet/api/config.py index 239f31d..dc08178 100644 --- a/etc/valet/api/config.py +++ b/etc/valet/api/config.py @@ -1,12 +1,12 @@ # # Copyright 2014-2017 AT&T Intellectual Property -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -94,8 +94,9 @@ ostro = { messaging = { 'config': { - 'transport_url': 'rabbit://' + CONF.messaging.username + ':' + CONF.messaging.password + - '@' + CONF.messaging.host + ':' + str(CONF.messaging.port) + '/' + 'transport_url': 'rabbit://' + CONF.messaging.username + ':' + + CONF.messaging.password + '@' + CONF.messaging.host + ':' + + str(CONF.messaging.port) + '/' } } diff --git a/etc/valet/openstack/notification_listener/notification_listener.py b/etc/valet/openstack/notification_listener/notification_listener.py index e63d6fd..6fdc2b2 100644 --- a/etc/valet/openstack/notification_listener/notification_listener.py +++ b/etc/valet/openstack/notification_listener/notification_listener.py @@ -1,33 +1,39 @@ # # Copyright 2014-2017 AT&T Intellectual Property -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +"""Notification Listener.""" + import json from oslo_config import cfg import oslo_messaging class NotificationEndpoint(object): + """Endpoint for a notification (info, warn, error).""" def info(self, ctxt, publisher_id, event_type, payload, metadata): + """Print that a notification was received and dump the JSON payload.""" print('recv notification:') print(json.dumps(payload, indent=4)) def warn(self, ctxt, publisher_id, event_type, payload, metadata): + """Warn.""" None def error(self, ctxt, publisher_id, event_type, payload, metadata): + """Error.""" None transport = oslo_messaging.get_transport(cfg.CONF) diff --git a/setup.py b/setup.py index f2a4562..e88a1e1 100644 --- a/setup.py +++ b/setup.py @@ -1,19 +1,19 @@ # # Copyright 2014-2017 AT&T Intellectual Property -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-'''Setup''' +""" Setup """ import setuptools diff --git a/test-requirements.txt b/test-requirements.txt index a22accb..a95fece 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -26,4 +26,4 @@ python-heatclient<=1.2.0 oslo.messaging==1.8.3 #tempest<=12.1.0 ---------- needs to be installed on Jenkins, no output when using tox -#tempest-lib>=0.8.0 \ No newline at end of file +#tempest-lib>=0.8.0 diff --git a/tox.ini b/tox.ini index cd3f561..bd53ae6 100644 --- a/tox.ini +++ b/tox.ini @@ -63,4 +63,3 @@ show-source = True ignore = E123,E125,E501,H401,H105,H301 builtins = _ exclude=.venv,.git,.tox,dist,doc,*openstack/common*,*lib/python*,*egg,build - diff --git a/valet/api/app.py b/valet/api/app.py index 227d284..8e90e6b 100644 --- a/valet/api/app.py +++ b/valet/api/app.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -'''Application''' +"""Application.""" from pecan.deploy import deploy from pecan import make_app @@ -23,7 +23,7 @@ from valet.api.db import models def setup_app(config): - """ App Setup """ + """App Setup.""" identity.init_identity() messaging.init_messaging() models.init_model() @@ -36,6 +36,7 @@ def setup_app(config): # entry point for apache2 def load_app(config_file): + """App Load.""" register_conf() set_domain(project='valet') return deploy(config_file) diff --git a/valet/api/common/__init__.py b/valet/api/common/__init__.py index 16a1385..ead1a25 100644 --- a/valet/api/common/__init__.py +++ b/valet/api/common/__init__.py @@ -1,19 +1,25 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# Copyright 2014-2017 AT&T Intellectual Property +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Terminate thread.""" + import ctypes def terminate_thread(thread): - """Terminates a python thread from another thread. + """Terminate a python thread from another thread. :param thread: a threading.Thread instance """ @@ -26,8 +32,8 @@ def terminate_thread(thread): if res == 0: raise ValueError("nonexistent thread id") elif res > 1: - # """if it returns a number greater than one, you're in trouble, - # and you should call it again with exc=NULL to revert the effect""" + # If it returns a number greater than one, you're in trouble, + # and you should call it again with exc=NULL to revert the effect ctypes.pythonapi.PyThreadState_SetAsyncExc(thread.ident, None) raise SystemError("PyThreadState_SetAsyncExc failed") print('valet watcher thread exits') diff --git a/valet/api/common/compute.py b/valet/api/common/compute.py index e1a289c..a7178ed 100644 --- a/valet/api/common/compute.py +++ b/valet/api/common/compute.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-'''Compute helper library''' +"""Compute helper library.""" from novaclient import client from pecan import conf @@ -23,7 +23,7 @@ VERSION = 2 def nova_client(): - '''Returns a nova client''' + """Return a nova client.""" sess = conf.identity.engine.session nova = client.Client(VERSION, session=sess) return nova diff --git a/valet/api/common/hooks.py b/valet/api/common/hooks.py index d9b5c9b..f0e2bd2 100644 --- a/valet/api/common/hooks.py +++ b/valet/api/common/hooks.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -'''Hooks''' +"""Hooks.""" import json import logging @@ -31,8 +31,10 @@ LOG = logging.getLogger(__name__) class MessageNotificationHook(PecanHook): - '''Send API request/responses out as Oslo msg notifications.''' + """Send API request/responses out as Oslo msg notifications.""" + def after(self, state): + """Function sends valet notification.""" self.dummy = True LOG.info('sending notification') notifier = conf.messaging.notifier @@ -44,7 +46,8 @@ class MessageNotificationHook(PecanHook): else: notifier_fn = notifier.error - ctxt = {} # Not using this just yet. + # Not using this just yet. 
+ ctxt = {} request_path = state.request.path @@ -86,7 +89,8 @@ class MessageNotificationHook(PecanHook): } } - # notifier_fn blocks in case rabbit mq is down - it prevents Valet API to return its response :( + # notifier_fn blocks in case rabbit mq is down + # it prevents Valet API to return its response # send the notification in a different thread notifier_thread = threading.Thread(target=notifier_fn, args=(ctxt, event_type, payload)) notifier_thread.start() @@ -99,10 +103,11 @@ class MessageNotificationHook(PecanHook): class NotFoundHook(PecanHook): - '''Catchall 'not found' hook for API''' + """Catchall 'not found' hook for API.""" + def on_error(self, state, exc): + """Redirect to app-specific not_found endpoint if 404 only.""" self.dummy = True - '''Redirects to app-specific not_found endpoint if 404 only''' if isinstance(exc, webob.exc.WSGIHTTPException) and exc.code == 404: message = _('The resource could not be found.') error('/errors/not_found', message) diff --git a/valet/api/common/i18n.py b/valet/api/common/i18n.py index 29d0f1b..919fbcb 100644 --- a/valet/api/common/i18n.py +++ b/valet/api/common/i18n.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""i18n library""" +"""i18n library.""" import gettext diff --git a/valet/api/common/identity.py b/valet/api/common/identity.py index d2d1317..a062d22 100644 --- a/valet/api/common/identity.py +++ b/valet/api/common/identity.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-'''Identity helper library''' +"""Identity helper library.""" from datetime import datetime @@ -30,12 +30,13 @@ LOG = logging.getLogger(__name__) def utcnow(): - '''Returns the time (UTC)''' + """Return the time (UTC).""" return datetime.now(tz=pytz.utc) class Identity(object): - '''Convenience library for all identity service-related queries.''' + """Convenience library for all identity service-related queries.""" + _args = None _client = None _interface = None @@ -43,7 +44,7 @@ class Identity(object): @classmethod def is_token_admin(cls, token): - '''Returns true if decoded token has an admin role''' + """Return true if decoded token has an admin role.""" for role in token.user.get('roles', []): if role.get('name') == 'admin': return True @@ -51,16 +52,16 @@ class Identity(object): @classmethod def tenant_from_token(cls, token): - '''Returns tenant id from decoded token''' + """Return tenant id from decoded token.""" return token.tenant.get('id', None) @classmethod def user_from_token(cls, token): - '''Returns user id from decoded token''' + """Return user id from decoded token.""" return token.user.get('id', None) def __init__(self, interface='admin', **kwargs): - '''Initializer.''' + """Initializer.""" self._interface = interface self._args = kwargs self._client = None @@ -68,7 +69,7 @@ class Identity(object): @property def _client_expired(self): - '''Returns True if cached client's token is expired.''' + """Return True if cached client's token is expired.""" # NOTE: Keystone may auto-regen the client now (v2? v3?) # If so, this trip may no longer be necessary. Doesn't # hurt to keep it around for the time being. 
@@ -84,7 +85,7 @@ class Identity(object): @property def client(self): - '''Returns an identity client.''' + """Return an identity client.""" if not self._client or self._client_expired: auth = v2.Password(**self._args) self._session = session.Session(auth=auth) @@ -94,11 +95,11 @@ class Identity(object): @property def session(self): - '''Read-only access to the session.''' + """Read-only access to the session.""" return self._session def validate_token(self, auth_token): - '''Returns validated token or None if invalid''' + """Return validated token or None if invalid.""" kwargs = { 'token': auth_token, } @@ -110,7 +111,7 @@ class Identity(object): return None def is_tenant_list_valid(self, tenant_list): - '''Returns true if tenant list contains valid tenant IDs''' + """Return true if tenant list contains valid tenant IDs.""" tenants = self.client.tenants.list() if isinstance(tenant_list, list): found = False @@ -123,14 +124,15 @@ class Identity(object): def is_tenant_in_tenants(tenant_id, tenants): - for tenant in tenants: - if tenant_id == tenant.id: - return True - return False + """Return true if tenant exists.""" + for tenant in tenants: + if tenant_id == tenant.id: + return True + return False def _identity_engine_from_config(config): - '''Initialize the identity engine based on supplied config.''' + """Initialize the identity engine based on supplied config.""" # Using tenant_name instead of project name due to keystone v2 kwargs = { 'username': config.get('username'), @@ -144,7 +146,7 @@ def _identity_engine_from_config(config): def init_identity(): - '''Initialize the identity engine and place in the config.''' + """Initialize the identity engine and place in the config.""" config = conf.identity.config engine = _identity_engine_from_config(config) conf.identity.engine = engine diff --git a/valet/api/common/messaging.py b/valet/api/common/messaging.py index b768c0f..ca66fcd 100644 --- a/valet/api/common/messaging.py +++ b/valet/api/common/messaging.py @@ 
-13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -'''Messaging helper library''' +"""Messaging helper library.""" from oslo_config import cfg import oslo_messaging as messaging @@ -22,7 +22,7 @@ from valet.api.conf import set_domain, DOMAIN def _messaging_notifier_from_config(config): - '''Initialize the messaging engine based on supplied config.''' + """Initialize the messaging engine based on supplied config.""" transport_url = config.get('transport_url') transport = messaging.get_transport(cfg.CONF, transport_url) notifier = messaging.Notifier(transport, driver='messaging', @@ -32,7 +32,7 @@ def _messaging_notifier_from_config(config): def init_messaging(): - '''Initialize the messaging engine and place in the config.''' + """Initialize the messaging engine and place in the config.""" set_domain(DOMAIN) config = conf.messaging.config notifier = _messaging_notifier_from_config(config) diff --git a/valet/api/common/ostro_helper.py b/valet/api/common/ostro_helper.py index 3b6b577..5c00507 100644 --- a/valet/api/common/ostro_helper.py +++ b/valet/api/common/ostro_helper.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -'''Ostro helper library''' +"""Ostro helper library.""" import json import logging @@ -42,13 +42,13 @@ EXCLUSIVITY = 'exclusivity' def _log(text, title="Ostro"): - '''Log helper''' + """Log helper.""" log_text = "%s: %s" % (title, text) LOG.debug(log_text) class Ostro(object): - '''Ostro optimization engine helper class.''' + """Ostro optimization engine helper class.""" args = None request = None @@ -56,12 +56,15 @@ class Ostro(object): error_uri = None tenant_id = None - tries = None # Number of times to poll for placement. - interval = None # Interval in seconds to poll for placement. + # Number of times to poll for placement. + tries = None + + # Interval in seconds to poll for placement. 
+ interval = None @classmethod def _build_error(cls, message): - '''Build an Ostro-style error message''' + """Build an Ostro-style error message.""" if not message: message = _("Unknown error") error = { @@ -74,7 +77,7 @@ class Ostro(object): @classmethod def _build_uuid_map(cls, resources): - '''Build a dict mapping names to UUIDs.''' + """Build a dict mapping names to UUIDs.""" mapping = {} for key in resources.iterkeys(): if 'name' in resources[key]: @@ -84,7 +87,7 @@ class Ostro(object): @classmethod def _sanitize_resources(cls, resources): - '''Ensure lowercase keys at the top level of each resource.''' + """Ensure lowercase keys at the top level of each resource.""" for res in resources.itervalues(): for key in list(res.keys()): if not key.islower(): @@ -92,12 +95,12 @@ class Ostro(object): return resources def __init__(self): - '''Initializer''' + """Initializer.""" self.tries = conf.music.get('tries', 10) self.interval = conf.music.get('interval', 1) def _map_names_to_uuids(self, mapping, data): - '''Map resource names to their UUID equivalents.''' + """Map resource names to their UUID equivalents.""" if isinstance(data, dict): for key in data.iterkeys(): if key != 'name': @@ -110,11 +113,11 @@ class Ostro(object): return data def _prepare_resources(self, resources): - ''' Pre-digests resource data for use by Ostro. + """Pre-digest resource data for use by Ostro. Maps Heat resource names to Orchestration UUIDs. Ensures exclusivity groups exist and have tenant_id as a member. - ''' + """ mapping = self._build_uuid_map(resources) ostro_resources = self._map_names_to_uuids(mapping, resources) self._sanitize_resources(ostro_resources) @@ -126,8 +129,7 @@ class Ostro(object): # TODO(JD): This really belongs in valet-engine once it exists. def _send(self, stack_id, request): - '''Send request.''' - + """Send request.""" # Creating the placement request effectively enqueues it. 
PlacementRequest(stack_id=stack_id, request=request) # pylint: disable=W0612 @@ -149,13 +151,13 @@ class Ostro(object): return json.dumps(response) def _verify_groups(self, resources, tenant_id): - ''' Verifies group settings. Returns an error status dict if the + """Verify group settings. - group type is invalid, if a group name is used when the type - is affinity or diversity, if a nonexistant exclusivity group - is found, or if the tenant is not a group member. - Returns None if ok. - ''' + Returns an error status dict if the group type is invalid, if a + group name is used when the type is affinity or diversity, if a + nonexistant exclusivity group is found, or if the tenant + is not a group member. Returns None if ok. + """ message = None for res in resources.itervalues(): res_type = res.get('type') @@ -167,13 +169,17 @@ class Ostro(object): group_type == DIVERSITY: if group_name: self.error_uri = '/errors/conflict' - message = _("%s must not be used when {0} is '{1}'. ").format(GROUP_NAME, GROUP_TYPE, group_type) + message = _("%s must not be used when" + " {0} is '{1}'.").format(GROUP_NAME, + GROUP_TYPE, + group_type) break elif group_type == EXCLUSIVITY: message = self._verify_exclusivity(group_name, tenant_id) else: self.error_uri = '/errors/invalid' - message = _("{0} '{1}' is invalid.").format(GROUP_TYPE, group_type) + message = _("{0} '{1}' is invalid.").format(GROUP_TYPE, + group_type) break if message: return self._build_error(message) @@ -182,7 +188,9 @@ class Ostro(object): return_message = None if not group_name: self.error_uri = '/errors/invalid' - return _("%s must be used when {0} is '{1}'.").format(GROUP_NAME, GROUP_TYPE, EXCLUSIVITY) + return _("%s must be used when {0} is '{1}'.").format(GROUP_NAME, + GROUP_TYPE, + EXCLUSIVITY) group = Group.query.filter_by( # pylint: disable=E1101 name=group_name).first() @@ -191,15 +199,19 @@ class Ostro(object): return_message = "%s '%s' not found" % (GROUP_NAME, group_name) elif group and tenant_id not in 
group.members: self.error_uri = '/errors/conflict' - return_message = _("Tenant ID %s not a member of {0} '{1}' ({2})").format(self.tenant_id, GROUP_NAME, group.name, group.id) + return_message = _("Tenant ID %s not a member of " + "{0} '{1}' ({2})").format(self.tenant_id, + GROUP_NAME, + group.name, + group.id) return return_message def build_request(self, **kwargs): - ''' Build an Ostro request. If False is returned, - - the response attribute contains status as to the error. - ''' + """Build an Ostro request. + If False is returned then the response attribute contains + status as to the error. + """ # TODO(JD): Refactor this into create and update methods? self.args = kwargs.get('args') self.tenant_id = kwargs.get('tenant_id') @@ -235,7 +247,7 @@ class Ostro(object): return True def is_request_serviceable(self): - ''' Returns true if the request has at least one serviceable resource. ''' + """Return true if request has at least one serviceable resource.""" # TODO(JD): Ostro should return no placements vs throw an error. 
resources = self.request.get('resources', {}) for res in resources.itervalues(): @@ -245,7 +257,7 @@ class Ostro(object): return False def ping(self): - '''Send a ping request and obtain a response.''' + """Send a ping request and obtain a response.""" stack_id = str(uuid.uuid4()) self.args = {'stack_id': stack_id} self.response = None @@ -256,7 +268,7 @@ class Ostro(object): } def replan(self, **kwargs): - '''Replan a placement.''' + """Replan a placement.""" self.args = kwargs.get('args') self.response = None self.error_uri = None @@ -269,7 +281,7 @@ class Ostro(object): } def migrate(self, **kwargs): - '''Replan the placement for an existing resource.''' + """Replan the placement for an existing resource.""" self.args = kwargs.get('args') self.response = None self.error_uri = None @@ -281,7 +293,7 @@ class Ostro(object): } def query(self, **kwargs): - '''Send a query.''' + """Send a query.""" stack_id = str(uuid.uuid4()) self.args = kwargs.get('args') self.args['stack_id'] = stack_id @@ -295,7 +307,7 @@ class Ostro(object): } def send(self): - '''Send the request and obtain a response.''' + """Send the request and obtain a response.""" request_json = json.dumps([self.request]) # TODO(JD): Pass timeout value? diff --git a/valet/api/conf.py b/valet/api/conf.py index 69a565e..59552bc 100644 --- a/valet/api/conf.py +++ b/valet/api/conf.py @@ -13,6 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+"""Conf.""" + from oslo_config import cfg @@ -70,10 +72,12 @@ music_opts = [ def set_domain(project=DOMAIN): + """Set Domain.""" CONF([], project) def register_conf(): + """Register confs.""" CONF.register_group(server_group) CONF.register_opts(server_opts, server_group) CONF.register_group(music_group) diff --git a/valet/api/db/models/__init__.py b/valet/api/db/models/__init__.py index e11cd78..7051169 100644 --- a/valet/api/db/models/__init__.py +++ b/valet/api/db/models/__init__.py @@ -1,18 +1,15 @@ -# -*- encoding: utf-8 -*- # -# Copyright (c) 2014-2016 AT&T +# Copyright 2014-2017 AT&T Intellectual Property # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. diff --git a/valet/api/db/models/music/__init__.py b/valet/api/db/models/music/__init__.py index 4c17a11..0efab51 100644 --- a/valet/api/db/models/music/__init__.py +++ b/valet/api/db/models/music/__init__.py @@ -1,22 +1,19 @@ -# -*- encoding: utf-8 -*- # -# Copyright (c) 2014-2016 AT&T +# Copyright 2014-2017 AT&T Intellectual Property # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -'''Music ORM - Common Methods''' +"""Music ORM - Common Methods""" from abc import ABCMeta, abstractmethod import inspect @@ -28,7 +25,7 @@ from valet.api.db.models.music.music import Music def get_class(kls): - '''Returns a class given a fully qualified class name''' + """Returns a class given a fully qualified class name""" parts = kls.split('.') module = ".".join(parts[:-1]) mod = __import__(module) @@ -38,7 +35,7 @@ def get_class(kls): class abstractclassmethod(classmethod): # pylint: disable=C0103,R0903 - '''Abstract Class Method from Python 3.3's abc module''' + """Abstract Class Method from Python 3.3's abc module""" __isabstractmethod__ = True @@ -48,28 +45,28 @@ class abstractclassmethod(classmethod): # pylint: disable=C0103,R0903 class ClassPropertyDescriptor(object): # pylint: disable=R0903 - '''Supports the notion of a class property''' + """Supports the notion of a class property""" def __init__(self, fget, fset=None): - '''Initializer''' + """Initializer""" self.fget = fget self.fset = fset def __get__(self, obj, klass=None): - '''Get attribute''' + """Get attribute""" if klass is None: klass = type(obj) return self.fget.__get__(obj, klass)() def __set__(self, obj, value): - '''Set attribute''' + """Set attribute""" if not self.fset: raise AttributeError(_("Can't set attribute")) type_ = type(obj) return self.fset.__get__(obj, type_)(value) def setter(self, func): - '''Setter''' + """Setter""" if not isinstance(func, 
(classmethod, staticmethod)): func = classmethod(func) self.fset = func @@ -77,7 +74,7 @@ class ClassPropertyDescriptor(object): # pylint: disable=R0903 def classproperty(func): - '''Class Property decorator''' + """Class Property decorator""" if not isinstance(func, (classmethod, staticmethod)): func = classmethod(func) @@ -85,36 +82,36 @@ def classproperty(func): class Results(list): - '''Query results''' + """Query results""" def __init__(self, *args, **kwargs): # pylint: disable=W0613 - '''Initializer''' + """Initializer""" super(Results, self).__init__(args[0]) def all(self): - '''Return all''' + """Return all""" return self def first(self): - '''Return first''' + """Return first""" if len(self) > 0: return self[0] @six.add_metaclass(ABCMeta) class Base(object): - ''' A custom declarative base that provides some Elixir-inspired shortcuts. ''' + """ A custom declarative base that provides some Elixir-inspired shortcuts. """ __tablename__ = None @classproperty def query(cls): # pylint: disable=E0213 - '''Return a query object a la sqlalchemy''' + """Return a query object a la sqlalchemy""" return Query(cls) @classmethod def __kwargs(cls): - '''Return common keyword args''' + """Return common keyword args""" keyspace = conf.music.get('keyspace') kwargs = { 'keyspace': keyspace, @@ -124,33 +121,33 @@ class Base(object): @classmethod def create_table(cls): - '''Create table''' + """Create table""" kwargs = cls.__kwargs() kwargs['schema'] = cls.schema() conf.music.engine.create_table(**kwargs) @abstractclassmethod def schema(cls): - '''Return schema''' + """Return schema""" return cls() @abstractclassmethod def pk_name(cls): - '''Primary key name''' + """Primary key name""" return cls() @abstractmethod def pk_value(self): - '''Primary key value''' + """Primary key value""" pass @abstractmethod def values(self): - '''Values''' + """Values""" pass def insert(self): - '''Insert row''' + """Insert row""" kwargs = self.__kwargs() kwargs['values'] = self.values() pk_name 
= self.pk_name() @@ -161,7 +158,7 @@ class Base(object): conf.music.engine.create_row(**kwargs) def update(self): - '''Update row''' + """Update row""" kwargs = self.__kwargs() kwargs['pk_name'] = self.pk_name() kwargs['pk_value'] = self.pk_value() @@ -169,7 +166,7 @@ class Base(object): conf.music.engine.update_row_eventually(**kwargs) def delete(self): - '''Delete row''' + """Delete row""" kwargs = self.__kwargs() kwargs['pk_name'] = self.pk_name() kwargs['pk_value'] = self.pk_value() @@ -177,26 +174,26 @@ class Base(object): @classmethod def filter_by(cls, **kwargs): - '''Filter objects''' + """Filter objects""" return cls.query.filter_by(**kwargs) # pylint: disable=E1101 def flush(self, *args, **kwargs): - '''Flush changes to storage''' + """Flush changes to storage""" # TODO(JD): Implement in music? May be a no-op pass def as_dict(self): - '''Return object representation as a dictionary''' + """Return object representation as a dictionary""" return dict((k, v) for k, v in self.__dict__.items() if not k.startswith('_')) class Query(object): - '''Data Query''' + """Data Query""" model = None def __init__(self, model): - '''Initializer''' + """Initializer""" if inspect.isclass(model): self.model = model elif isinstance(model, basestring): @@ -204,7 +201,7 @@ class Query(object): assert inspect.isclass(self.model) def __kwargs(self): - '''Return common keyword args''' + """Return common keyword args""" keyspace = conf.music.get('keyspace') kwargs = { 'keyspace': keyspace, @@ -213,7 +210,7 @@ class Query(object): return kwargs def __rows_to_objects(self, rows): - '''Convert query response rows to objects''' + """Convert query response rows to objects""" results = [] pk_name = self.model.pk_name() # pylint: disable=E1101 for __, row in rows.iteritems(): # pylint: disable=W0612 @@ -224,13 +221,13 @@ class Query(object): return Results(results) def all(self): - '''Return all objects''' + """Return all objects""" kwargs = self.__kwargs() rows = 
conf.music.engine.read_all_rows(**kwargs) return self.__rows_to_objects(rows) def filter_by(self, **kwargs): - '''Filter objects''' + """Filter objects""" # Music doesn't allow filtering on anything but the primary key. # We need to get all items and then go looking for what we want. all_items = self.all() @@ -250,14 +247,14 @@ class Query(object): def init_model(): - '''Data Store Initialization''' + """Data Store Initialization""" conf.music.engine = _engine_from_config(conf.music) keyspace = conf.music.get('keyspace') conf.music.engine.create_keyspace(keyspace) def _engine_from_config(configuration): - '''Create database engine object based on configuration''' + """Create database engine object based on configuration""" configuration = dict(configuration) kwargs = { 'host': configuration.get('host'), @@ -268,36 +265,30 @@ def _engine_from_config(configuration): def start(): - '''Start transaction''' + """Start transaction""" pass def start_read_only(): - '''Start read-only transaction''' + """Start read-only transaction""" start() def commit(): - '''Commit transaction''' + """Commit transaction""" pass def rollback(): - '''Rollback transaction''' + """Rollback transaction""" pass def clear(): - '''Clear transaction''' + """Clear transaction""" pass def flush(): - '''Flush to disk''' + """Flush to disk""" pass - - -from valet.api.db.models.music.groups import Group -from valet.api.db.models.music.ostro import PlacementRequest, PlacementResult, Event -from valet.api.db.models.music.placements import Placement -from valet.api.db.models.music.plans import Plan diff --git a/valet/api/db/models/music/groups.py b/valet/api/db/models/music/groups.py index 33b5dec..6bf62fb 100644 --- a/valet/api/db/models/music/groups.py +++ b/valet/api/db/models/music/groups.py @@ -13,14 +13,15 @@ # See the License for the specific language governing permissions and # limitations under the License. -'''Group Model''' +"""Group Model.""" from . 
import Base import simplejson class Group(Base): - '''Group model''' + """Group model.""" + __tablename__ = 'groups' id = None # pylint: disable=C0103 @@ -31,7 +32,7 @@ class Group(Base): @classmethod def schema(cls): - '''Return schema.''' + """Return schema.""" schema = { 'id': 'text', 'name': 'text', @@ -44,16 +45,16 @@ class Group(Base): @classmethod def pk_name(cls): - '''Primary key name''' + """Primary key name.""" return 'id' def pk_value(self): - '''Primary key value''' + """Primary key value.""" return self.id def values(self): - '''Values''' - # TODO(JD): Support lists in Music + """Values.""" + # TODO(UNKNOWN): Support lists in Music # Lists aren't directly supported in Music, so we have to # convert to/from json on the way out/in. return { @@ -64,7 +65,7 @@ class Group(Base): } def __init__(self, name, description, type, members, _insert=True): - '''Initializer''' + """Initializer.""" super(Group, self).__init__() self.name = name self.description = description or "" @@ -73,15 +74,15 @@ class Group(Base): self.members = [] # members ignored at init time self.insert() else: - # TODO(JD): Support lists in Music + # TODO(UNKNOWN): Support lists in Music self.members = simplejson.loads(members) def __repr__(self): - '''Object representation''' + """Object representation.""" return '' % self.name def __json__(self): - '''JSON representation''' + """JSON representation.""" json_ = {} json_['id'] = self.id json_['name'] = self.name diff --git a/valet/api/db/models/music/music.py b/valet/api/db/models/music/music.py index 97f37cf..d56e189 100644 --- a/valet/api/db/models/music/music.py +++ b/valet/api/db/models/music/music.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-'''Music Data Store API''' +"""Music Data Store API.""" import json import logging @@ -27,7 +27,7 @@ LOG = logging.getLogger(__name__) class REST(object): - '''Helper class for REST operations.''' + """Helper class for REST operations.""" hosts = None port = None @@ -37,8 +37,7 @@ class REST(object): _urls = None def __init__(self, hosts, port, path='/', timeout='10'): - '''Initializer. Accepts target host list, port, and path.''' - + """Initializer. Accepts target host list, port, and path.""" self.hosts = hosts # List of IP or FQDNs self.port = port # Port Number self.path = path # Path starting with / @@ -46,8 +45,7 @@ class REST(object): @property def urls(self): - '''Returns list of URLs using each host, plus the port/path.''' - + """Return list of URLs using each host, plus the port/path.""" if not self._urls: urls = [] for host in self.hosts: @@ -62,17 +60,19 @@ class REST(object): @staticmethod def __headers(content_type='application/json'): - '''Returns HTTP request headers.''' + """Return HTTP request headers.""" headers = { 'accept': content_type, 'content-type': content_type, } return headers - def request(self, method='get', content_type='application/json', path='/', data=None): - ''' Performs HTTP request ''' + def request(self, method='get', content_type='application/json', path='/', + data=None): + """Perform HTTP request.""" if method not in ('post', 'get', 'put', 'delete'): - raise KeyError(_("Method must be one of post, get, put, or delete.")) + raise KeyError(_("Method must be one of post, get, put, " + "or delete.")) method_fn = getattr(requests, method) response = None @@ -107,7 +107,8 @@ class REST(object): class Music(object): - '''Wrapper for Music API''' + """Wrapper for Music API.""" + lock_names = None # Cache of lock names created during session lock_timeout = None # Maximum time in seconds to acquire a lock @@ -116,8 +117,7 @@ class Music(object): def __init__(self, host=None, hosts=None, # pylint: disable=R0913 port='8080', 
lock_timeout=10, replication_factor=3): - '''Initializer. Accepts a lock_timeout for atomic operations.''' - + """Initializer. Accept a lock_timeout for atomic operations.""" # If one host is provided, that overrides the list if not hosts: hosts = ['localhost'] @@ -137,7 +137,7 @@ class Music(object): self.replication_factor = replication_factor def create_keyspace(self, keyspace): - '''Creates a keyspace.''' + """Create a keyspace.""" data = { 'replicationInfo': { 'class': 'SimpleStrategy', @@ -154,7 +154,7 @@ class Music(object): return response.ok def create_table(self, keyspace, table, schema): - '''Creates a table.''' + """Create a table.""" data = { 'fields': schema, 'consistencyInfo': { @@ -171,14 +171,14 @@ class Music(object): return response.ok def version(self): - '''Returns version string.''' + """Return version string.""" path = '/version' response = self.rest.request(method='get', content_type='text/plain', path=path) return response.text def create_row(self, keyspace, table, values): - '''Create a row.''' + """Create a row.""" data = { 'values': values, 'consistencyInfo': { @@ -194,14 +194,14 @@ class Music(object): return response.ok def create_lock(self, lock_name): - '''Returns the lock id. Use for acquiring and releasing.''' + """Return the lock id. 
Use for acquiring and releasing.""" path = '/locks/create/%s' % lock_name response = self.rest.request(method='post', content_type='text/plain', path=path) return response.text def acquire_lock(self, lock_id): - '''Acquire a lock.''' + """Acquire a lock.""" path = '/locks/acquire/%s' % lock_id response = self.rest.request(method='get', content_type='text/plain', path=path) @@ -209,7 +209,7 @@ class Music(object): return response.text.lower() == 'true' def release_lock(self, lock_id): - '''Release a lock.''' + """Release a lock.""" path = '/locks/release/%s' % lock_id response = self.rest.request(method='delete', content_type='text/plain', path=path) @@ -217,7 +217,7 @@ class Music(object): @staticmethod def __row_url_path(keyspace, table, pk_name, pk_value): - '''Returns a Music-compliant row URL path.''' + """Return a Music-compliant row URL path.""" path = '/keyspaces/%(keyspace)s/tables/%(table)s/rows' % { 'keyspace': keyspace, 'table': table, @@ -229,7 +229,7 @@ class Music(object): def update_row_eventually(self, keyspace, table, # pylint: disable=R0913 pk_name, pk_value, values): - '''Update a row. Not atomic.''' + """Update a row. Not atomic.""" data = { 'values': values, 'consistencyInfo': { @@ -243,8 +243,7 @@ class Music(object): def update_row_atomically(self, keyspace, table, # pylint: disable=R0913 pk_name, pk_value, values): - '''Update a row atomically.''' - + """Update a row atomically.""" # Create lock for the candidate. The Music API dictates that the # lock name must be of the form keyspace.table.primary_key lock_name = '%(keyspace)s.%(table)s.%(primary_key)s' % { @@ -279,7 +278,7 @@ class Music(object): return response.ok def delete_row_eventually(self, keyspace, table, pk_name, pk_value): - '''Delete a row. Not atomic.''' + """Delete a row. 
Not atomic.""" data = { 'consistencyInfo': { 'type': 'eventual', @@ -291,7 +290,7 @@ class Music(object): return response.ok def read_row(self, keyspace, table, pk_name, pk_value, log=None): - '''Read one row based on a primary key name/value.''' + """Read one row based on a primary key name/value.""" path = self.__row_url_path(keyspace, table, pk_name, pk_value) response = self.rest.request(path=path) if log: @@ -299,11 +298,11 @@ class Music(object): return response.json() def read_all_rows(self, keyspace, table): - '''Read all rows.''' + """Read all rows.""" return self.read_row(keyspace, table, pk_name=None, pk_value=None) def drop_keyspace(self, keyspace): - '''Drops a keyspace.''' + """Drop a keyspace.""" data = { 'consistencyInfo': { 'type': 'eventual', @@ -315,16 +314,15 @@ class Music(object): return response.ok def delete_lock(self, lock_name): - '''Deletes a lock by name.''' + """Delete a lock by name.""" path = '/locks/delete/%s' % lock_name response = self.rest.request(content_type='text/plain', method='delete', path=path) return response.ok def delete_all_locks(self): - '''Delete all locks created during the lifetime of this object.''' - - # TODO(JD): Shouldn't this really be part of internal cleanup? + """Delete all locks created during the lifetime of this object.""" + # TODO(UNKNOWN): Shouldn't this really be part of internal cleanup? # FIXME: It can be several API calls. Any way to do in one fell swoop? for lock_name in self.lock_names: self.delete_lock(lock_name) diff --git a/valet/api/db/models/music/ostro.py b/valet/api/db/models/music/ostro.py index 56062e6..fe51f8f 100644 --- a/valet/api/db/models/music/ostro.py +++ b/valet/api/db/models/music/ostro.py @@ -13,13 +13,14 @@ # See the License for the specific language governing permissions and # limitations under the License. -'''Ostro Models''' +"""Ostro Models.""" from . 
import Base class PlacementRequest(Base): - '''Placement Request Model''' + """Placement Request Model.""" + __tablename__ = 'placement_requests' stack_id = None @@ -27,7 +28,7 @@ class PlacementRequest(Base): @classmethod def schema(cls): - '''Return schema.''' + """Return schema.""" schema = { 'stack_id': 'text', 'request': 'text', @@ -37,22 +38,22 @@ class PlacementRequest(Base): @classmethod def pk_name(cls): - '''Primary key name''' + """Primary key name.""" return 'stack_id' def pk_value(self): - '''Primary key value''' + """Primary key value.""" return self.stack_id def values(self): - '''Values''' + """Values.""" return { 'stack_id': self.stack_id, 'request': self.request, } def __init__(self, request, stack_id=None, _insert=True): - '''Initializer''' + """Initializer.""" super(PlacementRequest, self).__init__() self.stack_id = stack_id self.request = request @@ -60,11 +61,11 @@ class PlacementRequest(Base): self.insert() def __repr__(self): - '''Object representation''' + """Object representation.""" return '' % self.stack_id def __json__(self): - '''JSON representation''' + """JSON representation.""" json_ = {} json_['stack_id'] = self.stack_id json_['request'] = self.request @@ -72,7 +73,8 @@ class PlacementRequest(Base): class PlacementResult(Base): - '''Placement Result Model''' + """Placement Result Model.""" + __tablename__ = 'placement_results' stack_id = None @@ -80,7 +82,7 @@ class PlacementResult(Base): @classmethod def schema(cls): - '''Return schema.''' + """Return schema.""" schema = { 'stack_id': 'text', 'placement': 'text', @@ -90,22 +92,22 @@ class PlacementResult(Base): @classmethod def pk_name(cls): - '''Primary key name''' + """Primary key name.""" return 'stack_id' def pk_value(self): - '''Primary key value''' + """Primary key value.""" return self.stack_id def values(self): - '''Values''' + """Values.""" return { 'stack_id': self.stack_id, 'placement': self.placement, } def __init__(self, placement, stack_id=None, _insert=True): - 
'''Initializer''' + """Initializer.""" super(PlacementResult, self).__init__() self.stack_id = stack_id self.placement = placement @@ -113,11 +115,11 @@ class PlacementResult(Base): self.insert() def __repr__(self): - '''Object representation''' + """Object representation.""" return '' % self.stack_id def __json__(self): - '''JSON representation''' + """JSON representation.""" json_ = {} json_['stack_id'] = self.stack_id json_['placement'] = self.placement @@ -125,7 +127,8 @@ class PlacementResult(Base): class Event(Base): - '''Event Model''' + """Event Model.""" + __tablename__ = 'events' event_id = None @@ -133,7 +136,7 @@ class Event(Base): @classmethod def schema(cls): - '''Return schema.''' + """Return schema.""" schema = { 'event_id': 'text', 'event': 'text', @@ -143,22 +146,22 @@ class Event(Base): @classmethod def pk_name(cls): - '''Primary key name''' + """Primary key name.""" return 'event_id' def pk_value(self): - '''Primary key value''' + """Primary key value.""" return self.event_id def values(self): - '''Values''' + """Values.""" return { 'event_id': self.event_id, 'event': self.event, } def __init__(self, event, event_id=None, _insert=True): - '''Initializer''' + """Initializer.""" super(Event, self).__init__() self.event_id = event_id self.event = event @@ -166,11 +169,11 @@ class Event(Base): self.insert() def __repr__(self): - '''Object representation''' + """Object representation.""" return '' % self.event_id def __json__(self): - '''JSON representation''' + """JSON representation.""" json_ = {} json_['event_id'] = self.event_id json_['event'] = self.event diff --git a/valet/api/db/models/music/placements.py b/valet/api/db/models/music/placements.py index 701a238..3fe8ab1 100644 --- a/valet/api/db/models/music/placements.py +++ b/valet/api/db/models/music/placements.py @@ -13,13 +13,14 @@ # See the License for the specific language governing permissions and # limitations under the License. -'''Placement Model''' +"""Placement Model.""" from . 
import Base, Query class Placement(Base): - '''Placement Model''' + """Placement Model.""" + __tablename__ = 'placements' id = None # pylint: disable=C0103 @@ -32,7 +33,7 @@ class Placement(Base): @classmethod def schema(cls): - '''Return schema.''' + """Return schema.""" schema = { 'id': 'text', 'name': 'text', @@ -47,15 +48,15 @@ class Placement(Base): @classmethod def pk_name(cls): - '''Primary key name''' + """Primary key name.""" return 'id' def pk_value(self): - '''Primary key value''' + """Primary key value.""" return self.id def values(self): - '''Values''' + """Values.""" return { 'name': self.name, 'orchestration_id': self.orchestration_id, @@ -67,7 +68,7 @@ class Placement(Base): def __init__(self, name, orchestration_id, resource_id=None, plan=None, plan_id=None, location=None, reserved=False, _insert=True): - '''Initializer''' + """Initializer.""" super(Placement, self).__init__() self.name = name self.orchestration_id = orchestration_id @@ -82,11 +83,11 @@ class Placement(Base): self.insert() def __repr__(self): - '''Object representation''' + """Object representation.""" return '' % self.name def __json__(self): - '''JSON representation''' + """JSON representation.""" json_ = {} json_['id'] = self.id json_['name'] = self.name diff --git a/valet/api/db/models/music/plans.py b/valet/api/db/models/music/plans.py index fcbe1b1..c1af6ff 100644 --- a/valet/api/db/models/music/plans.py +++ b/valet/api/db/models/music/plans.py @@ -13,13 +13,14 @@ # See the License for the specific language governing permissions and # limitations under the License. -'''Plan Model''' +"""Plan Model.""" from . 
import Base, Query class Plan(Base): - '''Plan model''' + """Plan model.""" + __tablename__ = 'plans' id = None # pylint: disable=C0103 @@ -28,7 +29,7 @@ class Plan(Base): @classmethod def schema(cls): - '''Return schema.''' + """Return schema.""" schema = { 'id': 'text', 'name': 'text', @@ -39,22 +40,22 @@ class Plan(Base): @classmethod def pk_name(cls): - '''Primary key name''' + """Primary key name.""" return 'id' def pk_value(self): - '''Primary key value''' + """Primary key value.""" return self.id def values(self): - '''Values''' + """Values.""" return { 'name': self.name, 'stack_id': self.stack_id, } def __init__(self, name, stack_id, _insert=True): - '''Initializer''' + """Initializer.""" super(Plan, self).__init__() self.name = name self.stack_id = stack_id @@ -62,9 +63,8 @@ class Plan(Base): self.insert() def placements(self): - '''Return list of placements''' - - # TODO(JD): Make this a property? + """Return list of placements.""" + # TODO(UNKNOWN): Make this a property? all_results = Query("Placement").all() results = [] for placement in all_results: @@ -74,15 +74,15 @@ class Plan(Base): @property def orchestration_ids(self): - '''Return list of orchestration IDs''' + """Return list of orchestration IDs.""" return list(set([p.orchestration_id for p in self.placements()])) def __repr__(self): - '''Object representation''' + """Object representation.""" return '' % self.name def __json__(self): - '''JSON representation''' + """JSON representation.""" json_ = {} json_['id'] = self.id json_['stack_id'] = self.stack_id diff --git a/valet/api/v1/commands/populate.py b/valet/api/v1/commands/populate.py index 6bb063c..ec97188 100644 --- a/valet/api/v1/commands/populate.py +++ b/valet/api/v1/commands/populate.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-'''Populate command''' +"""Populate command.""" from pecan.commands.base import BaseCommand @@ -29,14 +29,15 @@ from valet.api.db.models import Plan def out(string): - '''Output helper''' + """Output helper.""" print("==> %s" % string) class PopulateCommand(BaseCommand): - '''Load a pecan environment and initializate the database.''' + """Load a pecan environment and initializate the database.""" def run(self, args): + """Function creates and initializes database and environment.""" super(PopulateCommand, self).run(args) out(_("Loading environment")) register_conf() diff --git a/valet/api/v1/controllers/__init__.py b/valet/api/v1/controllers/__init__.py index db56856..9cd9f40 100644 --- a/valet/api/v1/controllers/__init__.py +++ b/valet/api/v1/controllers/__init__.py @@ -1,22 +1,19 @@ -# -*- encoding: utf-8 -*- # -# Copyright (c) 2014-2016 AT&T +# Copyright 2014-2017 AT&T Intellectual Property # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-'''Controllers Package''' +"""Controllers Package.""" import logging from notario.decorators import instance_of @@ -36,7 +33,7 @@ LOG = logging.getLogger(__name__) # def valid_group_name(value): - '''Validator for group name type.''' + """Validator for group name type.""" if not value or not set(value) <= set(string.letters + string.digits + "-._~"): LOG.error("group name is not valid") LOG.error("group name must contain only uppercase and lowercase letters, decimal digits, \ @@ -45,12 +42,12 @@ def valid_group_name(value): @instance_of((list, dict)) def valid_plan_resources(value): - '''Validator for plan resources.''' + """Validator for plan resources.""" ensure(len(value) > 0) def valid_plan_update_action(value): - '''Validator for plan update action.''' + """Validator for plan update action.""" assert value in ['update', 'migrate'], _("must be update or migrate") # @@ -59,7 +56,7 @@ def valid_plan_update_action(value): def set_placements(plan, resources, placements): - '''Set placements''' + """Set placements.""" for uuid in placements.iterkeys(): name = resources[uuid]['name'] properties = placements[uuid]['properties'] @@ -70,11 +67,11 @@ def set_placements(plan, resources, placements): def reserve_placement(placement, resource_id=None, reserve=True, update=True): - ''' Reserve placement. Can optionally set the physical resource id. + """Reserve placement. Can optionally set the physical resource id. Set reserve=False to unreserve. Set update=False to not update the data store (if the update will be made later). - ''' + """ if placement: LOG.info(_('%(rsrv)s placement of %(orch_id)s in %(loc)s.'), {'rsrv': _("Reserving") if reserve else _("Unreserving"), @@ -92,7 +89,7 @@ def reserve_placement(placement, resource_id=None, reserve=True, update=True): def update_placements(placements, reserve_id=None, unlock_all=False): - '''Update placements. Optionally reserve one placement.''' + """Update placements. 
Optionally reserve one placement.""" for uuid in placements.iterkeys(): placement = Placement.query.filter_by( # pylint: disable=E1101 orchestration_id=uuid).first() @@ -119,7 +116,7 @@ def update_placements(placements, reserve_id=None, unlock_all=False): # def error(url, msg=None, **kwargs): - '''Error handler''' + """Error handler.""" if msg: request.context['error_message'] = msg if kwargs: diff --git a/valet/api/v1/controllers/errors.py b/valet/api/v1/controllers/errors.py index 794dfde..e0e3da0 100644 --- a/valet/api/v1/controllers/errors.py +++ b/valet/api/v1/controllers/errors.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -'''Errors''' +"""Errors.""" import logging from pecan import expose, request, response @@ -26,10 +26,9 @@ LOG = logging.getLogger(__name__) def error_wrapper(func): - '''Error decorator.''' + """Error decorator.""" def func_wrapper(self, **kw): - '''Wrapper.''' - + """Wrapper.""" kwargs = func(self, **kw) status = status_map.get(response.status_code) message = getattr(status, 'explanation', '') @@ -56,12 +55,12 @@ def error_wrapper(func): # pylint: disable=W0613 class ErrorsController(object): - ''' Errors Controller /errors/{error_name} ''' + """Error Controller /errors/{error_name}.""" @expose('json') @error_wrapper def schema(self, **kw): - '''400''' + """400.""" request.context['error_message'] = str(request.validation_error) response.status = 400 return request.context.get('kwargs') @@ -69,13 +68,13 @@ class ErrorsController(object): @expose('json') @error_wrapper def invalid(self, **kw): - '''400''' + """400.""" response.status = 400 return request.context.get('kwargs') @expose() def unauthorized(self, **kw): - '''401''' + """401.""" # This error is terse and opaque on purpose. # Don't give any clues to help AuthN along. 
response.status = 401 @@ -92,21 +91,21 @@ class ErrorsController(object): @expose('json') @error_wrapper def forbidden(self, **kw): - '''403''' + """403.""" response.status = 403 return request.context.get('kwargs') @expose('json') @error_wrapper def not_found(self, **kw): - '''404''' + """404.""" response.status = 404 return request.context.get('kwargs') @expose('json') @error_wrapper def not_allowed(self, **kw): - '''405''' + """405.""" kwargs = request.context.get('kwargs') if kwargs: allow = kwargs.get('allow', None) @@ -118,20 +117,20 @@ class ErrorsController(object): @expose('json') @error_wrapper def conflict(self, **kw): - '''409''' + """409.""" response.status = 409 return request.context.get('kwargs') @expose('json') @error_wrapper def server_error(self, **kw): - '''500''' + """500.""" response.status = 500 return request.context.get('kwargs') @expose('json') @error_wrapper def unavailable(self, **kw): - '''503''' + """503.""" response.status = 503 return request.context.get('kwargs') diff --git a/valet/api/v1/controllers/groups.py b/valet/api/v1/controllers/groups.py index d189af4..3e05165 100644 --- a/valet/api/v1/controllers/groups.py +++ b/valet/api/v1/controllers/groups.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-'''Groups''' +"""Groups.""" import logging @@ -48,7 +48,7 @@ MEMBERS_SCHEMA = ( def server_list_for_group(group): - '''Returns a list of VMs associated with a member/group.''' + """Return a list of VMs associated with a member/group.""" args = { "type": "group_vms", "parameters": { @@ -72,7 +72,7 @@ def server_list_for_group(group): def tenant_servers_in_group(tenant_id, group): - ''' Returns a list of servers the current tenant has in group_name ''' + """Return a list of servers the current tenant has in group_name.""" servers = [] server_list = server_list_for_group(group) nova = nova_client() @@ -89,20 +89,23 @@ def tenant_servers_in_group(tenant_id, group): def no_tenant_servers_in_group(tenant_id, group): - ''' Verify no servers from tenant_id are in group. + """Verify no servers from tenant_id are in group. Throws a 409 Conflict if any are found. - ''' + """ server_list = tenant_servers_in_group(tenant_id, group) if server_list: - error('/errors/conflict', _('Tenant Member {0} has servers in group "{1}": {2}').format(tenant_id, group.name, server_list)) + error('/errors/conflict', _('Tenant Member {0} has servers in group ' + '"{1}": {2}').format(tenant_id, + group.name, + server_list)) class MembersItemController(object): - ''' Members Item Controller /v1/groups/{group_id}/members/{member_id} ''' + """Member Item Controller /v1/groups/{group_id}/members/{member_id}.""" def __init__(self, member_id): - '''Initialize group member''' + """Initialize group member.""" group = request.context['group'] if member_id not in group.members: error('/errors/not_found', _('Member not found in group')) @@ -110,30 +113,30 @@ class MembersItemController(object): @classmethod def allow(cls): - '''Allowed methods''' + """Allowed methods.""" return 'GET,DELETE' @expose(generic=True, template='json') def index(self): - '''Catch all for unallowed methods''' + """Catch all for unallowed methods.""" message = _('The %s method is not allowed.') % request.method kwargs = {'allow': 
self.allow()} error('/errors/not_allowed', message, **kwargs) @index.when(method='OPTIONS', template='json') def index_options(self): - '''Options''' + """Index Options.""" response.headers['Allow'] = self.allow() response.status = 204 @index.when(method='GET', template='json') def index_get(self): - '''Verify group member''' + """Verify group member.""" response.status = 204 @index.when(method='DELETE', template='json') def index_delete(self): - '''Delete group member''' + """Delete group member.""" group = request.context['group'] member_id = request.context['member_id'] @@ -146,34 +149,35 @@ class MembersItemController(object): class MembersController(object): - ''' Members Controller /v1/groups/{group_id}/members ''' + """Members Controller /v1/groups/{group_id}/members.""" @classmethod def allow(cls): - '''Allowed methods''' + """Allowed methods.""" return 'PUT,DELETE' @expose(generic=True, template='json') def index(self): - '''Catchall for unallowed methods''' + """Catchall for unallowed methods.""" message = _('The %s method is not allowed.') % request.method kwargs = {'allow': self.allow()} error('/errors/not_allowed', message, **kwargs) @index.when(method='OPTIONS', template='json') def index_options(self): - '''Options''' + """Index Options.""" response.headers['Allow'] = self.allow() response.status = 204 @index.when(method='PUT', template='json') @validate(MEMBERS_SCHEMA, '/errors/schema') def index_put(self, **kwargs): - '''Add one or more members to a group''' + """Add one or more members to a group.""" new_members = kwargs.get('members', None) if not conf.identity.engine.is_tenant_list_valid(new_members): - error('/errors/conflict', _('Member list contains invalid tenant IDs')) + error('/errors/conflict', _('Member list contains ' + 'invalid tenant IDs')) group = request.context['group'] group.members = list(set(group.members + new_members)) @@ -186,7 +190,7 @@ class MembersController(object): @index.when(method='DELETE', template='json') def 
index_delete(self): - '''Delete all group members''' + """Delete all group members.""" group = request.context['group'] # Can't delete a member if it has associated VMs. @@ -199,49 +203,50 @@ class MembersController(object): @expose() def _lookup(self, member_id, *remainder): - '''Pecan subcontroller routing callback''' + """Pecan subcontroller routing callback.""" return MembersItemController(member_id), remainder class GroupsItemController(object): - ''' Groups Item Controller /v1/groups/{group_id} ''' + """Group Item Controller /v1/groups/{group_id}.""" members = MembersController() def __init__(self, group_id): - '''Initialize group''' - group = Group.query.filter_by(id=group_id).first() # pylint: disable=E1101 + """Initialize group.""" + # pylint:disable=E1101 + group = Group.query.filter_by(id=group_id).first() if not group: error('/errors/not_found', _('Group not found')) request.context['group'] = group @classmethod def allow(cls): - ''' Allowed methods ''' + """Allowed methods.""" return 'GET,PUT,DELETE' @expose(generic=True, template='json') def index(self): - '''Catchall for unallowed methods''' + """Catchall for unallowed methods.""" message = _('The %s method is not allowed.') % request.method kwargs = {'allow': self.allow()} error('/errors/not_allowed', message, **kwargs) @index.when(method='OPTIONS', template='json') def index_options(self): - '''Options''' + """Index Options.""" response.headers['Allow'] = self.allow() response.status = 204 @index.when(method='GET', template='json') def index_get(self): - '''Display a group''' + """Display a group.""" return {"group": request.context['group']} @index.when(method='PUT', template='json') @validate(UPDATE_GROUPS_SCHEMA, '/errors/schema') def index_put(self, **kwargs): - '''Update a group''' + """Update a group.""" # Name and type are immutable. # Group Members are updated in MembersController. 
group = request.context['group'] @@ -255,7 +260,7 @@ class GroupsItemController(object): @index.when(method='DELETE', template='json') def index_delete(self): - '''Delete a group''' + """Delete a group.""" group = request.context['group'] if isinstance(group.members, list) and len(group.members) > 0: error('/errors/conflict', _('Unable to delete a Group with members.')) @@ -264,29 +269,29 @@ class GroupsItemController(object): class GroupsController(object): - ''' Groups Controller /v1/groups ''' + """Group Controller /v1/groups.""" @classmethod def allow(cls): - '''Allowed methods''' + """Allowed methods.""" return 'GET,POST' @expose(generic=True, template='json') def index(self): - '''Catch all for unallowed methods''' + """Catch all for unallowed methods.""" message = _('The %s method is not allowed.') % request.method kwargs = {'allow': self.allow()} error('/errors/not_allowed', message, **kwargs) @index.when(method='OPTIONS', template='json') def index_options(self): - '''Options''' + """Index Options.""" response.headers['Allow'] = self.allow() response.status = 204 @index.when(method='GET', template='json') def index_get(self): - '''List groups''' + """List groups.""" groups_array = [] for group in Group.query.all(): # pylint: disable=E1101 groups_array.append(group) @@ -295,7 +300,7 @@ class GroupsController(object): @index.when(method='POST', template='json') @validate(GROUPS_SCHEMA, '/errors/schema') def index_post(self, **kwargs): - '''Create a group''' + """Create a group.""" group_name = kwargs.get('name', None) description = kwargs.get('description', None) group_type = kwargs.get('type', None) @@ -314,5 +319,5 @@ class GroupsController(object): @expose() def _lookup(self, group_id, *remainder): - '''Pecan subcontroller routing callback''' + """Pecan subcontroller routing callback.""" return GroupsItemController(group_id), remainder diff --git a/valet/api/v1/controllers/placements.py b/valet/api/v1/controllers/placements.py index 7862630..00b5cbe 
100644 --- a/valet/api/v1/controllers/placements.py +++ b/valet/api/v1/controllers/placements.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -'''Placements''' +"""Placements.""" import logging @@ -32,50 +32,53 @@ LOG = logging.getLogger(__name__) class PlacementsItemController(object): - ''' Placements Item Controller /v1/placements/{placement_id} ''' + """Placements Item Controller /v1/placements/{placement_id}.""" def __init__(self, uuid4): - '''Initializer.''' + """Initializer.""" self.uuid = uuid4 - self.placement = Placement.query.filter_by(id=self.uuid).first() # pylint: disable=E1101 + self.placement = Placement.query.filter_by(id=self.uuid).first() + # pylint: disable=E1101 if not self.placement: - self.placement = Placement.query.filter_by(orchestration_id=self.uuid).first() # disable=E1101 + self.placement = Placement.query.filter_by( + orchestration_id=self.uuid).first() + # disable=E1101 if not self.placement: error('/errors/not_found', _('Placement not found')) request.context['placement_id'] = self.placement.id @classmethod def allow(cls): - '''Allowed methods''' + """Allowed methods.""" return 'GET,POST,DELETE' @expose(generic=True, template='json') def index(self): - '''Catchall for unallowed methods''' + """Catchall for unallowed methods.""" message = _('The %s method is not allowed.') % request.method kwargs = {'allow': self.allow()} error('/errors/not_allowed', message, **kwargs) @index.when(method='OPTIONS', template='json') def index_options(self): - '''Options''' + """Index Options.""" response.headers['Allow'] = self.allow() response.status = 204 @index.when(method='GET', template='json') def index_get(self): - ''' Inspect a placement. + """Inspect a placement. Use POST for reserving placements made by a scheduler. - ''' + """ return {"placement": self.placement} @index.when(method='POST', template='json') def index_post(self, **kwargs): - ''' Reserve a placement. 
This and other placements may be replanned. + """Reserve a placement. This and other placements may be replanned. Once reserved, the location effectively becomes immutable. - ''' + """ res_id = kwargs.get('resource_id') LOG.info(_('Placement reservation request for resource id ' '%(res_id)s, orchestration id %(orch_id)s.'), @@ -122,7 +125,8 @@ class PlacementsItemController(object): # We may get one or more updated placements in return. # One of those will be the original placement # we are trying to reserve. - plan = Plan.query.filter_by(id=self.placement.plan_id).first() # pylint: disable=E1101 + plan = Plan.query.filter_by(id=self.placement.plan_id).first() + # pylint: disable=E1101 args = { "stack_id": plan.stack_id, @@ -151,7 +155,7 @@ class PlacementsItemController(object): @index.when(method='DELETE', template='json') def index_delete(self): - '''Delete a Placement''' + """Delete a Placement.""" orch_id = self.placement.orchestration_id self.placement.delete() LOG.info(_('Placement with orchestration id %s deleted.'), orch_id) @@ -159,29 +163,29 @@ class PlacementsItemController(object): class PlacementsController(object): - ''' Placements Controller /v1/placements ''' + """Placements Controller /v1/placements.""" @classmethod def allow(cls): - '''Allowed methods''' + """Allowed methods.""" return 'GET' @expose(generic=True, template='json') def index(self): - '''Catchall for unallowed methods''' + """Catchall for unallowed methods.""" message = _('The %s method is not allowed.') % request.method kwargs = {'allow': self.allow()} error('/errors/not_allowed', message, **kwargs) @index.when(method='OPTIONS', template='json') def index_options(self): - '''Options''' + """Index Options.""" response.headers['Allow'] = self.allow() response.status = 204 @index.when(method='GET', template='json') def index_get(self): - '''Get placements.''' + """Get placements.""" placements_array = [] for placement in Placement.query.all(): # pylint: disable=E1101 
placements_array.append(placement) @@ -189,5 +193,5 @@ class PlacementsController(object): @expose() def _lookup(self, uuid4, *remainder): - '''Pecan subcontroller routing callback''' + """Pecan subcontroller routing callback.""" return PlacementsItemController(uuid4), remainder diff --git a/valet/api/v1/controllers/plans.py b/valet/api/v1/controllers/plans.py index 839a182..0638dec 100644 --- a/valet/api/v1/controllers/plans.py +++ b/valet/api/v1/controllers/plans.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -'''Plans''' +"""Plans.""" import logging @@ -48,19 +48,22 @@ UPDATE_SCHEMA = ( (decorators.optional('timeout'), types.string) ) + # pylint: disable=R0201 class PlansItemController(object): - ''' Plans Item Controller /v1/plans/{plan_id} ''' + """Plan Item Controller /v1/plans/{plan_id}.""" def __init__(self, uuid4): - '''Initializer.''' + """Initializer.""" self.uuid = uuid4 - self.plan = Plan.query.filter_by(id=self.uuid).first() # pylint: disable=E1101 + self.plan = Plan.query.filter_by(id=self.uuid).first() + # pylint: disable=E1101 if not self.plan: - self.plan = Plan.query.filter_by(stack_id=self.uuid).first() # pylint: disable=E1101 + self.plan = Plan.query.filter_by(stack_id=self.uuid).first() + # pylint: disable=E1101 if not self.plan: error('/errors/not_found', _('Plan not found')) @@ -68,32 +71,31 @@ class PlansItemController(object): @classmethod def allow(cls): - '''Allowed methods''' + """Allowed methods.""" return 'GET,PUT,DELETE' @expose(generic=True, template='json') def index(self): - '''Catchall for unallowed methods''' + """Catchall for unallowed methods.""" message = _('The %s method is not allowed.') % request.method kwargs = {'allow': self.allow()} error('/errors/not_allowed', message, **kwargs) @index.when(method='OPTIONS', template='json') def index_options(self): - '''Options''' + """Index Options.""" response.headers['Allow'] = self.allow() response.status 
= 204 @index.when(method='GET', template='json') def index_get(self): - '''Get plan''' + """Get plan.""" return {"plan": self.plan} @index.when(method='PUT', template='json') @validate(UPDATE_SCHEMA, '/errors/schema') def index_put(self, **kwargs): - '''Update a Plan''' - + """Update a Plan.""" action = kwargs.get('action') if action == 'migrate': # Replan the placement of an existing resource. @@ -102,17 +104,24 @@ class PlansItemController(object): # TODO(JD): Support replan of more than one existing resource if not isinstance(resources, list) or len(resources) != 1: - error('/errors/invalid', _('resources must be a list of length 1.')) + error('/errors/invalid', + _('resources must be a list of length 1.')) # We either got a resource or orchestration id. the_id = resources[0] - placement = Placement.query.filter_by(resource_id=the_id).first() # pylint: disable=E1101 + placement = Placement.query.filter_by(resource_id=the_id).first() + # pylint: disable=E1101 if not placement: - placement = Placement.query.filter_by(orchestration_id=the_id).first() # pylint: disable=E1101 + placement = Placement.query.filter_by( + orchestration_id=the_id).first() # pylint: disable=E1101 if not placement: - error('/errors/invalid', _('Unknown resource or orchestration id: %s') % the_id) + error('/errors/invalid', _('Unknown resource or ' + 'orchestration id: %s') % the_id) + + LOG.info(_('Migration request for resource id {0}, ' + 'orchestration id {1}.').format( + placement.resource_id, placement.orchestration_id)) - LOG.info(_('Migration request for resource id {0}, orchestration id {1}.').format(placement.resource_id, placement.orchestration_id)) args = { "stack_id": self.plan.stack_id, "excluded_hosts": excluded_hosts, @@ -136,7 +145,8 @@ class PlansItemController(object): # Flush so that the DB is current. 
self.plan.flush() - self.plan = Plan.query.filter_by(stack_id=self.plan.stack_id).first() # pylint: disable=E1101 + self.plan = Plan.query.filter_by( + stack_id=self.plan.stack_id).first() # pylint: disable=E1101 LOG.info(_('Plan with stack id %s updated.'), self.plan.stack_id) return {"plan": self.plan} @@ -186,7 +196,7 @@ class PlansItemController(object): @index.when(method='DELETE', template='json') def index_delete(self): - '''Delete a Plan''' + """Delete a Plan.""" for placement in self.plan.placements(): placement.delete() stack_id = self.plan.stack_id @@ -196,29 +206,29 @@ class PlansItemController(object): class PlansController(object): - ''' Plans Controller /v1/plans ''' + """Plans Controller /v1/plans.""" @classmethod def allow(cls): - '''Allowed methods''' + """Allowed methods.""" return 'GET,POST' @expose(generic=True, template='json') def index(self): - '''Catchall for unallowed methods''' + """Catchall for unallowed methods.""" message = _('The %s method is not allowed.') % request.method kwargs = {'allow': self.allow()} error('/errors/not_allowed', message, **kwargs) @index.when(method='OPTIONS', template='json') def index_options(self): - '''Options''' + """Index Options.""" response.headers['Allow'] = self.allow() response.status = 204 @index.when(method='GET', template='json') def index_get(self): - '''Get all the plans''' + """Get all the plans.""" plans_array = [] for plan in Plan.query.all(): # pylint: disable=E1101 plans_array.append(plan) @@ -227,7 +237,7 @@ class PlansController(object): @index.when(method='POST', template='json') @validate(CREATE_SCHEMA, '/errors/schema') def index_post(self): - '''Create a Plan''' + """Create a Plan.""" ostro = Ostro() args = request.json @@ -277,5 +287,5 @@ class PlansController(object): @expose() def _lookup(self, uuid4, *remainder): - '''Pecan subcontroller routing callback''' + """Pecan subcontroller routing callback.""" return PlansItemController(uuid4), remainder diff --git 
a/valet/api/v1/controllers/root.py b/valet/api/v1/controllers/root.py index bdd5645..1df6d2f 100644 --- a/valet/api/v1/controllers/root.py +++ b/valet/api/v1/controllers/root.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -'''Root''' +"""Root.""" import logging @@ -31,32 +31,32 @@ LOG = logging.getLogger(__name__) class RootController(object): - ''' Root Controller / ''' + """Root Controller.""" errors = ErrorsController() v1 = V1Controller() # pylint: disable=C0103 @classmethod def allow(cls): - '''Allowed methods''' + """Allowed methods.""" return 'GET' @expose(generic=True, template='json') def index(self): - '''Catchall for unallowed methods''' + """Catchall for unallowed methods.""" message = _('The %s method is not allowed.') % request.method kwargs = {'allow': self.allow()} error('/errors/not_allowed', message, **kwargs) @index.when(method='OPTIONS', template='json') def index_options(self): - '''Options''' + """Index Options.""" response.headers['Allow'] = self.allow() response.status = 204 @index.when(method='GET', template='json') def index_get(self): - '''Get canonical URL for each version''' + """Get canonical URL for each version.""" ver = { "versions": [ @@ -78,7 +78,7 @@ class RootController(object): @error_wrapper def error(self, status): - '''Error handler''' + """Error handler.""" try: status = int(status) except ValueError: # pragma: no cover diff --git a/valet/api/v1/controllers/status.py b/valet/api/v1/controllers/status.py index e86baf5..cdf2c28 100644 --- a/valet/api/v1/controllers/status.py +++ b/valet/api/v1/controllers/status.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-'''Status''' +"""Status.""" import logging @@ -28,11 +28,11 @@ LOG = logging.getLogger(__name__) class StatusController(object): - ''' Status Controller /v1/status ''' + """Status Controller /v1/status.""" @classmethod def _ping_ostro(cls): - '''Ping Ostro''' + """Ping Ostro.""" ostro = Ostro() ostro.ping() ostro.send() @@ -40,7 +40,7 @@ class StatusController(object): @classmethod def _ping(cls): - '''Ping each subsystem.''' + """Ping each subsystem.""" ostro_response = StatusController._ping_ostro() # TODO(JD): Ping Music plus any others. @@ -54,32 +54,31 @@ class StatusController(object): @classmethod def allow(cls): - '''Allowed methods''' + """Allowed methods.""" return 'HEAD,GET' @expose(generic=True, template='json') def index(self): - '''Catchall for unallowed methods''' + """Catchall for unallowed methods.""" message = _('The %s method is not allowed.') % request.method kwargs = {'allow': self.allow()} error('/errors/not_allowed', message, **kwargs) @index.when(method='OPTIONS', template='json') def index_options(self): - '''Options''' + """Index Options.""" response.headers['Allow'] = self.allow() response.status = 204 @index.when(method='HEAD', template='json') def index_head(self): - '''Ping each subsystem and return summary response''' + """Ping each subsystem and return summary response.""" self._ping() # pylint: disable=W0612 response.status = 204 @index.when(method='GET', template='json') def index_get(self): - '''Ping each subsystem and return detailed response''' - + """Ping each subsystem and return detailed response.""" _response = self._ping() response.status = 200 return _response diff --git a/valet/api/v1/controllers/v1.py b/valet/api/v1/controllers/v1.py index 05588ca..c88e095 100644 --- a/valet/api/v1/controllers/v1.py +++ b/valet/api/v1/controllers/v1.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-'''v1''' +"""v1.""" import logging @@ -34,7 +34,7 @@ LOG = logging.getLogger(__name__) class V1Controller(SecureController): - ''' v1 Controller /v1 ''' + """v1 Controller /v1.""" groups = GroupsController() placements = PlacementsController() @@ -46,7 +46,7 @@ class V1Controller(SecureController): @classmethod def check_permissions(cls): - '''SecureController permission check callback''' + """SecureController permission check callback.""" token = None auth_token = request.headers.get('X-Auth-Token') msg = "Unauthorized - No auth token" @@ -74,7 +74,10 @@ class V1Controller(SecureController): @classmethod def _action_is_migrate(cls, request): - return "plan" in request.path and hasattr(request, "json") and "action" in request.json and request.json["action"] == "migrate" + return "plan" in request.path \ + and hasattr(request, "json") \ + and "action" in request.json \ + and request.json["action"] == "migrate" @classmethod def _permission_granted(cls, request, token): @@ -84,25 +87,25 @@ class V1Controller(SecureController): @classmethod def allow(cls): - '''Allowed methods''' + """Allowed methods.""" return 'GET' @expose(generic=True, template='json') def index(self): - '''Catchall for unallowed methods''' + """Catchall for unallowed methods.""" message = _('The %s method is not allowed.') % request.method kwargs = {'allow': self.allow()} error('/errors/not_allowed', message, **kwargs) @index.when(method='OPTIONS', template='json') def index_options(self): - '''Options''' + """Index Options.""" response.headers['Allow'] = self.allow() response.status = 204 @index.when(method='GET', template='json') def index_get(self): - '''Get canonical URL for each endpoint''' + """Get canonical URL for each endpoint.""" links = [] for endpoint in V1Controller.endpoints: links.append({ diff --git a/valet/api/wsgi.py b/valet/api/wsgi.py index c9632ff..fe89b94 100644 --- a/valet/api/wsgi.py +++ b/valet/api/wsgi.py @@ -13,7 +13,7 @@ # See the License for the specific language 
governing permissions and # limitations under the License. -'''WSGI Wrapper''' +"""WSGI Wrapper.""" from common.i18n import _ import os @@ -21,7 +21,7 @@ from pecan.deploy import deploy def config_file(file_name=None): - """Returns absolute location of the config file""" + """Return absolute location of the config file.""" file_name = file_name or 'config.py' _file = os.path.abspath(__file__) @@ -32,7 +32,7 @@ def config_file(file_name=None): def application(environ, start_response): - """Returns a WSGI app object""" + """Return a WSGI app object.""" wsgi_app = deploy(config_file('prod.py')) return wsgi_app(environ, start_response) @@ -45,7 +45,8 @@ if __name__ == '__main__': from valet.api.conf import register_conf, set_domain register_conf() set_domain() - HTTPD = make_server('', 8090, deploy(config_file('/var/www/valet/config.py'))) + HTTPD = make_server('', 8090, + deploy(config_file('/var/www/valet/config.py'))) print(_("Serving HTTP on port 8090...")) # Respond to requests until process is killed diff --git a/valet/cli/groupcli.py b/valet/cli/groupcli.py index beb4b6a..7d2b77b 100644 --- a/valet/cli/groupcli.py +++ b/valet/cli/groupcli.py @@ -13,6 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+"""Group Cli.""" + import argparse import json from oslo_config import cfg @@ -23,81 +25,120 @@ CONF = cfg.CONF class ResponseError(Exception): + """Response Error Exception.""" + pass class ConnectionError(Exception): + """Connection Error Exception.""" + pass def print_verbose(verbose, url, headers, body, rest_cmd, timeout): + """Print verbose data.""" + # TODO(Chris Martin): Replace prints with logs if verbose: - print("Sending Request:\nurl: %s\nheaders: %s\nbody: %s\ncmd: %s\ntimeout: %d\n" - % (url, headers, body, rest_cmd.__name__ if rest_cmd is not None else None, timeout)) + print("Sending Request:\nurl: %s\nheaders: " + "%s\nbody: %s\ncmd: %s\ntimeout: %d\n" + % (url, headers, body, + rest_cmd.__name__ if rest_cmd is not None else None, timeout)) def pretty_print_json(json_thing, sort=True, indents=4): + """Print parser in nice format.""" + # TODO(Chris Martin): Replace prints with logs if type(json_thing) is str: - print(json.dumps(json.loads(json_thing), sort_keys=sort, indent=indents)) + print(json.dumps(json.loads(json_thing), sort_keys=sort, + indent=indents)) else: print(json.dumps(json_thing, sort_keys=sort, indent=indents)) return None def add_to_parser(service_sub): + """Return parser.""" parser = service_sub.add_parser('group', help='Group Management', - formatter_class=lambda prog: argparse.HelpFormatter(prog, max_help_position=30, - width=120)) + formatter_class=lambda + prog: argparse.HelpFormatter( + prog, max_help_position=30, + width=120)) parser.add_argument('--version', action='version', version='%(prog)s 1.1') - parser.add_argument('--timeout', type=int, help='Set request timeout in seconds (default: 10)') - parser.add_argument('--host', type=str, help='Hostname or ip of valet server') + parser.add_argument('--timeout', type=int, + help='Set request timeout in seconds (default: 10)') + parser.add_argument('--host', type=str, + help='Hostname or ip of valet server') parser.add_argument('--port', type=str, help='Port number of valet 
server') parser.add_argument('--os-tenant-name', type=str, help='Tenant name') - parser.add_argument('--os-user-name', dest='os_username', type=str, help='Username') + parser.add_argument('--os-user-name', dest='os_username', type=str, + help='Username') parser.add_argument('--os-password', type=str, help="User's password") - parser.add_argument('--verbose', '-v', help='Show details', action="store_true") + parser.add_argument('--verbose', '-v', help='Show details', + action="store_true") subparsers = parser.add_subparsers(dest='subcmd', metavar='') # create group - parser_create_group = subparsers.add_parser('create', help='Create new group.') + parser_create_group = subparsers.add_parser('create', + help='Create new group.') parser_create_group.add_argument('name', type=str, help='') - parser_create_group.add_argument('type', type=str, help=' (exclusivity)') - parser_create_group.add_argument('--description', type=str, help='') + parser_create_group.add_argument('type', type=str, + help=' (exclusivity)') + parser_create_group.add_argument('--description', type=str, + help='') # delete group - parser_delete_group = subparsers.add_parser('delete', help='Delete specified group.') + parser_delete_group = subparsers.add_parser('delete', + help='Delete specified group.') parser_delete_group.add_argument('groupid', type=str, help='') # delete group member - parser_delete_group_member = subparsers.add_parser('delete-member', help='Delete members from specified group.') - parser_delete_group_member.add_argument('groupid', type=str, help='') - parser_delete_group_member.add_argument('memberid', type=str, help='') + parser_delete_group_member = subparsers.add_parser('delete-member', + help='Delete member from' + 'specified group.') + parser_delete_group_member.add_argument('groupid', type=str, + help='') + parser_delete_group_member.add_argument('memberid', type=str, + help='') # delete all group members - parser_delete_all_group_members = 
subparsers.add_parser('delete-all-members', help='Delete all members from ' - 'specified group.') - parser_delete_all_group_members.add_argument('groupid', type=str, help='') + parser_delete_all_group_members = subparsers.add_parser( + 'delete-all-members', help='Delete all members from ' + 'specified group.') + parser_delete_all_group_members.add_argument('groupid', type=str, + help='') # list group subparsers.add_parser('list', help='List all groups.') # show group details - parser_show_group_details = subparsers.add_parser('show', help='Show details about the given group.') - parser_show_group_details.add_argument('groupid', type=str, help='') + parser_show_group_details = subparsers.add_parser('show', + help='Show details about' + 'the given group.') + parser_show_group_details.add_argument('groupid', type=str, + help='') # update group - parser_update_group = subparsers.add_parser('update', help='Update group description.') + parser_update_group = subparsers.add_parser('update', + help='Update group' + 'description.') parser_update_group.add_argument('groupid', type=str, help='') - parser_update_group.add_argument('--description', type=str, help='') + parser_update_group.add_argument('--description', type=str, + help='') - parser_update_group_members = subparsers.add_parser('update-member', help='Update group members.') - parser_update_group_members.add_argument('groupid', type=str, help='') - parser_update_group_members.add_argument('members', type=str, help='') + parser_update_group_members = subparsers.add_parser('update-member', + help='Update' + 'group members.') + parser_update_group_members.add_argument('groupid', type=str, + help='') + parser_update_group_members.add_argument('members', type=str, + help='') return parser def cmd_details(args): + """Command details.""" if args.subcmd == 'create': return requests.post, '' elif args.subcmd == 'update': @@ -105,21 +146,25 @@ def cmd_details(args): elif args.subcmd == 'update-member': return requests.put, 
'/%s/members' % args.groupid elif args.subcmd == 'delete': - return requests.delete, '/%s' % (args.groupid) + return requests.delete, '/%s' % args.groupid elif args.subcmd == 'delete-all-members': - return requests.delete, '/%s/members' % (args.groupid) + return requests.delete, '/%s/members' % args.groupid elif args.subcmd == 'delete-member': return requests.delete, '/%s/members/%s' % (args.groupid, args.memberid) elif args.subcmd == 'show': - return requests.get, '/%s' % (args.groupid) + return requests.get, '/%s' % args.groupid elif args.subcmd == 'list': return requests.get, '' def get_token(timeout, args): - tenant_name = args.os_tenant_name if args.os_tenant_name else CONF.identity.project_name - auth_name = args.os_username if args.os_username else CONF.identity.username - password = args.os_password if args.os_password else CONF.identity.password + """Return JSON of access token id.""" + tenant_name = args.os_tenant_name if args.os_tenant_name \ + else CONF.identity.project_name + auth_name = args.os_username if args.os_username \ + else CONF.identity.username + password = args.os_password if args.os_password \ + else CONF.identity.password headers = { 'Content-Type': 'application/json', } @@ -149,19 +194,23 @@ def get_token(timeout, args): def populate_args_request_body(args): + """Return JSON of filtered body dictionary.""" body_args_list = ['name', 'type', 'description', 'members'] - # assign values to dictionary (if val exist). 
members will be assign as a list + # assign values to dict (if val exist) members will be assign as a list body_dict = {} for body_arg in body_args_list: if hasattr(args, body_arg): - body_dict[body_arg] = getattr(args, body_arg) if body_arg != 'members' else [getattr(args, body_arg)] + body_dict[body_arg] = getattr(args, body_arg) \ + if body_arg != 'members' else [getattr(args, body_arg)] # remove keys without values - filtered_body_dict = dict((k, v) for k, v in body_dict.iteritems() if v is not None) + filtered_body_dict = dict( + (k, v) for k, v in body_dict.iteritems() if v is not None) # check if dictionary is not empty, convert body dictionary to json format return json.dumps(filtered_body_dict) if bool(filtered_body_dict) else None def run(args): + """Run.""" register_conf() set_domain(project='valet') args.host = args.host or CONF.server.host @@ -177,23 +226,27 @@ def run(args): args.body = populate_args_request_body(args) try: - print_verbose(args.verbose, args.url, args.headers, args.body, rest_cmd, args.timeout) + print_verbose(args.verbose, args.url, args.headers, args.body, rest_cmd, + args.timeout) if args.body: - resp = rest_cmd(args.url, timeout=args.timeout, data=args.body, headers=args.headers) + resp = rest_cmd(args.url, timeout=args.timeout, data=args.body, + headers=args.headers) else: - resp = rest_cmd(args.url, timeout=args.timeout, headers=args.headers) + resp = rest_cmd(args.url, timeout=args.timeout, + headers=args.headers) except Exception as e: print(e) exit(1) if not 200 <= resp.status_code < 300: content = resp.json() if resp.status_code == 500 else '' - print('API error: %s %s (Reason: %d)\n%s' % (rest_cmd.func_name.upper(), args.url, resp.status_code, content)) + print('API error: %s %s (Reason: %d)\n%s' % ( + rest_cmd.func_name.upper(), args.url, resp.status_code, content)) exit(1) try: if resp.content: rj = resp.json() pretty_print_json(rj) except Exception as e: - print (e) + print(e) exit(1) diff --git a/valet/cli/valetcli.py 
b/valet/cli/valetcli.py index a925b04..fab5e2b 100755 --- a/valet/cli/valetcli.py +++ b/valet/cli/valetcli.py @@ -13,6 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +"""Valet cli.""" + import argparse import sys import valet.cli.groupcli as groupcli @@ -20,27 +22,36 @@ import valet.cli.groupcli as groupcli class Cli(object): + """Cli.""" + def __init__(self): + """Init cli.""" self.args = None self.submod = None self.parser = None def create_parser(self): - self.parser = argparse.ArgumentParser(prog='valet', description='VALET REST CLI') - service_sub = self.parser.add_subparsers(dest='service', metavar='') + """Create parser.""" + self.parser = argparse.ArgumentParser(prog='valet', + description='VALET REST CLI') + service_sub = self.parser.add_subparsers(dest='service', + metavar='') self.submod = {'group': groupcli} for s in self.submod.values(): s.add_to_parser(service_sub) def parse(self, argv=sys.argv): + """Parse args.""" sys.argv = argv self.args = self.parser.parse_args() def logic(self): + """Logic.""" self.submod[self.args.service].run(self.args) def main(argv): + """Main.""" cli = Cli() cli.create_parser() cli.parse(argv) diff --git a/valet/engine/conf.py b/valet/engine/conf.py index af66ba1..fd6a948 100644 --- a/valet/engine/conf.py +++ b/valet/engine/conf.py @@ -1,18 +1,20 @@ # # Copyright 2014-2017 AT&T Intellectual Property -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+"""Conf.""" + from oslo_config import cfg from valet.api import conf as api @@ -28,53 +30,178 @@ ostro_cli_opts = [ engine_group = cfg.OptGroup(name='engine', title='Valet Engine conf') engine_opts = [ - cfg.StrOpt('pid', default='/var/run/valet/ostro-daemon.pid'), - cfg.StrOpt('mode', default='live', - help='sim will let Ostro simulate datacenter, while live will let it handle a real datacenter'), - cfg.StrOpt('sim_cfg_loc', default='/etc/valet/engine/ostro_sim.cfg'), - cfg.BoolOpt('network_control', default=False, help='whether network controller (i.e., Tegu) has been deployed'), - cfg.StrOpt('network_control_url', default='http://network_control:29444/tegu/api'), - cfg.StrOpt('ip', default='localhost'), - cfg.IntOpt('priority', default=1, help='this instance priority (master=1)'), - cfg.StrOpt('rpc_server_ip', default='localhost', - help='Set RPC server ip and port if used. Otherwise, ignore these parameters'), - cfg.StrOpt('rpc_server_port', default='8002'), - cfg.StrOpt('logger_name', default='engine.log'), - cfg.StrOpt('logging_level', default='debug'), - cfg.StrOpt('logging_dir', default='/var/log/valet/'), - cfg.StrOpt('max_main_log_size', default=5000000), - cfg.IntOpt('max_log_size', default=1000000), - cfg.IntOpt('max_num_of_logs', default=20), - cfg.StrOpt('datacenter_name', default='bigsite', - help='Inform the name of datacenter (region name), where Valet/Ostro is deployed.'), - cfg.IntOpt('num_of_region_chars', default='3', help='number of chars that indicates the region code'), - cfg.StrOpt('rack_code_list', default='r', help='rack indicator.'), - cfg.ListOpt('node_code_list', default='a,c,u,f,o,p,s', - help='indicates the node type. 
a: network, c KVM compute, u: ESXi compute, f: ?, o: operation, ' - 'p: power, s: storage.'), - cfg.StrOpt('compute_trigger_time', default='1:00', - help='trigger time or frequency for checking compute hosting server status (i.e., call Nova)'), - cfg.IntOpt('compute_trigger_frequency', default=3600, - help='trigger time or frequency for checking compute hosting server status (i.e., call Nova)'), - cfg.StrOpt('topology_trigger_time', default='2:00', - help='Set trigger time or frequency for checking datacenter topology'), - cfg.IntOpt('topology_trigger_frequency', default=3600, - help='Set trigger time or frequency for checking datacenter topology'), - cfg.IntOpt('default_cpu_allocation_ratio', default=16, help='Set default overbooking ratios. ' - 'Note that each compute node can have its own ratios'), - cfg.IntOpt('default_ram_allocation_ratio', default=1.5, help='Set default overbooking ratios. ' - 'Note that each compute node can have its own ratios'), - cfg.IntOpt('default_disk_allocation_ratio', default=1, help='Set default overbooking ratios. ' - 'Note that each compute node can have its own ratios'), - cfg.IntOpt('static_cpu_standby_ratio', default=20, help='unused percentages of resources (i.e. standby) ' - 'that are set aside for applications workload spikes.'), - cfg.IntOpt('static_mem_standby_ratio', default=20, help='unused percentages of resources (i.e. standby) ' - 'that are set aside for applications workload spikes.'), - cfg.IntOpt('static_local_disk_standby_ratio', default=20, help='unused percentages of resources (i.e. standby) ' - 'that are set aside for applications workload spikes.'), + cfg.StrOpt( + 'pid', + default='/var/run/valet/ostro-daemon.pid' + ), + cfg.StrOpt( + 'mode', + default='live', + help=""" +Sim will let Ostro simulate datacenter, while live will +let it handle a real datacenter. 
+"""), + cfg.StrOpt( + 'sim_cfg_loc', + default='/etc/valet/engine/ostro_sim.cfg'), + cfg.BoolOpt( + 'network_control', + default=False, + help=""" +Whether network controller (i.e., Tegu) has been deployed +"""), + cfg.StrOpt( + 'network_control_url', + default='http://network_control:29444/tegu/api'), + cfg.StrOpt( + 'ip', + default='localhost'), + cfg.IntOpt( + 'priority', + default=1, + help=""" +This instance priority (master=1) +"""), + cfg.StrOpt( + 'rpc_server_ip', + default='localhost', + help=""" +Set RPC server ip and port if used. Otherwise, ignore these parameters +"""), + cfg.StrOpt( + 'rpc_server_port', + default='8002' + ), + cfg.StrOpt( + 'logger_name', + default='engine.log' + ), + cfg.StrOpt( + 'logging_level', + default='debug' + ), + cfg.StrOpt( + 'logging_dir', + default='/var/log/valet/' + ), + cfg.StrOpt( + 'max_main_log_size', + default=5000000 + ), + cfg.IntOpt( + 'max_log_size', + default=1000000 + ), + cfg.IntOpt( + 'max_num_of_logs', + default=20 + ), + cfg.StrOpt( + 'datacenter_name', + default='bigsite', + help=""" +Inform the name of datacenter (region name), where Valet/Ostro is deployed. +"""), + cfg.IntOpt( + 'num_of_region_chars', + default='3', + help=""" +Number of chars that indicates the region code +"""), + cfg.StrOpt( + 'rack_code_list', + default='r', + help=""" +Rack indicator. +"""), + cfg.ListOpt( + 'node_code_list', + default='a,c,u,f,o,p,s', + help=""" +Indicates the node type. + +Values: + +* a: network +* c KVM compute +* u: ESXi compute +* f: ? +* o: operation +* p: power +* s: storage. +"""), + cfg.StrOpt( + 'compute_trigger_time', + default='1:00', + help=""" +Trigger time or frequency for checking compute hosting server status +(i.e., call Nova) +"""), + cfg.IntOpt( + 'compute_trigger_frequency', + default=3600, + help=""" +Trigger time or frequency for checking compute hosting server status +(i.e., call Nova). 
+"""), + cfg.StrOpt( + 'topology_trigger_time', + default='2:00', + help=""" +Set trigger time or frequency for checking datacenter topology. +"""), + cfg.IntOpt( + 'topology_trigger_frequency', + default=3600, + help=""" +Set trigger time or frequency for checking datacenter topology. +"""), + cfg.IntOpt( + 'default_cpu_allocation_ratio', + default=16, + help=""" +Set default overbooking ratios. +Note that each compute node can have its own ratios. +"""), + cfg.IntOpt( + 'default_ram_allocation_ratio', + default=1.5, + help=""" +Set default overbooking ratios. +Note that each compute node can have its own ratios. +"""), + cfg.IntOpt( + 'default_disk_allocation_ratio', + default=1, + help=""" +Set default overbooking ratios. +Note that each compute node can have its own ratios. +"""), + cfg.IntOpt( + 'static_cpu_standby_ratio', + default=20, + help=""" +Unused percentages of resources (i.e. standby) that are set +aside for applications workload spikes. +"""), + cfg.IntOpt( + 'static_mem_standby_ratio', + default=20, + help=""" +Unused percentages of resources (i.e. standby) that are set +aside for applications workload spikes. +"""), + cfg.IntOpt( + 'static_local_disk_standby_ratio', + default=20, + help=""" +Unused percentages of resources (i.e. standby) that are set +aside for applications workload spikes. 
+"""), ] -listener_group = cfg.OptGroup(name='events_listener', title='Valet Engine listener') +listener_group = cfg.OptGroup(name='events_listener', + title='Valet Engine listener') listener_opts = [ cfg.StrOpt('exchange', default='nova'), cfg.StrOpt('exchange_type', default='topic'), @@ -89,6 +216,7 @@ listener_opts = [ def register_conf(): + """Function calls api and registers configs opts.""" api.register_conf() CONF.register_group(engine_group) CONF.register_opts(engine_opts, engine_group) diff --git a/valet/engine/listener/listener_manager.py b/valet/engine/listener/listener_manager.py index 25e2b6f..3e1944f 100644 --- a/valet/engine/listener/listener_manager.py +++ b/valet/engine/listener/listener_manager.py @@ -1,18 +1,20 @@ # # Copyright 2015-2017 AT&T Intellectual Property -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +"""Listener Manager.""" + from datetime import datetime import json import pika @@ -26,8 +28,10 @@ import yaml class ListenerManager(threading.Thread): + """Listener Manager Thread Class.""" def __init__(self, _t_id, _t_name, _config): + """Init.""" threading.Thread.__init__(self) self.thread_id = _t_id self.thread_name = _t_name @@ -36,14 +40,16 @@ class ListenerManager(threading.Thread): self.MUSIC = None def run(self): - '''Entry point + """Entry point. - Connect to localhost rabbitmq servers, use username:password@ipaddress:port. - The port is typically 5672, and the default username and password are guest and guest. 
- credentials = pika.PlainCredentials("guest", "PASSWORD") - ''' + Connect to localhost rabbitmq servers, use + username:password@ipaddress:port. The port is typically 5672, + and the default username and password are guest and guest. + credentials = pika.PlainCredentials("guest", "PASSWORD"). + """ try: - self.listener_logger.info("ListenerManager: start " + self.thread_name + " ......") + self.listener_logger.info("ListenerManager: start " + + self.thread_name + " ......") if self.config.events_listener.store: @@ -54,12 +60,20 @@ class ListenerManager(threading.Thread): } engine = Music(**kwargs) engine.create_keyspace(self.config.music.keyspace) - self.MUSIC = {'engine': engine, 'keyspace': self.config.music.keyspace} - self.listener_logger.debug('Storing in music on %s, keyspace %s' % (self.config.music.host, self.config.music.keyspace)) + self.MUSIC = {'engine': engine, + 'keyspace': self.config.music.keyspace} + self.listener_logger.debug('Storing in music on %s, keyspace %s' + % (self.config.music.host, + self.config.music.keyspace)) - self.listener_logger.debug('Connecting to %s, with %s' % (self.config.messaging.host, self.config.messaging.username)) - credentials = pika.PlainCredentials(self.config.messaging.username, self.config.messaging.password) - parameters = pika.ConnectionParameters(self.config.messaging.host, self.config.messaging.port, '/', credentials) + self.listener_logger.debug('Connecting to %s, with %s' % + (self.config.messaging.host, + self.config.messaging.username)) + credentials = pika.PlainCredentials(self.config.messaging.username, + self.config.messaging.password) + parameters = pika.ConnectionParameters(self.config.messaging.host, + self.config.messaging.port, + '/', credentials) connection = pika.BlockingConnection(parameters) channel = connection.channel() @@ -73,9 +87,9 @@ class ListenerManager(threading.Thread): # to receive. 
'#' is a wild card -- meaning receive all messages binding_key = "#" - # Check whether or not an exchange with the given name and type exists. + # Check whether an exchange with the given name and type exists. # Make sure that the exchange is multicast "fanout" or "topic" type - # otherwise our queue will consume the messages intended for other queues + # otherwise queue will consume messages intended for other queues channel.exchange_declare(exchange=exchange_name, exchange_type=exchange_type, auto_delete=auto_delete) @@ -85,8 +99,11 @@ class ListenerManager(threading.Thread): queue_name = result.method.queue # Bind the queue to the selected exchange - channel.queue_bind(exchange=exchange_name, queue=queue_name, routing_key=binding_key) - self.listener_logger.info('Channel is bound, listening on %s exchange %s', self.config.messaging.host, self.config.events_listener.exchange) + channel.queue_bind(exchange=exchange_name, queue=queue_name, + routing_key=binding_key) + self.listener_logger.info('Channel is bound,listening on%s ' + 'exchange %s', self.config.messaging.host, + self.config.events_listener.exchange) # Start consuming messages channel.basic_consume(self.on_message, queue_name) @@ -103,8 +120,9 @@ class ListenerManager(threading.Thread): channel.close() connection.close() - def on_message(self, channel, method_frame, _, body): # pylint: disable=W0613 - '''Specify the action to be taken on a message received''' + def on_message(self, channel, method_frame, _, body): + """Specify the action to be taken on a message received.""" + # pylint: disable=W0613 message = yaml.load(body) try: if 'oslo.message' in message.keys(): @@ -115,12 +133,14 @@ class ListenerManager(threading.Thread): else: return - self.listener_logger.debug("\nMessage No: %s\n", method_frame.delivery_tag) + self.listener_logger.debug("\nMessage No: %s\n", + method_frame.delivery_tag) message_obj = yaml.load(body) if 'oslo.message' in message_obj.keys(): message_obj = 
yaml.load(message_obj['oslo.message']) if self.config.events_listener.output_format == 'json': - self.listener_logger.debug(json.dumps(message_obj, sort_keys=True, indent=2)) + self.listener_logger.debug(json.dumps(message_obj, + sort_keys=True, indent=2)) elif self.config.events_listener.output_format == 'yaml': self.listener_logger.debug(yaml.dump(message_obj)) else: @@ -131,25 +151,34 @@ class ListenerManager(threading.Thread): return def is_message_wanted(self, message): - ''' Based on markers from Ostro, determine if this is a wanted message. ''' + """Based on markers from Ostro. + + Determine if this is a wanted message. + """ method = message.get('method', None) args = message.get('args', None) - nova_props = {'nova_object.changes', 'nova_object.data', 'nova_object.name'} + nova_props = {'nova_object.changes', 'nova_object.data', + 'nova_object.name'} args_props = {'filter_properties', 'instance'} is_data = method and args - is_nova = is_data and 'objinst' in args and nova_props.issubset(args['objinst']) + is_nova = is_data and 'objinst' in args \ + and nova_props.issubset(args['objinst']) - action_instance = is_nova and method == 'object_action' and self.is_nova_name(args) and self.is_nova_state(args) + action_instance = is_nova and method == 'object_action' \ + and self.is_nova_name(args) \ + and self.is_nova_state(args) action_compute = is_nova and self.is_compute_name(args) - create_instance = is_data and method == 'build_and_run_instance' and args_props.issubset(args) and 'nova_object.data' in args['instance'] + create_instance = is_data and method == 'build_and_run_instance' \ + and args_props.issubset(args) \ + and 'nova_object.data' in args['instance'] return action_instance or action_compute or create_instance def store_message(self, message): - '''Store message in Music''' + """Store message in Music.""" timestamp = datetime.now().isoformat() args = json.dumps(message.get('args', None)) exchange = self.config.events_listener.exchange @@ -165,10 
+194,14 @@ class ListenerManager(threading.Thread): OsloMessage(**kwargs) # pylint: disable=W0612 def is_nova_name(self, args): + """Return True if object name is Instance.""" return args['objinst']['nova_object.name'] == 'Instance' def is_nova_state(self, args): - return args['objinst']['nova_object.data']['vm_state'] in ['deleted', 'active'] + """Return True if object vm_state is deleted or active.""" + return args['objinst']['nova_object.data']['vm_state'] \ + in ['deleted', 'active'] def is_compute_name(self, args): + """Return True if object name is ComputeNode.""" return args['objinst']['nova_object.name'] == 'ComputeNode' diff --git a/valet/engine/listener/oslo_messages.py b/valet/engine/listener/oslo_messages.py index 42bca0f..6d0146b 100644 --- a/valet/engine/listener/oslo_messages.py +++ b/valet/engine/listener/oslo_messages.py @@ -1,19 +1,19 @@ # # Copyright 2015-2017 AT&T Intellectual Property -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -'''OsloMessage Database Model''' +"""OsloMessage Database Model.""" # This is based on Music models used in Valet. 
@@ -21,6 +21,8 @@ import uuid class OsloMessage(object): + """OsloMessage class.""" + __tablename__ = 'oslo_messages' _database = None @@ -32,7 +34,7 @@ class OsloMessage(object): @classmethod def schema(cls): - '''Return schema.''' + """Return schema.""" schema = { 'timestamp': 'text', 'args': 'text', @@ -44,13 +46,15 @@ class OsloMessage(object): @classmethod def pk_name(cls): + """Return timestamp string.""" return 'timestamp' def pk_value(self): + """Return self.timestamp.""" return self.timestamp def insert(self): - '''Insert row.''' + """Insert row.""" keyspace = self._database.get('keyspace') kwargs = { 'keyspace': keyspace, @@ -66,6 +70,7 @@ class OsloMessage(object): engine.create_row(**kwargs) def values(self): + """Return values.""" return { 'timestamp': self.timestamp, 'args': self.args, @@ -75,6 +80,7 @@ class OsloMessage(object): def __init__(self, timestamp, args, exchange, method, database, _insert=True): + """Init.""" self._database = database self.timestamp = timestamp self.args = args @@ -84,6 +90,7 @@ class OsloMessage(object): self.insert() def __json__(self): + """Return json.""" json_ = {} json_['timestamp'] = self.timestamp json_['args'] = self.args diff --git a/valet/engine/optimizer/app_manager/app_handler.py b/valet/engine/optimizer/app_manager/app_handler.py index 2aecef0..0231f07 100755 --- a/valet/engine/optimizer/app_manager/app_handler.py +++ b/valet/engine/optimizer/app_manager/app_handler.py @@ -1,18 +1,20 @@ # # Copyright 2014-2017 AT&T Intellectual Property -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. +"""App Handler.""" + import json from valet.engine.optimizer.app_manager.app_topology import AppTopology @@ -22,14 +24,21 @@ from valet.engine.optimizer.util import util as util class AppHandler(object): + """App Handler Class. + + This class handles operations for the management of applications. + Functions related to adding apps and adding/removing them from + placement and updating topology info. + """ def __init__(self, _resource, _db, _config, _logger): + """Init App Handler Class.""" self.resource = _resource self.db = _db self.config = _config self.logger = _logger - ''' current app requested, a temporary copy ''' + """ current app requested, a temporary copy """ self.apps = {} self.last_log_index = 0 @@ -37,6 +46,7 @@ class AppHandler(object): self.status = "success" def add_app(self, _app_data): + """Add app and set or regenerate topology, return updated topology.""" self.apps.clear() app_topology = AppTopology(self.resource, self.logger) @@ -60,10 +70,12 @@ class AppHandler(object): if action == "ping": self.logger.debug("AppHandler: got ping") elif action == "replan" or action == "migrate": - re_app = self._regenerate_app_topology(stack_id, app, app_topology, action) + re_app = self._regenerate_app_topology(stack_id, app, + app_topology, action) if re_app is None: self.apps[stack_id] = None - self.status = "cannot locate the original plan for stack = " + stack_id + self.status = "cannot locate the original plan for " \ + "stack = " + stack_id return None if action == "replan": @@ -93,6 +105,7 @@ class AppHandler(object): return app_topology def add_placement(self, _placement_map, _timestamp): + """Change requested apps to scheduled and place them.""" for v in _placement_map.keys(): if self.apps[v.app_uuid].status == "requested": self.apps[v.app_uuid].status = "scheduled" @@ -116,11 +129,12 @@ class AppHandler(object): def _store_app_placements(self): 
(app_logfile, last_index, mode) = util.get_last_logfile( - self.config.app_log_loc, self.config.max_log_size, self.config.max_num_of_logs, - self.resource.datacenter.name, self.last_log_index) + self.config.app_log_loc, self.config.max_log_size, + self.config.max_num_of_logs, self.resource.datacenter.name, + self.last_log_index) self.last_log_index = last_index - # TODO: error handling + # TODO(UNKNOWN): error handling logging = open(self.config.app_log_loc + app_logfile, mode) @@ -141,19 +155,23 @@ class AppHandler(object): if self.db.add_app(appk, json_info) is False: return False - if self.db.update_app_log_index(self.resource.datacenter.name, self.last_log_index) is False: + if self.db.update_app_log_index(self.resource.datacenter.name, + self.last_log_index) is False: return False return True def remove_placement(self): + """Remove App from placement.""" if self.db is not None: for appk, _ in self.apps.iteritems(): if self.db.add_app(appk, None) is False: - self.logger.error("AppHandler: error while adding app info to MUSIC") + self.logger.error("AppHandler: error while adding app " + "info to MUSIC") # NOTE: ignore? 
def get_vm_info(self, _s_uuid, _h_uuid, _host): + """Return vm_info from database.""" vm_info = {} if _h_uuid is not None and _h_uuid != "none" and \ @@ -163,6 +181,7 @@ class AppHandler(object): return vm_info def update_vm_info(self, _s_uuid, _h_uuid): + """Update vm info (the ids) in the database.""" s_uuid_exist = bool(_s_uuid is not None and _s_uuid != "none") h_uuid_exist = bool(_h_uuid is not None and _h_uuid != "none") if s_uuid_exist and h_uuid_exist: @@ -216,26 +235,32 @@ class AppHandler(object): if _action == "replan": if vmk == _app["orchestration_id"]: - _app_topology.candidate_list_map[vmk] = _app["locations"] + _app_topology.candidate_list_map[vmk] = \ + _app["locations"] - self.logger.debug("AppHandler: re-requested vm = " + vm["name"] + " in") + self.logger.debug("AppHandler: re-requested vm = " + + vm["name"] + " in") for hk in _app["locations"]: self.logger.debug(" " + hk) elif vmk in _app["exclusions"]: _app_topology.planned_vm_map[vmk] = vm["host"] - self.logger.debug("AppHandler: exception from replan = " + vm["name"]) + self.logger.debug("AppHandler: exception from " + "replan = " + vm["name"]) elif _action == "migrate": if vmk == _app["orchestration_id"]: - _app_topology.exclusion_list_map[vmk] = _app["excluded_hosts"] + _app_topology.exclusion_list_map[vmk] = _app[ + "excluded_hosts"] if vm["host"] not in _app["excluded_hosts"]: - _app_topology.exclusion_list_map[vmk].append(vm["host"]) + _app_topology.exclusion_list_map[vmk].append( + vm["host"]) else: _app_topology.planned_vm_map[vmk] = vm["host"] - _app_topology.old_vm_map[vmk] = (vm["host"], vm["cpus"], vm["mem"], vm["local_volume"]) + _app_topology.old_vm_map[vmk] = (vm["host"], vm["cpus"], + vm["mem"], vm["local_volume"]) if "VGroups" in old_app.keys(): for gk, affinity in old_app["VGroups"].iteritems(): @@ -251,14 +276,16 @@ class AppHandler(object): resources[gk]["properties"] = properties if len(affinity["diversity_groups"]) > 0: - for divk, level_name in 
affinity["diversity_groups"].iteritems(): + for divk, level_name in \ + affinity["diversity_groups"].iteritems(): div_id = divk + ":" + level_name if div_id not in diversity_groups.keys(): diversity_groups[div_id] = [] diversity_groups[div_id].append(gk) if len(affinity["exclusivity_groups"]) > 0: - for exk, level_name in affinity["exclusivity_groups"].iteritems(): + for exk, level_name in \ + affinity["exclusivity_groups"].iteritems(): ex_id = exk + ":" + level_name if ex_id not in exclusivity_groups.keys(): exclusivity_groups[ex_id] = [] @@ -269,7 +296,8 @@ class AppHandler(object): for div_id, resource_list in diversity_groups.iteritems(): divk_level_name = div_id.split(":") resources[divk_level_name[0]] = {} - resources[divk_level_name[0]]["type"] = "ATT::Valet::GroupAssignment" + resources[divk_level_name[0]]["type"] = \ + "ATT::Valet::GroupAssignment" properties = {} properties["group_type"] = "diversity" properties["group_name"] = divk_level_name[2] diff --git a/valet/engine/optimizer/app_manager/app_topology.py b/valet/engine/optimizer/app_manager/app_topology.py index 9e08a17..2704277 100755 --- a/valet/engine/optimizer/app_manager/app_topology.py +++ b/valet/engine/optimizer/app_manager/app_topology.py @@ -1,41 +1,49 @@ # # Copyright 2014-2017 AT&T Intellectual Property -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+"""App Topology.""" + from valet.engine.optimizer.app_manager.app_topology_base import VM, VGroup from valet.engine.optimizer.app_manager.app_topology_parser import Parser class AppTopology(object): + """App Topology Class. + + This class contains functions for parsing and setting each app, as well as + calculating and setting optimization. + """ def __init__(self, _resource, _logger): + """Init App Topology Class.""" self.vgroups = {} self.vms = {} self.volumes = {} - ''' for replan ''' + """ for replan """ self.old_vm_map = {} self.planned_vm_map = {} self.candidate_list_map = {} - ''' for migration-tip ''' + """ for migration-tip """ self.exclusion_list_map = {} self.resource = _resource self.logger = _logger - ''' restriction of host naming convention ''' + """ restriction of host naming convention """ high_level_allowed = True if "none" in self.resource.datacenter.region_code_list: high_level_allowed = False @@ -51,15 +59,19 @@ class AppTopology(object): self.status = "success" - ''' parse and set each app ''' def set_app_topology(self, _app_graph): + """Set app topology (Parse and set each app). + + Set app topology by calling parser to determine vgroups, + vms and volumes. Then return parsed stack_id, app_name and action. 
+ """ (vgroups, vms, volumes) = self.parser.set_topology(_app_graph) if len(vgroups) == 0 and len(vms) == 0 and len(volumes) == 0: self.status = self.parser.status return None - ''' cumulate virtual resources ''' + """ cumulate virtual resources """ for _, vgroup in vgroups.iteritems(): self.vgroups[vgroup.uuid] = vgroup for _, vm in vms.iteritems(): @@ -67,9 +79,11 @@ class AppTopology(object): for _, vol in volumes.iteritems(): self.volumes[vol.uuid] = vol - return self.parser.stack_id, self.parser.application_name, self.parser.action + return self.parser.stack_id, self.parser.application_name, \ + self.parser.action def set_weight(self): + """Set weight of vms and vgroups.""" for _, vm in self.vms.iteritems(): self._set_vm_weight(vm) for _, vg in self.vgroups.iteritems(): @@ -87,19 +101,22 @@ class AppTopology(object): self._set_vm_weight(sg) else: if self.resource.CPU_avail > 0: - _v.vCPU_weight = float(_v.vCPUs) / float(self.resource.CPU_avail) + _v.vCPU_weight = float(_v.vCPUs) / \ + float(self.resource.CPU_avail) else: _v.vCPU_weight = 1.0 self.total_CPU += _v.vCPUs if self.resource.mem_avail > 0: - _v.mem_weight = float(_v.mem) / float(self.resource.mem_avail) + _v.mem_weight = float(_v.mem) / \ + float(self.resource.mem_avail) else: _v.mem_weight = 1.0 self.total_mem += _v.mem if self.resource.local_disk_avail > 0: - _v.local_volume_weight = float(_v.local_volume_size) / float(self.resource.local_disk_avail) + _v.local_volume_weight = float(_v.local_volume_size) / \ + float(self.resource.local_disk_avail) else: if _v.local_volume_size > 0: _v.local_volume_weight = 1.0 @@ -110,7 +127,8 @@ class AppTopology(object): bandwidth = _v.nw_bandwidth + _v.io_bandwidth if self.resource.nw_bandwidth_avail > 0: - _v.bandwidth_weight = float(bandwidth) / float(self.resource.nw_bandwidth_avail) + _v.bandwidth_weight = float(bandwidth) / \ + float(self.resource.nw_bandwidth_avail) else: if bandwidth > 0: _v.bandwidth_weight = 1.0 @@ -129,8 +147,10 @@ class 
AppTopology(object): _vg.local_volume_size += sg.local_volume_size def _set_vgroup_weight(self, _vgroup): + """Calculate weights for vgroup.""" if self.resource.CPU_avail > 0: - _vgroup.vCPU_weight = float(_vgroup.vCPUs) / float(self.resource.CPU_avail) + _vgroup.vCPU_weight = float(_vgroup.vCPUs) / \ + float(self.resource.CPU_avail) else: if _vgroup.vCPUs > 0: _vgroup.vCPU_weight = 1.0 @@ -138,7 +158,8 @@ class AppTopology(object): _vgroup.vCPU_weight = 0.0 if self.resource.mem_avail > 0: - _vgroup.mem_weight = float(_vgroup.mem) / float(self.resource.mem_avail) + _vgroup.mem_weight = float(_vgroup.mem) / \ + float(self.resource.mem_avail) else: if _vgroup.mem > 0: _vgroup.mem_weight = 1.0 @@ -146,7 +167,8 @@ class AppTopology(object): _vgroup.mem_weight = 0.0 if self.resource.local_disk_avail > 0: - _vgroup.local_volume_weight = float(_vgroup.local_volume_size) / float(self.resource.local_disk_avail) + _vgroup.local_volume_weight = float(_vgroup.local_volume_size) / \ + float(self.resource.local_disk_avail) else: if _vgroup.local_volume_size > 0: _vgroup.local_volume_weight = 1.0 @@ -156,7 +178,8 @@ class AppTopology(object): bandwidth = _vgroup.nw_bandwidth + _vgroup.io_bandwidth if self.resource.nw_bandwidth_avail > 0: - _vgroup.bandwidth_weight = float(bandwidth) / float(self.resource.nw_bandwidth_avail) + _vgroup.bandwidth_weight = float(bandwidth) / \ + float(self.resource.nw_bandwidth_avail) else: if bandwidth > 0: _vgroup.bandwidth_weight = 1.0 @@ -168,12 +191,20 @@ class AppTopology(object): self._set_vgroup_weight(svg) def set_optimization_priority(self): - if len(self.vgroups) == 0 and len(self.vms) == 0 and len(self.volumes) == 0: + """Set Optimization Priority. + + This function calculates weights for bandwidth, cpu, memory, local + and overall volume for an app. Then Sorts the results and sets + optimization order accordingly. 
+ """ + if len(self.vgroups) == 0 and len(self.vms) == 0 and \ + len(self.volumes) == 0: return app_nw_bandwidth_weight = -1 if self.resource.nw_bandwidth_avail > 0: - app_nw_bandwidth_weight = float(self.total_nw_bandwidth) / float(self.resource.nw_bandwidth_avail) + app_nw_bandwidth_weight = float(self.total_nw_bandwidth) / \ + float(self.resource.nw_bandwidth_avail) else: if self.total_nw_bandwidth > 0: app_nw_bandwidth_weight = 1.0 @@ -182,7 +213,8 @@ class AppTopology(object): app_CPU_weight = -1 if self.resource.CPU_avail > 0: - app_CPU_weight = float(self.total_CPU) / float(self.resource.CPU_avail) + app_CPU_weight = float(self.total_CPU) / \ + float(self.resource.CPU_avail) else: if self.total_CPU > 0: app_CPU_weight = 1.0 @@ -191,7 +223,8 @@ class AppTopology(object): app_mem_weight = -1 if self.resource.mem_avail > 0: - app_mem_weight = float(self.total_mem) / float(self.resource.mem_avail) + app_mem_weight = float(self.total_mem) / \ + float(self.resource.mem_avail) else: if self.total_mem > 0: app_mem_weight = 1.0 @@ -200,7 +233,8 @@ class AppTopology(object): app_local_vol_weight = -1 if self.resource.local_disk_avail > 0: - app_local_vol_weight = float(self.total_local_vol) / float(self.resource.local_disk_avail) + app_local_vol_weight = float(self.total_local_vol) / \ + float(self.resource.local_disk_avail) else: if self.total_local_vol > 0: app_local_vol_weight = 1.0 @@ -213,7 +247,8 @@ class AppTopology(object): app_vol_weight = -1 if self.resource.disk_avail > 0: - app_vol_weight = float(sum(total_vol_list)) / float(self.resource.disk_avail) + app_vol_weight = float(sum(total_vol_list)) / \ + float(self.resource.disk_avail) else: if sum(total_vol_list) > 0: app_vol_weight = 1.0 @@ -226,4 +261,6 @@ class AppTopology(object): ("lvol", app_local_vol_weight), ("vol", app_vol_weight)] - self.optimization_priority = sorted(opt, key=lambda resource: resource[1], reverse=True) + self.optimization_priority = sorted(opt, + key=lambda resource: resource[1], 
+ reverse=True) diff --git a/valet/engine/optimizer/app_manager/app_topology_base.py b/valet/engine/optimizer/app_manager/app_topology_base.py index e0dedcb..b74d584 100755 --- a/valet/engine/optimizer/app_manager/app_topology_base.py +++ b/valet/engine/optimizer/app_manager/app_topology_base.py @@ -1,24 +1,38 @@ # # Copyright 2014-2017 AT&T Intellectual Property -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +"""App Topology Base. + +This file contains different datatype base classes to be used when +buliding out app topology. These classes include VGroups, Volumes and Vms, +as well as 'Link' classes for each. +""" + LEVELS = ["host", "rack", "cluster"] class VGroup(object): + """VGroup Class. + + This class represents a VGroup object (virtual group). It contains + data about the volumes or vms it contains (such as compute resources), + and data about the group itself (group type, etc). + """ def __init__(self, _app_uuid, _uuid): + """Init VGroup Class.""" self.app_uuid = _app_uuid self.uuid = _uuid self.name = None @@ -55,6 +69,7 @@ class VGroup(object): self.host = None def get_json_info(self): + """Return JSON info of VGroup Object.""" survgroup_id = None if self.survgroup is None: survgroup_id = "none" @@ -95,8 +110,14 @@ class VGroup(object): class VM(object): + """VM Class. + + This class represents a Virtual Machine object. Examples of data this + class contains are compute resources, the host, and status. 
+ """ def __init__(self, _app_uuid, _uuid): + """Init VM Class.""" self.app_uuid = _app_uuid self.uuid = _uuid self.name = None @@ -129,6 +150,7 @@ class VM(object): self.host = None # where this vm is placed def get_json_info(self): + """Return JSON info for VM object.""" survgroup_id = None if self.survgroup is None: survgroup_id = "none" @@ -172,8 +194,15 @@ class VM(object): class Volume(object): + """Volume Class. + + This class represents a volume, containing an app id and name, as well as + a list of links to VMs and the groups it belongs to. This also contains + data about the resources needed such as size, bandwidth and weight. + """ def __init__(self, _app_uuid, _uuid): + """Init Volume Class.""" self.app_uuid = _app_uuid self.uuid = _uuid self.name = None @@ -198,6 +227,7 @@ class Volume(object): self.storage_host = None def get_json_info(self): + """Return JSON info for a Volume.""" survgroup_id = None if self.survgroup is None: survgroup_id = "none" @@ -229,35 +259,53 @@ class Volume(object): class VGroupLink(object): + """VGroup Link Class. + + This class represents a link between VGroups. + """ def __init__(self, _n): + """Init VGroup Link.""" self.node = _n # target VM or Volume self.nw_bandwidth = 0 self.io_bandwidth = 0 def get_json_info(self): + """Return JSON info of VGroup Link Object.""" return {'target': self.node.uuid, 'nw_bandwidth': self.nw_bandwidth, 'io_bandwidth': self.io_bandwidth} class VMLink(object): + """VM Link Class. + + This class represents a link between VMs. + """ def __init__(self, _n): + """Init VM Link.""" self.node = _n # target VM self.nw_bandwidth = 0 # Mbps def get_json_info(self): + """Return JSON info of VM Link Object.""" return {'target': self.node.uuid, 'nw_bandwidth': self.nw_bandwidth} class VolumeLink(object): + """Volume Link Class. + + This class represents a link between volumes. 
+ """ def __init__(self, _n): + """Init Volume Link.""" self.node = _n # target Volume self.io_bandwidth = 0 # Mbps def get_json_info(self): + """Return JSON info of Volume Link Object.""" return {'target': self.node.uuid, 'io_bandwidth': self.io_bandwidth} diff --git a/valet/engine/optimizer/app_manager/app_topology_parser.py b/valet/engine/optimizer/app_manager/app_topology_parser.py index 92de5d3..9601b61 100755 --- a/valet/engine/optimizer/app_manager/app_topology_parser.py +++ b/valet/engine/optimizer/app_manager/app_topology_parser.py @@ -1,22 +1,20 @@ # # Copyright 2014-2017 AT&T Intellectual Property -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from valet.engine.optimizer.app_manager.app_topology_base import VGroup, VGroupLink, VM, VMLink, LEVELS +"""App Topology Parser. - -''' - Restrictions of nested groups: EX in EX, EX in DIV, DIV in EX, DIV in DIV - VM/group cannot exist in multiple EX groups - Nested group's level cannot be higher than nesting group @@ -26,12 +24,21 @@ from valet.engine.optimizer.app_manager.app_topology_base import VGroup, VGroupL OS::Heat::Stack OS::Heat::ResourceGroup OS::Heat::ResourceGroup -''' +""" + +from valet.engine.optimizer.app_manager.app_topology_base \ + import VGroup, VGroupLink, VM, VMLink, LEVELS class Parser(object): + """Parser Class. + + This class handles parsing out the data related to the desired + topology from a template. 
+ """ def __init__(self, _high_level_allowed, _logger): + """Init Parser Class.""" self.logger = _logger self.high_level_allowed = _high_level_allowed @@ -44,6 +51,7 @@ class Parser(object): self.status = "success" def set_topology(self, _graph): + """Return result of set_topology which parses input to get topology.""" if "version" in _graph.keys(): self.format_version = _graph["version"] else: @@ -71,7 +79,7 @@ class Parser(object): vgroup_captured = False vms = {} - ''' empty at this version ''' + """ empty at this version """ volumes = {} for rk, r in _elements.iteritems(): @@ -96,7 +104,8 @@ class Parser(object): self.logger.debug("Parser: get a vm = " + vm.name) elif r["type"] == "OS::Cinder::Volume": - self.logger.warn("Parser: do nothing for volume at this version") + self.logger.warn("Parser: do nothing for volume at this " + "version") elif r["type"] == "ATT::Valet::GroupAssignment": vgroup = VGroup(self.stack_id, rk) @@ -110,7 +119,8 @@ class Parser(object): elif r["properties"]["group_type"] == "exclusivity": vgroup.vgroup_type = "EX" else: - self.status = "unknown group = " + r["properties"]["group_type"] + self.status = "unknown group = " + \ + r["properties"]["group_type"] return {}, {}, {} else: self.status = "no group type" @@ -129,8 +139,9 @@ class Parser(object): vgroup.level = r["properties"]["level"] if vgroup.level != "host": if self.high_level_allowed is False: - self.status = "only host level of affinity group allowed " + \ - "due to the mis-match of host naming convention" + self.status = "only host level of affinity group " \ + "allowed due to the mis-match of " \ + "host naming convention" return {}, {}, {} else: self.status = "no grouping level" @@ -150,16 +161,19 @@ class Parser(object): self.logger.debug("Parser: all vms parsed") - if self._merge_diversity_groups(_elements, vgroups, vms, volumes) is False: + if self._merge_diversity_groups(_elements, vgroups, vms, volumes) \ + is False: return {}, {}, {} - if 
self._merge_exclusivity_groups(_elements, vgroups, vms, volumes) is False: + if self._merge_exclusivity_groups(_elements, vgroups, vms, volumes) \ + is False: return {}, {}, {} - if self._merge_affinity_groups(_elements, vgroups, vms, volumes) is False: + if self._merge_affinity_groups(_elements, vgroups, vms, volumes) \ + is False: return {}, {}, {} - ''' delete all EX and DIV vgroups after merging ''' + """ delete all EX and DIV vgroups after merging """ for vgk in vgroups.keys(): vg = vgroups[vgk] if vg.vgroup_type == "DIV" or vg.vgroup_type == "EX": @@ -186,13 +200,15 @@ class Parser(object): if vk2 in _vms.keys(): link = VMLink(_vms[vk2]) if "bandwidth" in r["properties"].keys(): - link.nw_bandwidth = r["properties"]["bandwidth"]["min"] + link.nw_bandwidth = \ + r["properties"]["bandwidth"]["min"] vm.vm_list.append(link) def _set_volume_links(self, _elements, _vms, _volumes): for rk, r in _elements.iteritems(): if r["type"] == "OS::Cinder::VolumeAttachment": - self.logger.warn("Parser: do nothing for volume attachment at this version") + self.logger.warn("Parser: do nothing for volume attachment at " + "this version") return True @@ -219,23 +235,31 @@ class Parser(object): for vk in r["properties"]["resources"]: if vk in _vms.keys(): vgroup.subvgroups[vk] = _vms[vk] - _vms[vk].diversity_groups[rk] = vgroup.level + ":" + vgroup.name + _vms[vk].diversity_groups[rk] = \ + vgroup.level + ":" + vgroup.name elif vk in _volumes.keys(): vgroup.subvgroups[vk] = _volumes[vk] - _volumes[vk].diversity_groups[rk] = vgroup.level + ":" + vgroup.name + _volumes[vk].diversity_groups[rk] = \ + vgroup.level + ":" + vgroup.name elif vk in _vgroups.keys(): vg = _vgroups[vk] if LEVELS.index(vg.level) > LEVELS.index(level): - self.status = "grouping scope: nested group's level is higher" + self.status = "grouping scope: nested " \ + "group's level is higher" return False - if vg.vgroup_type == "DIV" or vg.vgroup_type == "EX": - self.status = "group type (" + vg.vgroup_type + ") not 
allowd to be nested in diversity group at this version" + if vg.vgroup_type == "DIV" or \ + vg.vgroup_type == "EX": + self.status = "group type (" + \ + vg.vgroup_type + ") not allowd " \ + "to be nested in diversity " \ + "group at this version" return False vgroup.subvgroups[vk] = vg - vg.diversity_groups[rk] = vgroup.level + ":" + vgroup.name + vg.diversity_groups[rk] = vgroup.level + ":" + \ + vgroup.name else: self.status = "invalid resource = " + vk return False @@ -254,23 +278,34 @@ class Parser(object): for vk in r["properties"]["resources"]: if vk in _vms.keys(): vgroup.subvgroups[vk] = _vms[vk] - _vms[vk].exclusivity_groups[rk] = vgroup.level + ":" + vgroup.name + _vms[vk].exclusivity_groups[rk] = \ + vgroup.level + ":" + vgroup.name elif vk in _volumes.keys(): vgroup.subvgroups[vk] = _volumes[vk] - _volumes[vk].exclusivity_groups[rk] = vgroup.level + ":" + vgroup.name + _volumes[vk].exclusivity_groups[rk] = \ + vgroup.level + ":" + vgroup.name elif vk in _vgroups.keys(): vg = _vgroups[vk] if LEVELS.index(vg.level) > LEVELS.index(level): - self.status = "grouping scope: nested group's level is higher" + self.status = "grouping scope: nested " \ + "group's level is higher" return False - if vg.vgroup_type == "DIV" or vg.vgroup_type == "EX": - self.status = "group type (" + vg.vgroup_type + ") not allowd to be nested in exclusivity group at this version" + if vg.vgroup_type == "DIV" or \ + vg.vgroup_type == "EX": + self.status = "group type (" + \ + vg.vgroup_type + ") not allowd " \ + "to be nested " \ + "in " \ + "exclusivity " \ + "group at " \ + "this version" return False vgroup.subvgroups[vk] = vg - vg.exclusivity_groups[rk] = vgroup.level + ":" + vgroup.name + vg.exclusivity_groups[rk] = vgroup.level + ":" + \ + vgroup.name else: self.status = "invalid resource = " + vk return False @@ -278,7 +313,8 @@ class Parser(object): return True def _merge_affinity_groups(self, _elements, _vgroups, _vms, _volumes): - affinity_map = {} # key is uuid of vm, 
volume, or vgroup & value is its parent vgroup + # key is uuid of vm, volume, or vgroup & value is its parent vgroup + affinity_map = {} for level in LEVELS: for rk, r in _elements.iteritems(): @@ -292,7 +328,8 @@ class Parser(object): else: continue - self.logger.debug("Parser: merge for affinity = " + vgroup.name) + self.logger.debug("Parser: merge for affinity = " + + vgroup.name) for vk in r["properties"]["resources"]: @@ -302,8 +339,10 @@ class Parser(object): affinity_map[vk] = vgroup - self._add_implicit_diversity_groups(vgroup, _vms[vk].diversity_groups) - self._add_implicit_exclusivity_groups(vgroup, _vms[vk].exclusivity_groups) + self._add_implicit_diversity_groups( + vgroup, _vms[vk].diversity_groups) + self._add_implicit_exclusivity_groups( + vgroup, _vms[vk].exclusivity_groups) self._add_memberships(vgroup, _vms[vk]) del _vms[vk] @@ -314,8 +353,10 @@ class Parser(object): affinity_map[vk] = vgroup - self._add_implicit_diversity_groups(vgroup, _volumes[vk].diversity_groups) - self._add_implicit_exclusivity_groups(vgroup, _volumes[vk].exclusivity_groups) + self._add_implicit_diversity_groups( + vgroup, _volumes[vk].diversity_groups) + self._add_implicit_exclusivity_groups( + vgroup, _volumes[vk].exclusivity_groups) self._add_memberships(vgroup, _volumes[vk]) del _volumes[vk] @@ -324,19 +365,23 @@ class Parser(object): vg = _vgroups[vk] if LEVELS.index(vg.level) > LEVELS.index(level): - self.status = "grouping scope: nested group's level is higher" + self.status = "grouping scope: nested " \ + "group's level is higher" return False if vg.vgroup_type == "DIV" or vg.vgroup_type == "EX": - if self._merge_subgroups(vgroup, vg.subvgroups, _vms, _volumes, _vgroups, - _elements, affinity_map) is False: + if self._merge_subgroups( + vgroup, vg.subvgroups, _vms, _volumes, + _vgroups, _elements, affinity_map) \ + is False: return False del _vgroups[vk] else: if self._exist_in_subgroups(vk, vgroup) is None: - if self._get_subgroups(vg, _elements, - _vgroups, _vms, 
_volumes, - affinity_map) is False: + if self._get_subgroups( + vg, _elements, _vgroups, _vms, + _volumes, affinity_map) \ + is False: return False vgroup.subvgroups[vk] = vg @@ -344,24 +389,29 @@ class Parser(object): affinity_map[vk] = vgroup - self._add_implicit_diversity_groups(vgroup, vg.diversity_groups) - self._add_implicit_exclusivity_groups(vgroup, vg.exclusivity_groups) + self._add_implicit_diversity_groups( + vgroup, vg.diversity_groups) + self._add_implicit_exclusivity_groups( + vgroup, vg.exclusivity_groups) self._add_memberships(vgroup, vg) del _vgroups[vk] - - else: # vk belongs to the other vgroup already or refer to invalid resource + else: + # vk belongs to the other vgroup already + # or refer to invalid resource if vk not in affinity_map.keys(): self.status = "invalid resource = " + vk return False if affinity_map[vk].uuid != vgroup.uuid: if self._exist_in_subgroups(vk, vgroup) is None: - self._set_implicit_grouping(vk, vgroup, affinity_map, _vgroups) + self._set_implicit_grouping( + vk, vgroup, affinity_map, _vgroups) return True - def _merge_subgroups(self, _vgroup, _subgroups, _vms, _volumes, _vgroups, _elements, _affinity_map): + def _merge_subgroups(self, _vgroup, _subgroups, _vms, _volumes, _vgroups, + _elements, _affinity_map): for vk, _ in _subgroups.iteritems(): if vk in _vms.keys(): _vgroup.subvgroups[vk] = _vms[vk] @@ -369,8 +419,10 @@ class Parser(object): _affinity_map[vk] = _vgroup - self._add_implicit_diversity_groups(_vgroup, _vms[vk].diversity_groups) - self._add_implicit_exclusivity_groups(_vgroup, _vms[vk].exclusivity_groups) + self._add_implicit_diversity_groups( + _vgroup, _vms[vk].diversity_groups) + self._add_implicit_exclusivity_groups( + _vgroup, _vms[vk].exclusivity_groups) self._add_memberships(_vgroup, _vms[vk]) del _vms[vk] @@ -381,8 +433,10 @@ class Parser(object): _affinity_map[vk] = _vgroup - self._add_implicit_diversity_groups(_vgroup, _volumes[vk].diversity_groups) - 
self._add_implicit_exclusivity_groups(_vgroup, _volumes[vk].exclusivity_groups) + self._add_implicit_diversity_groups( + _vgroup, _volumes[vk].diversity_groups) + self._add_implicit_exclusivity_groups( + _vgroup, _volumes[vk].exclusivity_groups) self._add_memberships(_vgroup, _volumes[vk]) del _volumes[vk] @@ -391,7 +445,8 @@ class Parser(object): vg = _vgroups[vk] if LEVELS.index(vg.level) > LEVELS.index(_vgroup.level): - self.status = "grouping scope: nested group's level is higher" + self.status = "grouping scope: nested group's level is " \ + "higher" return False if vg.vgroup_type == "DIV" or vg.vgroup_type == "EX": @@ -402,7 +457,9 @@ class Parser(object): del _vgroups[vk] else: if self._exist_in_subgroups(vk, _vgroup) is None: - if self._get_subgroups(vg, _elements, _vgroups, _vms, _volumes, _affinity_map) is False: + if self._get_subgroups(vg, _elements, _vgroups, _vms, + _volumes, _affinity_map) \ + is False: return False _vgroup.subvgroups[vk] = vg @@ -410,13 +467,16 @@ class Parser(object): _affinity_map[vk] = _vgroup - self._add_implicit_diversity_groups(_vgroup, vg.diversity_groups) - self._add_implicit_exclusivity_groups(_vgroup, vg.exclusivity_groups) + self._add_implicit_diversity_groups( + _vgroup, vg.diversity_groups) + self._add_implicit_exclusivity_groups( + _vgroup, vg.exclusivity_groups) self._add_memberships(_vgroup, vg) del _vgroups[vk] - - else: # vk belongs to the other vgroup already or refer to invalid resource + else: + # vk belongs to the other vgroup already + # or refer to invalid resource if vk not in _affinity_map.keys(): self.status = "invalid resource = " + vk return False @@ -427,7 +487,8 @@ class Parser(object): return True - def _get_subgroups(self, _vgroup, _elements, _vgroups, _vms, _volumes, _affinity_map): + def _get_subgroups(self, _vgroup, _elements, _vgroups, _vms, _volumes, + _affinity_map): for vk in _elements[_vgroup.uuid]["properties"]["resources"]: @@ -437,8 +498,10 @@ class Parser(object): _affinity_map[vk] = 
_vgroup - self._add_implicit_diversity_groups(_vgroup, _vms[vk].diversity_groups) - self._add_implicit_exclusivity_groups(_vgroup, _vms[vk].exclusivity_groups) + self._add_implicit_diversity_groups( + _vgroup, _vms[vk].diversity_groups) + self._add_implicit_exclusivity_groups( + _vgroup, _vms[vk].exclusivity_groups) self._add_memberships(_vgroup, _vms[vk]) del _vms[vk] @@ -449,8 +512,10 @@ class Parser(object): _affinity_map[vk] = _vgroup - self._add_implicit_diversity_groups(_vgroup, _volumes[vk].diversity_groups) - self._add_implicit_exclusivity_groups(_vgroup, _volumes[vk].exclusivity_groups) + self._add_implicit_diversity_groups( + _vgroup, _volumes[vk].diversity_groups) + self._add_implicit_exclusivity_groups( + _vgroup, _volumes[vk].exclusivity_groups) self._add_memberships(_vgroup, _volumes[vk]) del _volumes[vk] @@ -459,7 +524,8 @@ class Parser(object): vg = _vgroups[vk] if LEVELS.index(vg.level) > LEVELS.index(_vgroup.level): - self.status = "grouping scope: nested group's level is higher" + self.status = "grouping scope: nested group's level is " \ + "higher" return False if vg.vgroup_type == "DIV" or vg.vgroup_type == "EX": @@ -470,7 +536,9 @@ class Parser(object): del _vgroups[vk] else: if self._exist_in_subgroups(vk, _vgroup) is None: - if self._get_subgroups(vg, _elements, _vgroups, _vms, _volumes, _affinity_map) is False: + if self._get_subgroups( + vg, _elements, _vgroups, _vms, _volumes, + _affinity_map) is False: return False _vgroup.subvgroups[vk] = vg @@ -478,8 +546,10 @@ class Parser(object): _affinity_map[vk] = _vgroup - self._add_implicit_diversity_groups(_vgroup, vg.diversity_groups) - self._add_implicit_exclusivity_groups(_vgroup, vg.exclusivity_groups) + self._add_implicit_diversity_groups( + _vgroup, vg.diversity_groups) + self._add_implicit_exclusivity_groups( + _vgroup, vg.exclusivity_groups) self._add_memberships(_vgroup, vg) del _vgroups[vk] @@ -490,7 +560,8 @@ class Parser(object): if _affinity_map[vk].uuid != _vgroup.uuid: if 
self._exist_in_subgroups(vk, _vgroup) is None: - self._set_implicit_grouping(vk, _vgroup, _affinity_map, _vgroups) + self._set_implicit_grouping( + vk, _vgroup, _affinity_map, _vgroups) return True @@ -529,26 +600,25 @@ class Parser(object): def _set_implicit_grouping(self, _vk, _s_vg, _affinity_map, _vgroups): t_vg = _affinity_map[_vk] # where _vk currently belongs to - if t_vg.uuid in _affinity_map.keys(): # if the parent belongs to the other parent vgroup - self._set_implicit_grouping(t_vg.uuid, _s_vg, _affinity_map, _vgroups) + # if the parent belongs to the other parent vgroup + if t_vg.uuid in _affinity_map.keys(): + self._set_implicit_grouping( + t_vg.uuid, _s_vg, _affinity_map, _vgroups) else: if LEVELS.index(t_vg.level) > LEVELS.index(_s_vg.level): t_vg.level = _s_vg.level - ''' - self.status = "Grouping scope: sub-group's level is larger" - return False - ''' - if self._exist_in_subgroups(t_vg.uuid, _s_vg) is None: _s_vg.subvgroups[t_vg.uuid] = t_vg t_vg.survgroup = _s_vg _affinity_map[t_vg.uuid] = _s_vg - self._add_implicit_diversity_groups(_s_vg, t_vg.diversity_groups) - self._add_implicit_exclusivity_groups(_s_vg, t_vg.exclusivity_groups) + self._add_implicit_diversity_groups( + _s_vg, t_vg.diversity_groups) + self._add_implicit_exclusivity_groups( + _s_vg, t_vg.exclusivity_groups) self._add_memberships(_s_vg, t_vg) del _vgroups[t_vg.uuid] @@ -567,16 +637,19 @@ class Parser(object): return containing_vg_uuid def _set_vgroup_links(self, _vgroup, _vgroups, _vms, _volumes): - for _, svg in _vgroup.subvgroups.iteritems(): # currently, not define vgroup itself in pipe + for _, svg in _vgroup.subvgroups.iteritems(): + # currently, not define vgroup itself in pipe if isinstance(svg, VM): for vml in svg.vm_list: found = False for _, tvgroup in _vgroups.iteritems(): - containing_vg_uuid = self._exist_in_subgroups(vml.node.uuid, tvgroup) + containing_vg_uuid = self._exist_in_subgroups( + vml.node.uuid, tvgroup) if containing_vg_uuid is not None: found = True if 
containing_vg_uuid != _vgroup.uuid and \ - self._exist_in_subgroups(containing_vg_uuid, _vgroup) is None: + self._exist_in_subgroups( + containing_vg_uuid, _vgroup) is None: self._add_nw_link(vml, _vgroup) break if found is False: @@ -587,11 +660,13 @@ class Parser(object): for voll in svg.volume_list: found = False for _, tvgroup in _vgroups.iteritems(): - containing_vg_uuid = self._exist_in_subgroups(voll.node.uuid, tvgroup) + containing_vg_uuid = self._exist_in_subgroups( + voll.node.uuid, tvgroup) if containing_vg_uuid is not None: found = True if containing_vg_uuid != _vgroup.uuid and \ - self._exist_in_subgroups(containing_vg_uuid, _vgroup) is None: + self._exist_in_subgroups( + containing_vg_uuid, _vgroup) is None: self._add_io_link(voll, _vgroup) break if found is False: @@ -603,7 +678,8 @@ class Parser(object): self._set_vgroup_links(svg, _vgroups, _vms, _volumes) for svgl in svg.vgroup_list: # svgl is a link to VM or Volume - if self._exist_in_subgroups(svgl.node.uuid, _vgroup) is None: + if self._exist_in_subgroups(svgl.node.uuid, _vgroup) \ + is None: self._add_nw_link(svgl, _vgroup) self._add_io_link(svgl, _vgroup) diff --git a/valet/engine/optimizer/app_manager/application.py b/valet/engine/optimizer/app_manager/application.py index 23e98b8..7af0f1c 100755 --- a/valet/engine/optimizer/app_manager/application.py +++ b/valet/engine/optimizer/app_manager/application.py @@ -1,21 +1,30 @@ # # Copyright 2014-2017 AT&T Intellectual Property -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. +"""App.""" + + class App(object): + """App Class. + + This class represents an app object that consists of the name and id of + the app, as well as the status and vms/volumes/vgroups it belogns to. + """ def __init__(self, _app_id, _app_name, _action): + """Init App.""" self.app_id = _app_id self.app_name = _app_name @@ -30,21 +39,25 @@ class App(object): self.status = 'requested' # Moved to "scheduled" (and then "placed") def add_vm(self, _vm, _host_name): + """Add vm to app, set status to scheduled.""" self.vms[_vm.uuid] = _vm self.vms[_vm.uuid].status = "scheduled" self.vms[_vm.uuid].host = _host_name def add_volume(self, _vol, _host_name): + """Add volume to app, set status to scheduled.""" self.vms[_vol.uuid] = _vol self.vms[_vol.uuid].status = "scheduled" self.vms[_vol.uuid].storage_host = _host_name def add_vgroup(self, _vg, _host_name): + """Add vgroup to app, set status to scheduled.""" self.vgroups[_vg.uuid] = _vg self.vgroups[_vg.uuid].status = "scheduled" self.vgroups[_vg.uuid].host = _host_name def get_json_info(self): + """Return JSON info of App including vms, vols and vgs.""" vms = {} for vmk, vm in self.vms.iteritems(): vms[vmk] = vm.get_json_info() @@ -66,6 +79,7 @@ class App(object): 'VGroups': vgs} def log_in_info(self): + """Return in info related to login (time of login, app name, etc).""" return {'action': self.request_type, 'timestamp': self.timestamp_scheduled, 'stack_id': self.app_id, diff --git a/valet/engine/optimizer/db_connect/client.cfg b/valet/engine/optimizer/db_connect/client.cfg index 8f0825f..7b10c14 100644 --- a/valet/engine/optimizer/db_connect/client.cfg +++ b/valet/engine/optimizer/db_connect/client.cfg @@ -1,4 +1,4 @@ -# Version 2.0.2: Feb. 
9, 2016 +# Version 2.0.2: # Set database keyspace db_keyspace=valet_test @@ -12,6 +12,3 @@ db_app_table=app db_uuid_table=uuid_map #replication_factor=3 - - - diff --git a/valet/engine/optimizer/db_connect/configuration.py b/valet/engine/optimizer/db_connect/configuration.py index 84a6b3a..1e0f2d5 100644 --- a/valet/engine/optimizer/db_connect/configuration.py +++ b/valet/engine/optimizer/db_connect/configuration.py @@ -1,24 +1,32 @@ # # Copyright 2014-2017 AT&T Intellectual Property -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +"""Configuration.""" + import sys class Config(object): + """Config Class. + + This class consists of one function that reads client config options + from a file and sets the corresponding config variables of this class. 
+ """ def __init__(self): + """Init Config class.""" self.mode = None self.db_keyspace = None @@ -32,6 +40,7 @@ class Config(object): self.db_uuid_table = None def configure(self): + """Read client config file for config options and return success.""" try: f = open("./client.cfg", "r") line = f.readline() diff --git a/valet/engine/optimizer/db_connect/event.py b/valet/engine/optimizer/db_connect/event.py index 8d1c957..3592eac 100644 --- a/valet/engine/optimizer/db_connect/event.py +++ b/valet/engine/optimizer/db_connect/event.py @@ -1,24 +1,33 @@ # # Copyright 2014-2017 AT&T Intellectual Property -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +"""Event.""" + import json class Event(object): + """Event Class. + + This class represents an event and all the necessary metadata to + properly track it and set the data for the event. Handles object_action + events and build and run instance events. + """ def __init__(self, _id): + """Init Event Class.""" self.event_id = _id self.exchange = None self.method = None @@ -56,6 +65,13 @@ class Event(object): self.uuid = None def set_data(self): + """Set event data depending on method(action) performed. + + - If object_action, change data and calculate correct + compute resources for instance or Compute Node. + - If building_and_run_instance, get data from scheduler + and set heat values. 
+ """ if self.method == 'object_action': self.change_list = self.args['objinst']['nova_object.changes'] self.change_data = self.args['objinst']['nova_object.data'] @@ -103,33 +119,43 @@ class Event(object): if 'host' in self.change_data.keys(): self.host = self.change_data['host'] - if 'deleted' in self.change_list and 'deleted' in self.change_data.keys(): - if self.change_data['deleted'] == "true" or self.change_data['deleted'] is True: + if 'deleted' in self.change_list and 'deleted' in \ + self.change_data.keys(): + if self.change_data['deleted'] == "true" or \ + self.change_data['deleted'] is True: self.status = "disabled" - if 'vcpus' in self.change_list and 'vcpus' in self.change_data.keys(): + if 'vcpus' in self.change_list and 'vcpus' in \ + self.change_data.keys(): self.vcpus = self.change_data['vcpus'] - if 'vcpus_used' in self.change_list and 'vcpus_used' in self.change_data.keys(): + if 'vcpus_used' in self.change_list and 'vcpus_used' in \ + self.change_data.keys(): self.vcpus_used = self.change_data['vcpus_used'] - if 'memory_mb' in self.change_list and 'memory_mb' in self.change_data.keys(): + if 'memory_mb' in self.change_list and 'memory_mb' in \ + self.change_data.keys(): self.mem = self.change_data['memory_mb'] - if 'free_ram_mb' in self.change_list and 'free_ram_mb' in self.change_data.keys(): + if 'free_ram_mb' in self.change_list and 'free_ram_mb' in \ + self.change_data.keys(): self.free_mem = self.change_data['free_ram_mb'] - if 'local_gb' in self.change_list and 'local_gb' in self.change_data.keys(): + if 'local_gb' in self.change_list and 'local_gb' in \ + self.change_data.keys(): self.local_disk = self.change_data['local_gb'] - if 'free_disk_gb' in self.change_list and 'free_disk_gb' in self.change_data.keys(): + if 'free_disk_gb' in self.change_list and 'free_disk_gb' in \ + self.change_data.keys(): self.free_local_disk = self.change_data['free_disk_gb'] if 'disk_available_least' in self.change_list and \ 'disk_available_least' in 
self.change_data.keys(): - self.disk_available_least = self.change_data['disk_available_least'] + self.disk_available_least = \ + self.change_data['disk_available_least'] - if 'numa_topology' in self.change_list and 'numa_topology' in self.change_data.keys(): + if 'numa_topology' in self.change_list and 'numa_topology' in \ + self.change_data.keys(): str_numa_topology = self.change_data['numa_topology'] try: numa_topology = json.loads(str_numa_topology) @@ -137,7 +163,10 @@ class Event(object): if 'nova_object.data' in numa_topology.keys(): if 'cells' in numa_topology['nova_object.data']: - for cell in numa_topology['nova_object.data']['cells']: + for cell in \ + numa_topology[ + 'nova_object.data' + ]['cells']: self.numa_cell_list.append(cell) except (ValueError, KeyError, TypeError): @@ -146,13 +175,18 @@ class Event(object): elif self.method == 'build_and_run_instance': if 'scheduler_hints' in self.args['filter_properties'].keys(): - scheduler_hints = self.args['filter_properties']['scheduler_hints'] + scheduler_hints = self.args[ + 'filter_properties' + ]['scheduler_hints'] if 'heat_resource_name' in scheduler_hints.keys(): - self.heat_resource_name = scheduler_hints['heat_resource_name'] + self.heat_resource_name = \ + scheduler_hints['heat_resource_name'] if 'heat_resource_uuid' in scheduler_hints.keys(): - self.heat_resource_uuid = scheduler_hints['heat_resource_uuid'] + self.heat_resource_uuid = \ + scheduler_hints['heat_resource_uuid'] if 'heat_root_stack_id' in scheduler_hints.keys(): - self.heat_root_stack_id = scheduler_hints['heat_root_stack_id'] + self.heat_root_stack_id = \ + scheduler_hints['heat_root_stack_id'] if 'heat_stack_name' in scheduler_hints.keys(): self.heat_stack_name = scheduler_hints['heat_stack_name'] diff --git a/valet/engine/optimizer/db_connect/music_handler.py b/valet/engine/optimizer/db_connect/music_handler.py index 493bb17..79df55b 100644 --- a/valet/engine/optimizer/db_connect/music_handler.py +++ 
b/valet/engine/optimizer/db_connect/music_handler.py @@ -1,18 +1,20 @@ # # Copyright 2014-2017 AT&T Intellectual Property -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +"""Music Handler.""" + import json import operator from valet.api.db.models.music import Music @@ -20,8 +22,14 @@ from valet.engine.optimizer.db_connect.event import Event class MusicHandler(object): + """Music Handler Class. + + This Class consists of functions that interact with the music + database for valet and returns/deletes/updates objects within it. + """ def __init__(self, _config, _logger): + """Init Music Handler.""" self.config = _config self.logger = _logger @@ -32,9 +40,17 @@ class MusicHandler(object): if self.config.mode.startswith("sim"): self.music = Music() elif self.config.mode.startswith("live"): - self.music = Music(hosts=self.config.db_hosts, replication_factor=self.config.replication_factor) + self.music = Music( + hosts=self.config.db_hosts, + replication_factor=self.config.replication_factor) def init_db(self): + """Init Database. + + This function initializes a database in Music by creating all the + necessary tables with the proper schemas in Music using API calls. + Return True if no exceptions are caught. 
+ """ self.logger.info("MusicHandler.init_db: create table") try: @@ -51,7 +67,8 @@ class MusicHandler(object): 'PRIMARY KEY': '(stack_id)' } try: - self.music.create_table(self.config.db_keyspace, self.config.db_request_table, schema) + self.music.create_table(self.config.db_keyspace, + self.config.db_request_table, schema) except Exception as e: self.logger.error("MUSIC error: " + str(e)) return False @@ -62,7 +79,8 @@ class MusicHandler(object): 'PRIMARY KEY': '(stack_id)' } try: - self.music.create_table(self.config.db_keyspace, self.config.db_response_table, schema) + self.music.create_table(self.config.db_keyspace, + self.config.db_response_table, schema) except Exception as e: self.logger.error("MUSIC error: " + str(e)) return False @@ -75,7 +93,8 @@ class MusicHandler(object): 'PRIMARY KEY': '(timestamp)' } try: - self.music.create_table(self.config.db_keyspace, self.config.db_event_table, schema) + self.music.create_table(self.config.db_keyspace, + self.config.db_event_table, schema) except Exception as e: self.logger.error("MUSIC error: " + str(e)) return False @@ -86,7 +105,8 @@ class MusicHandler(object): 'PRIMARY KEY': '(site_name)' } try: - self.music.create_table(self.config.db_keyspace, self.config.db_resource_table, schema) + self.music.create_table(self.config.db_keyspace, + self.config.db_resource_table, schema) except Exception as e: self.logger.error("MUSIC error: " + str(e)) return False @@ -97,7 +117,8 @@ class MusicHandler(object): 'PRIMARY KEY': '(stack_id)' } try: - self.music.create_table(self.config.db_keyspace, self.config.db_app_table, schema) + self.music.create_table(self.config.db_keyspace, + self.config.db_app_table, schema) except Exception as e: self.logger.error("MUSIC error: " + str(e)) return False @@ -108,7 +129,8 @@ class MusicHandler(object): 'PRIMARY KEY': '(site_name)' } try: - self.music.create_table(self.config.db_keyspace, self.config.db_app_index_table, schema) + self.music.create_table(self.config.db_keyspace, + 
self.config.db_app_index_table, schema) except Exception as e: self.logger.error("MUSIC error: " + str(e)) return False @@ -119,7 +141,8 @@ class MusicHandler(object): 'PRIMARY KEY': '(site_name)' } try: - self.music.create_table(self.config.db_keyspace, self.config.db_resource_index_table, schema) + self.music.create_table(self.config.db_keyspace, + self.config.db_resource_index_table, schema) except Exception as e: self.logger.error("MUSIC error: " + str(e)) return False @@ -131,7 +154,8 @@ class MusicHandler(object): 'PRIMARY KEY': '(uuid)' } try: - self.music.create_table(self.config.db_keyspace, self.config.db_uuid_table, schema) + self.music.create_table(self.config.db_keyspace, + self.config.db_uuid_table, schema) except Exception as e: self.logger.error("MUSIC error: " + str(e)) return False @@ -139,11 +163,18 @@ class MusicHandler(object): return True def get_events(self): + """Get Events. + + This function obtains all events from the database and then + iterates through all of them to check the method and perform the + corresponding action on them. Return Event list. 
+ """ event_list = [] events = {} try: - events = self.music.read_all_rows(self.config.db_keyspace, self.config.db_event_table) + events = self.music.read_all_rows(self.config.db_keyspace, + self.config.db_event_table) except Exception as e: self.logger.error("MUSIC error while reading events: " + str(e)) return None @@ -155,30 +186,37 @@ class MusicHandler(object): method = row['method'] args_data = row['args'] - self.logger.debug("MusicHandler.get_events: event (" + event_id + ") is entered") + self.logger.debug("MusicHandler.get_events: event (" + + event_id + ") is entered") if exchange != "nova": if self.delete_event(event_id) is False: return None - self.logger.debug("MusicHandler.get_events: event exchange (" + exchange + ") is not supported") + self.logger.debug("MusicHandler.get_events: event exchange " + "(" + exchange + ") is not supported") continue - if method != 'object_action' and method != 'build_and_run_instance': + if method != 'object_action' and method != 'build_and_run_' \ + 'instance': if self.delete_event(event_id) is False: return None - self.logger.debug("MusicHandler.get_events: event method (" + method + ") is not considered") + self.logger.debug("MusicHandler.get_events: event method " + "(" + method + ") is not considered") continue if len(args_data) == 0: if self.delete_event(event_id) is False: return None - self.logger.debug("MusicHandler.get_events: event does not have args") + self.logger.debug("MusicHandler.get_events: event does not " + "have args") continue try: args = json.loads(args_data) except (ValueError, KeyError, TypeError): - self.logger.warn("MusicHandler.get_events: error while decoding to JSON event = " + method + ":" + event_id) + self.logger.warn("MusicHandler.get_events: error while " + "decoding to JSON event = " + method + + ":" + event_id) continue if method == 'object_action': @@ -193,15 +231,19 @@ class MusicHandler(object): change_data = objinst['nova_object.data'] if 'vm_state' in change_list and \ 
'vm_state' in change_data.keys(): - if change_data['vm_state'] == 'deleted' or \ - change_data['vm_state'] == 'active': + if change_data['vm_state'] == \ + 'deleted' \ + or change_data[ + 'vm_state' + ] == 'active': e = Event(event_id) e.exchange = exchange e.method = method e.args = args event_list.append(e) else: - if self.delete_event(event_id) is False: + if self.delete_event(event_id) \ + is False: return None else: if self.delete_event(event_id) is False: @@ -257,7 +299,8 @@ class MusicHandler(object): for e in event_list: e.set_data() - self.logger.debug("MusicHandler.get_events: event (" + e.event_id + ") is parsed") + self.logger.debug("MusicHandler.get_events: event (" + + e.event_id + ") is parsed") if e.method == "object_action": if e.object_name == 'Instance': @@ -265,17 +308,20 @@ class MusicHandler(object): e.host is None or e.host == "none" or \ e.vcpus == -1 or e.mem == -1: error_event_list.append(e) - self.logger.warn("MusicHandler.get_events: data missing in instance object event") + self.logger.warn("MusicHandler.get_events: data " + "missing in instance object event") elif e.object_name == 'ComputeNode': if e.host is None or e.host == "none": error_event_list.append(e) - self.logger.warn("MusicHandler.get_events: data missing in compute object event") + self.logger.warn("MusicHandler.get_events: data " + "missing in compute object event") elif e.method == "build_and_run_instance": if e.uuid is None or e.uuid == "none": error_event_list.append(e) - self.logger.warn("MusicHandler.get_events: data missing in build event") + self.logger.warn("MusicHandler.get_events: data missing " + "in build event") if len(error_event_list) > 0: event_list[:] = [e for e in event_list if e not in error_event_list] @@ -286,6 +332,7 @@ class MusicHandler(object): return event_list def delete_event(self, _event_id): + """Return True after deleting corresponding event row in db.""" try: self.music.delete_row_eventually(self.config.db_keyspace, 
self.config.db_event_table, @@ -297,12 +344,14 @@ class MusicHandler(object): return True def get_uuid(self, _uuid): + """Return h_uuid and s_uuid from matching _uuid row in music db.""" h_uuid = "none" s_uuid = "none" row = {} try: - row = self.music.read_row(self.config.db_keyspace, self.config.db_uuid_table, 'uuid', _uuid) + row = self.music.read_row(self.config.db_keyspace, + self.config.db_uuid_table, 'uuid', _uuid) except Exception as e: self.logger.error("MUSIC error while reading uuid: " + str(e)) return None @@ -311,18 +360,22 @@ class MusicHandler(object): h_uuid = row[row.keys()[0]]['h_uuid'] s_uuid = row[row.keys()[0]]['s_uuid'] - self.logger.info("MusicHandler.get_uuid: get heat uuid (" + h_uuid + ") for uuid = " + _uuid) + self.logger.info("MusicHandler.get_uuid: get heat uuid (" + + h_uuid + ") for uuid = " + _uuid) else: self.logger.debug("MusicHandler.get_uuid: heat uuid not found") return h_uuid, s_uuid def put_uuid(self, _e): + """Insert uuid, h_uuid and s_uuid from event into new row in db.""" heat_resource_uuid = "none" heat_root_stack_id = "none" - if _e.heat_resource_uuid is not None and _e.heat_resource_uuid != "none": + if _e.heat_resource_uuid is not None and \ + _e.heat_resource_uuid != "none": heat_resource_uuid = _e.heat_resource_uuid - if _e.heat_root_stack_id is not None and _e.heat_root_stack_id != "none": + if _e.heat_root_stack_id is not None and \ + _e.heat_root_stack_id != "none": heat_root_stack_id = _e.heat_root_stack_id data = { @@ -332,7 +385,8 @@ class MusicHandler(object): } try: - self.music.create_row(self.config.db_keyspace, self.config.db_uuid_table, data) + self.music.create_row(self.config.db_keyspace, + self.config.db_uuid_table, data) except Exception as e: self.logger.error("MUSIC error while inserting uuid: " + str(e)) return False @@ -342,8 +396,11 @@ class MusicHandler(object): return True def delete_uuid(self, _k): + """Return True after deleting row corresponding to event uuid.""" try: - 
self.music.delete_row_eventually(self.config.db_keyspace, self.config.db_uuid_table, 'uuid', _k) + self.music.delete_row_eventually(self.config.db_keyspace, + self.config.db_uuid_table, 'uuid', + _k) except Exception as e: self.logger.error("MUSIC error while deleting uuid: " + str(e)) return False @@ -351,17 +408,20 @@ class MusicHandler(object): return True def get_requests(self): + """Return list of requests that consists of all rows in a db table.""" request_list = [] requests = {} try: - requests = self.music.read_all_rows(self.config.db_keyspace, self.config.db_request_table) + requests = self.music.read_all_rows(self.config.db_keyspace, + self.config.db_request_table) except Exception as e: self.logger.error("MUSIC error while reading requests: " + str(e)) return None if len(requests) > 0: - self.logger.info("MusicHandler.get_requests: placement request arrived") + self.logger.info("MusicHandler.get_requests: placement request " + "arrived") for _, row in requests.iteritems(): self.logger.info(" request_id = " + row['stack_id']) @@ -373,6 +433,7 @@ class MusicHandler(object): return request_list def put_result(self, _result): + """Return True after putting result in db(create and delete rows).""" for appk, app_placement in _result.iteritems(): data = { 'stack_id': appk, @@ -380,12 +441,15 @@ class MusicHandler(object): } try: - self.music.create_row(self.config.db_keyspace, self.config.db_response_table, data) + self.music.create_row(self.config.db_keyspace, + self.config.db_response_table, data) except Exception as e: - self.logger.error("MUSIC error while putting placement result: " + str(e)) + self.logger.error("MUSIC error while putting placement " + "result: " + str(e)) return False - self.logger.info("MusicHandler.put_result: " + appk + " placement result added") + self.logger.info("MusicHandler.put_result: " + appk + + " placement result added") for appk in _result.keys(): try: @@ -393,37 +457,48 @@ class MusicHandler(object): 
self.config.db_request_table, 'stack_id', appk) except Exception as e: - self.logger.error("MUSIC error while deleting handled request: " + str(e)) + self.logger.error("MUSIC error while deleting handled " + "request: " + str(e)) return False - self.logger.info("MusicHandler.put_result: " + appk + " placement request deleted") + self.logger.info("MusicHandler.put_result: " + + appk + " placement request deleted") return True def get_resource_status(self, _k): + """Get Row of resource related to '_k' and return resource as json.""" json_resource = {} row = {} try: - row = self.music.read_row(self.config.db_keyspace, self.config.db_resource_table, 'site_name', _k, self.logger) + row = self.music.read_row(self.config.db_keyspace, + self.config.db_resource_table, + 'site_name', _k, self.logger) except Exception as e: - self.logger.error("MUSIC error while reading resource status: " + str(e)) + self.logger.error("MUSIC error while reading resource status: " + + str(e)) return None if len(row) > 0: str_resource = row[row.keys()[0]]['resource'] json_resource = json.loads(str_resource) - self.logger.info("MusicHandler.get_resource_status: get resource status") + self.logger.info("MusicHandler.get_resource_status: get resource " + "status") return json_resource def update_resource_status(self, _k, _status): + """Update resource _k to the new _status (flavors, lgs, hosts, etc).""" row = {} try: - row = self.music.read_row(self.config.db_keyspace, self.config.db_resource_table, 'site_name', _k) + row = self.music.read_row(self.config.db_keyspace, + self.config.db_resource_table, + 'site_name', _k) except Exception as e: - self.logger.error("MUSIC error while reading resource status: " + str(e)) + self.logger.error("MUSIC error while reading resource status: " + + str(e)) return False json_resource = {} @@ -485,7 +560,8 @@ class MusicHandler(object): self.config.db_resource_table, 'site_name', _k) except Exception as e: - self.logger.error("MUSIC error while deleting resource 
status: " + str(e)) + self.logger.error("MUSIC error while deleting resource " + "status: " + str(e)) return False else: @@ -497,34 +573,40 @@ class MusicHandler(object): } try: - self.music.create_row(self.config.db_keyspace, self.config.db_resource_table, data) + self.music.create_row(self.config.db_keyspace, + self.config.db_resource_table, data) except Exception as e: self.logger.error("MUSIC error: " + str(e)) return False - self.logger.info("MusicHandler.update_resource_status: resource status updated") + self.logger.info("MusicHandler.update_resource_status: resource status " + "updated") return True def update_resource_log_index(self, _k, _index): + """Update resource log index in database and return True.""" data = { 'site_name': _k, 'resource_log_index': str(_index) } try: - self.music.update_row_eventually(self.config.db_keyspace, - self.config.db_resource_index_table, - 'site_name', _k, data) + self.music.update_row_eventually( + self.config.db_keyspace, self.config.db_resource_index_table, + 'site_name', _k, data) except Exception as e: - self.logger.error("MUSIC error while updating resource log index: " + str(e)) + self.logger.error("MUSIC error while updating resource log " + "index: " + str(e)) return False - self.logger.info("MusicHandler.update_resource_log_index: resource log index updated") + self.logger.info("MusicHandler.update_resource_log_index: resource log " + "index updated") return True def update_app_log_index(self, _k, _index): + """Update app log index in database and return True.""" data = { 'site_name': _k, 'app_log_index': str(_index) @@ -535,16 +617,21 @@ class MusicHandler(object): self.config.db_app_index_table, 'site_name', _k, data) except Exception as e: - self.logger.error("MUSIC error while updating app log index: " + str(e)) + self.logger.error("MUSIC error while updating app log index: " + + str(e)) return False - self.logger.info("MusicHandler.update_app_log_index: app log index updated") + 
self.logger.info("MusicHandler.update_app_log_index: app log index " + "updated") return True def add_app(self, _k, _app_data): + """Add app to database in music and return True.""" try: - self.music.delete_row_eventually(self.config.db_keyspace, self.config.db_app_table, 'stack_id', _k) + self.music.delete_row_eventually( + self.config.db_keyspace, self.config.db_app_table, + 'stack_id', _k) except Exception as e: self.logger.error("MUSIC error while deleting app: " + str(e)) return False @@ -558,7 +645,8 @@ class MusicHandler(object): } try: - self.music.create_row(self.config.db_keyspace, self.config.db_app_table, data) + self.music.create_row(self.config.db_keyspace, + self.config.db_app_table, data) except Exception as e: self.logger.error("MUSIC error while inserting app: " + str(e)) return False @@ -568,11 +656,14 @@ class MusicHandler(object): return True def get_app_info(self, _s_uuid): + """Get app info for stack id and return as json object.""" json_app = {} row = {} try: - row = self.music.read_row(self.config.db_keyspace, self.config.db_app_table, 'stack_id', _s_uuid) + row = self.music.read_row(self.config.db_keyspace, + self.config.db_app_table, 'stack_id', + _s_uuid) except Exception as e: self.logger.error("MUSIC error while reading app info: " + str(e)) return None @@ -583,8 +674,9 @@ class MusicHandler(object): return json_app - # TODO: get all other VMs related to this VM + # TODO(UNKNOWN): get all other VMs related to this VM def get_vm_info(self, _s_uuid, _h_uuid, _host): + """Return vm info connected with ids and host passed in.""" updated = False json_app = {} @@ -592,7 +684,9 @@ class MusicHandler(object): row = {} try: - row = self.music.read_row(self.config.db_keyspace, self.config.db_app_table, 'stack_id', _s_uuid) + row = self.music.read_row(self.config.db_keyspace, + self.config.db_app_table, 'stack_id', + _s_uuid) except Exception as e: self.logger.error("MUSIC error: " + str(e)) return None @@ -608,8 +702,10 @@ class 
MusicHandler(object): if vm["host"] != _host: vm["planned_host"] = vm["host"] vm["host"] = _host - self.logger.warn("db: conflicted placement decision from Ostro") - # TODO: affinity, diversity, exclusivity validation check + self.logger.warn("db: conflicted placement " + "decision from Ostro") + # TODO(UNKOWN): affinity, diversity, + # exclusivity check updated = True else: self.logger.debug("db: placement as expected") @@ -621,10 +717,12 @@ class MusicHandler(object): vm_info = vm break else: - self.logger.error("MusicHandler.get_vm_info: vm is missing from stack") + self.logger.error("MusicHandler.get_vm_info: vm is missing " + "from stack") else: - self.logger.warn("MusicHandler.get_vm_info: not found stack for update = " + _s_uuid) + self.logger.warn("MusicHandler.get_vm_info: not found stack for " + "update = " + _s_uuid) if updated is True: if self.add_app(_s_uuid, json_app) is False: @@ -633,12 +731,15 @@ class MusicHandler(object): return vm_info def update_vm_info(self, _s_uuid, _h_uuid): + """Return true if vm's heat and heat stack ids are updated in db.""" updated = False json_app = {} row = {} try: - row = self.music.read_row(self.config.db_keyspace, self.config.db_app_table, 'stack_id', _s_uuid) + row = self.music.read_row(self.config.db_keyspace, + self.config.db_app_table, 'stack_id', + _s_uuid) except Exception as e: self.logger.error("MUSIC error: " + str(e)) return False @@ -659,10 +760,12 @@ class MusicHandler(object): break else: - self.logger.error("MusicHandler.update_vm_info: vm is missing from stack") + self.logger.error("MusicHandler.update_vm_info: vm is missing " + "from stack") else: - self.logger.warn("MusicHandler.update_vm_info: not found stack for update = " + _s_uuid) + self.logger.warn("MusicHandler.update_vm_info: not found stack for " + "update = " + _s_uuid) if updated is True: if self.add_app(_s_uuid, json_app) is False: diff --git a/valet/engine/optimizer/ostro/constraint_solver.py 
b/valet/engine/optimizer/ostro/constraint_solver.py index ae0a017..019cb03 100755 --- a/valet/engine/optimizer/ostro/constraint_solver.py +++ b/valet/engine/optimizer/ostro/constraint_solver.py @@ -1,29 +1,37 @@ # # Copyright 2014-2017 AT&T Intellectual Property -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from valet.engine.optimizer.app_manager.app_topology_base import VGroup, VM, LEVELS -from valet.engine.optimizer.ostro.openstack_filters import AggregateInstanceExtraSpecsFilter -from valet.engine.optimizer.ostro.openstack_filters import AvailabilityZoneFilter +"""ConstraintSolver.""" + +from valet.engine.optimizer.app_manager.app_topology_base \ + import VGroup, VM, LEVELS +from valet.engine.optimizer.ostro.openstack_filters \ + import AggregateInstanceExtraSpecsFilter +from valet.engine.optimizer.ostro.openstack_filters \ + import AvailabilityZoneFilter from valet.engine.optimizer.ostro.openstack_filters import CoreFilter from valet.engine.optimizer.ostro.openstack_filters import DiskFilter from valet.engine.optimizer.ostro.openstack_filters import RamFilter class ConstraintSolver(object): + """ConstraintSolver.""" def __init__(self, _logger): + """Initialization.""" + """Instantiate filters to help enforce constraints.""" self.logger = _logger self.openstack_AZ = AvailabilityZoneFilter(self.logger) @@ -34,12 +42,15 @@ class ConstraintSolver(object): self.status = "success" - def compute_candidate_list(self, _level, _n, _node_placements, _avail_resources, 
_avail_logical_groups): + def compute_candidate_list(self, _level, _n, _node_placements, + _avail_resources, _avail_logical_groups): + """Compute candidate list for the given VGroup or VM.""" candidate_list = [] - ''' when replanning ''' + """When replanning.""" if _n.node.host is not None and len(_n.node.host) > 0: - self.logger.debug("ConstraintSolver: reconsider with given candidates") + self.logger.debug("ConstraintSolver: reconsider with given " + "candidates") for hk in _n.node.host: for ark, ar in _avail_resources.iteritems(): if hk == ark: @@ -52,135 +63,166 @@ class ConstraintSolver(object): self.logger.warn("ConstraintSolver: " + self.status) return candidate_list else: - self.logger.debug("ConstraintSolver: num of candidates = " + str(len(candidate_list))) + self.logger.debug("ConstraintSolver: num of candidates = " + + str(len(candidate_list))) - ''' availability zone constraint ''' + """Availability zone constraint.""" if isinstance(_n.node, VGroup) or isinstance(_n.node, VM): - if (isinstance(_n.node, VM) and _n.node.availability_zone is not None) or \ - (isinstance(_n.node, VGroup) and len(_n.node.availability_zone_list) > 0): + if (isinstance(_n.node, VM) and _n.node.availability_zone + is not None) or (isinstance(_n.node, VGroup) and + len(_n.node.availability_zone_list) > 0): self._constrain_availability_zone(_level, _n, candidate_list) if len(candidate_list) == 0: - self.status = "violate availability zone constraint for node = " + _n.node.name + self.status = "violate availability zone constraint for " \ + "node = " + _n.node.name self.logger.error("ConstraintSolver: " + self.status) return candidate_list else: - self.logger.debug("ConstraintSolver: done availability_zone constraint") + self.logger.debug("ConstraintSolver: done availability_" + "zone constraint") - ''' host aggregate constraint ''' + """Host aggregate constraint.""" if isinstance(_n.node, VGroup) or isinstance(_n.node, VM): if len(_n.node.extra_specs_list) > 0: 
self._constrain_host_aggregates(_level, _n, candidate_list) if len(candidate_list) == 0: - self.status = "violate host aggregate constraint for node = " + _n.node.name + self.status = "violate host aggregate constraint for " \ + "node = " + _n.node.name self.logger.error("ConstraintSolver: " + self.status) return candidate_list else: - self.logger.debug("ConstraintSolver: done host_aggregate constraint") + self.logger.debug("ConstraintSolver: done host_aggregate " + "constraint") - ''' cpu capacity constraint ''' + """CPU capacity constraint.""" if isinstance(_n.node, VGroup) or isinstance(_n.node, VM): self._constrain_cpu_capacity(_level, _n, candidate_list) if len(candidate_list) == 0: - self.status = "violate cpu capacity constraint for node = " + _n.node.name + self.status = "violate cpu capacity constraint for " \ + "node = " + _n.node.name self.logger.error("ConstraintSolver: " + self.status) return candidate_list else: - self.logger.debug("ConstraintSolver: done cpu capacity constraint") + self.logger.debug("ConstraintSolver: done cpu capacity " + "constraint") - ''' memory capacity constraint ''' + """Memory capacity constraint.""" if isinstance(_n.node, VGroup) or isinstance(_n.node, VM): self._constrain_mem_capacity(_level, _n, candidate_list) if len(candidate_list) == 0: - self.status = "violate memory capacity constraint for node = " + _n.node.name + self.status = "violate memory capacity constraint for " \ + "node = " + _n.node.name self.logger.error("ConstraintSolver: " + self.status) return candidate_list else: - self.logger.debug("ConstraintSolver: done memory capacity constraint") + self.logger.debug("ConstraintSolver: done memory capacity " + "constraint") - ''' local disk capacity constraint ''' + """Local disk capacity constraint.""" if isinstance(_n.node, VGroup) or isinstance(_n.node, VM): self._constrain_local_disk_capacity(_level, _n, candidate_list) if len(candidate_list) == 0: - self.status = "violate local disk capacity constraint for 
node = " + _n.node.name + self.status = "violate local disk capacity constraint for " \ + "node = " + _n.node.name self.logger.error("ConstraintSolver: " + self.status) return candidate_list else: - self.logger.debug("ConstraintSolver: done local disk capacity constraint") + self.logger.debug("ConstraintSolver: done local disk capacity " + "constraint") - ''' network bandwidth constraint ''' - self._constrain_nw_bandwidth_capacity(_level, _n, _node_placements, candidate_list) + """Network bandwidth constraint.""" + self._constrain_nw_bandwidth_capacity(_level, _n, _node_placements, + candidate_list) if len(candidate_list) == 0: - self.status = "violate nw bandwidth capacity constraint for node = " + _n.node.name + self.status = "violate nw bandwidth capacity constraint for " \ + "node = " + _n.node.name self.logger.error("ConstraintSolver: " + self.status) return candidate_list else: - self.logger.debug("ConstraintSolver: done bandwidth capacity constraint") + self.logger.debug("ConstraintSolver: done bandwidth capacity " + "constraint") - ''' diversity constraint ''' + """Diversity constraint.""" if len(_n.node.diversity_groups) > 0: for _, diversity_id in _n.node.diversity_groups.iteritems(): if diversity_id.split(":")[0] == _level: if diversity_id in _avail_logical_groups.keys(): - self._constrain_diversity_with_others(_level, diversity_id, candidate_list) + self._constrain_diversity_with_others(_level, + diversity_id, + candidate_list) if len(candidate_list) == 0: break if len(candidate_list) == 0: - self.status = "violate diversity constraint for node = " + _n.node.name + self.status = "violate diversity constraint for " \ + "node = " + _n.node.name self.logger.error("ConstraintSolver: " + self.status) return candidate_list else: - self._constrain_diversity(_level, _n, _node_placements, candidate_list) + self._constrain_diversity(_level, _n, _node_placements, + candidate_list) if len(candidate_list) == 0: - self.status = "violate diversity constraint for node 
= " + _n.node.name + self.status = "violate diversity constraint for " \ + "node = " + _n.node.name self.logger.error("ConstraintSolver: " + self.status) return candidate_list else: - self.logger.debug("ConstraintSolver: done diversity_group constraint") + self.logger.debug("ConstraintSolver: done diversity_group " + "constraint") - ''' exclusivity constraint ''' - exclusivities = self.get_exclusivities(_n.node.exclusivity_groups, _level) + """Exclusivity constraint.""" + exclusivities = self.get_exclusivities(_n.node.exclusivity_groups, + _level) if len(exclusivities) > 1: - self.status = "violate exclusivity constraint (more than one exclusivity) for node = " + _n.node.name + self.status = "violate exclusivity constraint (more than one " \ + "exclusivity) for node = " + _n.node.name self.logger.error("ConstraintSolver: " + self.status) return [] else: if len(exclusivities) == 1: exclusivity_id = exclusivities[exclusivities.keys()[0]] if exclusivity_id.split(":")[0] == _level: - self._constrain_exclusivity(_level, exclusivity_id, candidate_list) + self._constrain_exclusivity(_level, exclusivity_id, + candidate_list) if len(candidate_list) == 0: - self.status = "violate exclusivity constraint for node = " + _n.node.name + self.status = "violate exclusivity constraint for " \ + "node = " + _n.node.name self.logger.error("ConstraintSolver: " + self.status) return candidate_list else: - self.logger.debug("ConstraintSolver: done exclusivity_group constraint") + self.logger.debug("ConstraintSolver: done exclusivity " + "group constraint") else: self._constrain_non_exclusivity(_level, candidate_list) if len(candidate_list) == 0: - self.status = "violate non-exclusivity constraint for node = " + _n.node.name + self.status = "violate non-exclusivity constraint for " \ + "node = " + _n.node.name self.logger.error("ConstraintSolver: " + self.status) return candidate_list else: - self.logger.debug("ConstraintSolver: done non-exclusivity_group constraint") + 
self.logger.debug("ConstraintSolver: done non-exclusivity_" + "group constraint") - ''' affinity constraint ''' + """Affinity constraint.""" affinity_id = _n.get_affinity_id() # level:name, except name == "any" if affinity_id is not None: if affinity_id.split(":")[0] == _level: if affinity_id in _avail_logical_groups.keys(): - self._constrain_affinity(_level, affinity_id, candidate_list) + self._constrain_affinity(_level, affinity_id, + candidate_list) if len(candidate_list) == 0: - self.status = "violate affinity constraint for node = " + _n.node.name + self.status = "violate affinity constraint for " \ + "node = " + _n.node.name self.logger.error("ConstraintSolver: " + self.status) return candidate_list else: - self.logger.debug("ConstraintSolver: done affinity_group constraint") + self.logger.debug("ConstraintSolver: done affinity_" + "group constraint") return candidate_list - ''' - constraint modules - ''' + """ + Constraint modules. + """ def _constrain_affinity(self, _level, _affinity_id, _candidate_list): conflict_list = [] @@ -191,11 +233,14 @@ class ConstraintSolver(object): conflict_list.append(r) debug_resource_name = r.get_resource_name(_level) - self.logger.debug("ConstraintSolver: not exist affinity in resource = " + debug_resource_name) + self.logger.debug("ConstraintSolver: not exist affinity " + "in resource = " + debug_resource_name) - _candidate_list[:] = [c for c in _candidate_list if c not in conflict_list] + _candidate_list[:] = [c for c in _candidate_list + if c not in conflict_list] - def _constrain_diversity_with_others(self, _level, _diversity_id, _candidate_list): + def _constrain_diversity_with_others(self, _level, _diversity_id, + _candidate_list): conflict_list = [] for r in _candidate_list: @@ -204,11 +249,17 @@ class ConstraintSolver(object): conflict_list.append(r) debug_resource_name = r.get_resource_name(_level) - self.logger.debug("ConstraintSolver: conflict diversity in resource = " + debug_resource_name) + 
self.logger.debug("ConstraintSolver: conflict diversity " + "in resource = " + debug_resource_name) - _candidate_list[:] = [c for c in _candidate_list if c not in conflict_list] + _candidate_list[:] = [c for c in _candidate_list + if c not in conflict_list] def exist_group(self, _level, _id, _group_type, _candidate): + """Check if group esists.""" + """Return True if there exists a group within the candidate's + membership list that matches the provided id and group type. + """ match = False memberships = _candidate.get_memberships(_level) @@ -219,7 +270,8 @@ class ConstraintSolver(object): return match - def _constrain_diversity(self, _level, _n, _node_placements, _candidate_list): + def _constrain_diversity(self, _level, _n, _node_placements, + _candidate_list): conflict_list = [] for r in _candidate_list: @@ -228,29 +280,40 @@ class ConstraintSolver(object): conflict_list.append(r) resource_name = r.get_resource_name(_level) - self.logger.debug("ConstraintSolver: conflict the diversity in resource = " + resource_name) + self.logger.debug("ConstraintSolver: conflict the " + "diversity in resource = " + + resource_name) - _candidate_list[:] = [c for c in _candidate_list if c not in conflict_list] + _candidate_list[:] = [c for c in _candidate_list + if c not in conflict_list] def conflict_diversity(self, _level, _n, _node_placements, _candidate): + """Return True if the candidate has a placement conflict.""" conflict = False for v in _node_placements.keys(): diversity_level = _n.get_common_diversity(v.diversity_groups) - if diversity_level != "ANY" and LEVELS.index(diversity_level) >= LEVELS.index(_level): + if diversity_level != "ANY" and \ + LEVELS.index(diversity_level) >= \ + LEVELS.index(_level): if diversity_level == "host": - if _candidate.cluster_name == _node_placements[v].cluster_name and \ - _candidate.rack_name == _node_placements[v].rack_name and \ - _candidate.host_name == _node_placements[v].host_name: + if _candidate.cluster_name == \ + 
_node_placements[v].cluster_name and \ + _candidate.rack_name == \ + _node_placements[v].rack_name and \ + _candidate.host_name == \ + _node_placements[v].host_name: conflict = True break elif diversity_level == "rack": - if _candidate.cluster_name == _node_placements[v].cluster_name and \ + if _candidate.cluster_name == \ + _node_placements[v].cluster_name and \ _candidate.rack_name == _node_placements[v].rack_name: conflict = True break elif diversity_level == "cluster": - if _candidate.cluster_name == _node_placements[v].cluster_name: + if _candidate.cluster_name == \ + _node_placements[v].cluster_name: conflict = True break @@ -265,21 +328,31 @@ class ConstraintSolver(object): conflict_list.append(r) debug_resource_name = r.get_resource_name(_level) - self.logger.debug("ConstraintSolver: exclusivity defined in resource = " + debug_resource_name) + self.logger.debug("ConstraintSolver: exclusivity defined " + "in resource = " + debug_resource_name) - _candidate_list[:] = [c for c in _candidate_list if c not in conflict_list] + _candidate_list[:] = [c for c in _candidate_list + if c not in conflict_list] def conflict_exclusivity(self, _level, _candidate): + """Check for an exculsivity conflict.""" + """Check if the candidate contains an exclusivity group within its + list of memberships.""" conflict = False memberships = _candidate.get_memberships(_level) for mk in memberships.keys(): - if memberships[mk].group_type == "EX" and mk.split(":")[0] == _level: + if memberships[mk].group_type == "EX" and \ + mk.split(":")[0] == _level: conflict = True return conflict def get_exclusivities(self, _exclusivity_groups, _level): + """Return a list of filtered exclusivities.""" + """Extract and return only those exclusivities that exist at the + specified level. 
+ """ exclusivities = {} for exk, level in _exclusivity_groups.iteritems(): @@ -289,15 +362,20 @@ class ConstraintSolver(object): return exclusivities def _constrain_exclusivity(self, _level, _exclusivity_id, _candidate_list): - candidate_list = self._get_exclusive_candidates(_level, _exclusivity_id, _candidate_list) + candidate_list = self._get_exclusive_candidates(_level, _exclusivity_id, + _candidate_list) if len(candidate_list) == 0: - candidate_list = self._get_hibernated_candidates(_level, _candidate_list) - _candidate_list[:] = [x for x in _candidate_list if x in candidate_list] + candidate_list = self._get_hibernated_candidates(_level, + _candidate_list) + _candidate_list[:] = [x for x in _candidate_list + if x in candidate_list] else: - _candidate_list[:] = [x for x in _candidate_list if x in candidate_list] + _candidate_list[:] = [x for x in _candidate_list + if x in candidate_list] - def _get_exclusive_candidates(self, _level, _exclusivity_id, _candidate_list): + def _get_exclusive_candidates(self, _level, _exclusivity_id, + _candidate_list): candidate_list = [] for r in _candidate_list: @@ -306,7 +384,8 @@ class ConstraintSolver(object): candidate_list.append(r) else: debug_resource_name = r.get_resource_name(_level) - self.logger.debug("ConstraintSolver: exclusivity not exist in resource = " + debug_resource_name) + self.logger.debug("ConstraintSolver: exclusivity not exist in " + "resource = " + debug_resource_name) return candidate_list @@ -319,11 +398,16 @@ class ConstraintSolver(object): candidate_list.append(r) else: debug_resource_name = r.get_resource_name(_level) - self.logger.debug("ConstraintSolver: exclusivity not allowed in resource = " + debug_resource_name) + self.logger.debug("ConstraintSolver: exclusivity not allowed " + "in resource = " + debug_resource_name) return candidate_list def check_hibernated(self, _level, _candidate): + """Check if the candidate is hibernated.""" + """Return True if the candidate has no placed VMs at the 
specified + level. + """ match = False num_of_placed_vms = _candidate.get_num_of_placed_vms(_level) @@ -341,11 +425,14 @@ class ConstraintSolver(object): conflict_list.append(r) debug_resource_name = r.get_resource_name(_level) - self.logger.debug("ConstraintSolver: not meet aggregate in resource = " + debug_resource_name) + self.logger.debug("ConstraintSolver: not meet aggregate " + "in resource = " + debug_resource_name) - _candidate_list[:] = [c for c in _candidate_list if c not in conflict_list] + _candidate_list[:] = [c for c in _candidate_list + if c not in conflict_list] def check_host_aggregates(self, _level, _candidate, _v): + """Check if the candidate passes the aggregate instance extra specs zone filter.""" return self.openstack_AIES.host_passes(_level, _candidate, _v) def _constrain_availability_zone(self, _level, _n, _candidate_list): @@ -357,11 +444,14 @@ class ConstraintSolver(object): conflict_list.append(r) debug_resource_name = r.get_resource_name(_level) - self.logger.debug("ConstraintSolver: not meet az in resource = " + debug_resource_name) + self.logger.debug("ConstraintSolver: not meet az in " + "resource = " + debug_resource_name) - _candidate_list[:] = [c for c in _candidate_list if c not in conflict_list] + _candidate_list[:] = [c for c in _candidate_list + if c not in conflict_list] def check_availability_zone(self, _level, _candidate, _v): + """Check if the candidate passes the availability zone filter.""" return self.openstack_AZ.host_passes(_level, _candidate, _v) def _constrain_cpu_capacity(self, _level, _n, _candidate_list): @@ -372,11 +462,14 @@ class ConstraintSolver(object): conflict_list.append(ch) debug_resource_name = ch.get_resource_name(_level) - self.logger.debug("ConstraintSolver: lack of cpu in " + debug_resource_name) + self.logger.debug("ConstraintSolver: lack of cpu in " + + debug_resource_name) - _candidate_list[:] = [c for c in _candidate_list if c not in conflict_list] + _candidate_list[:] = [c for c in 
_candidate_list + if c not in conflict_list] def check_cpu_capacity(self, _level, _v, _candidate): + """Check if the candidate passes the core filter.""" return self.openstack_C.host_passes(_level, _candidate, _v) def _constrain_mem_capacity(self, _level, _n, _candidate_list): @@ -387,11 +480,14 @@ class ConstraintSolver(object): conflict_list.append(ch) debug_resource_name = ch.get_resource_name(_level) - self.logger.debug("ConstraintSolver: lack of mem in " + debug_resource_name) + self.logger.debug("ConstraintSolver: lack of mem in " + + debug_resource_name) - _candidate_list[:] = [c for c in _candidate_list if c not in conflict_list] + _candidate_list[:] = [c for c in _candidate_list + if c not in conflict_list] def check_mem_capacity(self, _level, _v, _candidate): + """Check if the candidate passes the RAM filter.""" return self.openstack_R.host_passes(_level, _candidate, _v) def _constrain_local_disk_capacity(self, _level, _n, _candidate_list): @@ -402,11 +498,14 @@ class ConstraintSolver(object): conflict_list.append(ch) debug_resource_name = ch.get_resource_name(_level) - self.logger.debug("ConstraintSolver: lack of local disk in " + debug_resource_name) + self.logger.debug("ConstraintSolver: lack of local disk in " + + debug_resource_name) - _candidate_list[:] = [c for c in _candidate_list if c not in conflict_list] + _candidate_list[:] = [c for c in _candidate_list + if c not in conflict_list] def check_local_disk_capacity(self, _level, _v, _candidate): + """Check if the candidate passes the disk filter.""" return self.openstack_D.host_passes(_level, _candidate, _v) def _constrain_storage_capacity(self, _level, _n, _candidate_list): @@ -434,11 +533,14 @@ class ConstraintSolver(object): if vc == "any" or s.storage_class == vc: avail_disks.append(s.storage_avail_disk) - self.logger.debug("ConstraintSolver: storage constrained in resource = " + debug_resource_name) + self.logger.debug("ConstraintSolver: storage constrained in" + "resource = " + 
debug_resource_name) - _candidate_list[:] = [c for c in _candidate_list if c not in conflict_list] + _candidate_list[:] = [c for c in _candidate_list + if c not in conflict_list] def check_storage_availability(self, _level, _v, _ch): + """Return True if there is sufficient storage availability.""" available = False volume_sizes = [] @@ -462,21 +564,28 @@ class ConstraintSolver(object): return available - def _constrain_nw_bandwidth_capacity(self, _level, _n, _node_placements, _candidate_list): + def _constrain_nw_bandwidth_capacity(self, _level, _n, _node_placements, + _candidate_list): conflict_list = [] for cr in _candidate_list: - if self.check_nw_bandwidth_availability(_level, _n, _node_placements, cr) is False: + if self.check_nw_bandwidth_availability( + _level, _n, _node_placements, cr) is False: if cr not in conflict_list: conflict_list.append(cr) debug_resource_name = cr.get_resource_name(_level) - self.logger.debug("ConstraintSolver: bw constrained in resource = " + debug_resource_name) + self.logger.debug("ConstraintSolver: bw constrained in " + "resource = " + debug_resource_name) - _candidate_list[:] = [c for c in _candidate_list if c not in conflict_list] + _candidate_list[:] = [c for c in _candidate_list + if c not in conflict_list] - def check_nw_bandwidth_availability(self, _level, _n, _node_placements, _cr): - # NOTE: 3rd entry for special node requiring bandwidth of out-going from spine switch + def check_nw_bandwidth_availability(self, _level, _n, _node_placements, + _cr): + """Return True if there is sufficient network availability.""" + # NOTE: 3rd entry for special node requiring bandwidth of out-going + # from spine switch total_req_bandwidths = [0, 0, 0] link_list = _n.get_all_links() @@ -486,26 +595,35 @@ class ConstraintSolver(object): placement_level = None if vl.node in _node_placements.keys(): # vl.node is VM or Volume - placement_level = _node_placements[vl.node].get_common_placement(_cr) + placement_level = \ + 
_node_placements[vl.node].get_common_placement(_cr) else: # in the open list - placement_level = _n.get_common_diversity(vl.node.diversity_groups) + placement_level = \ + _n.get_common_diversity(vl.node.diversity_groups) if placement_level == "ANY": - implicit_diversity = self.get_implicit_diversity(_n.node, link_list, vl.node, _level) + implicit_diversity = self.get_implicit_diversity(_n.node, + link_list, + vl.node, + _level) if implicit_diversity[0] is not None: placement_level = implicit_diversity[1] - self.get_req_bandwidths(_level, placement_level, bandwidth, total_req_bandwidths) + self.get_req_bandwidths(_level, placement_level, bandwidth, + total_req_bandwidths) - return self._check_nw_bandwidth_availability(_level, total_req_bandwidths, _cr) + return self._check_nw_bandwidth_availability(_level, + total_req_bandwidths, _cr) # to find any implicit diversity relation caused by the other links of _v # (i.e., intersection between _v and _target_v) def get_implicit_diversity(self, _v, _link_list, _target_v, _level): + """Get the maximum implicit diversity between _v and _target_v.""" max_implicit_diversity = (None, 0) for vl in _link_list: diversity_level = _v.get_common_diversity(vl.node.diversity_groups) - if diversity_level != "ANY" and LEVELS.index(diversity_level) >= LEVELS.index(_level): + if diversity_level != "ANY" \ + and LEVELS.index(diversity_level) >= LEVELS.index(_level): for dk, dl in vl.node.diversity_groups.iteritems(): if LEVELS.index(dl) > LEVELS.index(diversity_level): if _target_v.uuid != vl.node.uuid: @@ -515,7 +633,9 @@ class ConstraintSolver(object): return max_implicit_diversity - def get_req_bandwidths(self, _level, _placement_level, _bandwidth, _total_req_bandwidths): + def get_req_bandwidths(self, _level, _placement_level, _bandwidth, + _total_req_bandwidths): + """Calculate and update total required bandwidths.""" if _level == "cluster" or _level == "rack": if _placement_level == "cluster" or _placement_level == "rack": 
_total_req_bandwidths[1] += _bandwidth @@ -526,7 +646,8 @@ class ConstraintSolver(object): elif _placement_level == "host": _total_req_bandwidths[0] += _bandwidth - def _check_nw_bandwidth_availability(self, _level, _req_bandwidths, _candidate_resource): + def _check_nw_bandwidth_availability(self, _level, _req_bandwidths, + _candidate_resource): available = True if _level == "cluster": @@ -557,7 +678,8 @@ class ConstraintSolver(object): for _, sr in _candidate_resource.rack_avail_switches.iteritems(): rack_avail_bandwidths.append(max(sr.avail_bandwidths)) - avail_bandwidth = min(max(host_avail_bandwidths), max(rack_avail_bandwidths)) + avail_bandwidth = min(max(host_avail_bandwidths), + max(rack_avail_bandwidths)) if avail_bandwidth < _req_bandwidths[1]: available = False diff --git a/valet/engine/optimizer/ostro/openstack_filters.py b/valet/engine/optimizer/ostro/openstack_filters.py index 18c504a..5b87546 100755 --- a/valet/engine/optimizer/ostro/openstack_filters.py +++ b/valet/engine/optimizer/ostro/openstack_filters.py @@ -1,22 +1,24 @@ # # Copyright 2014-2017 AT&T Intellectual Property -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+"""AggregateInstanceExtraSpecsFilter.""" + import six from valet.engine.optimizer.app_manager.app_topology_base import VM -import valet.engine.optimizer.ostro.openstack_utils +from valet.engine.optimizer.ostro import openstack_utils _SCOPE = 'aggregate_instance_extra_specs' @@ -28,14 +30,13 @@ class AggregateInstanceExtraSpecsFilter(object): run_filter_once_per_request = True def __init__(self, _logger): + """Initialization.""" self.logger = _logger def host_passes(self, _level, _host, _v): - """Return a list of hosts that can create instance_type - - Check that the extra specs associated with the instance type match - the metadata provided by aggregates. If not present return False. - """ + """Return a list of hosts that can create instance_type.""" + """Check that the extra specs associated with the instance type match + the metadata provided by aggregates. If not present return False.""" # If 'extra_specs' is not present or extra_specs are empty then we # need not proceed further @@ -47,12 +48,14 @@ class AggregateInstanceExtraSpecsFilter(object): if len(extra_specs_list) == 0: return True - metadatas = openstack_utils.aggregate_metadata_get_by_host(_level, _host) + metadatas = openstack_utils.aggregate_metadata_get_by_host(_level, + _host) matched_logical_group_list = [] for extra_specs in extra_specs_list: for lgk, metadata in metadatas.iteritems(): - if self._match_metadata(_host.get_resource_name(_level), lgk, extra_specs, metadata) is True: + if self._match_metadata(_host.get_resource_name(_level), lgk, + extra_specs, metadata) is True: matched_logical_group_list.append(lgk) break else: @@ -64,7 +67,8 @@ class AggregateInstanceExtraSpecsFilter(object): break else: host_aggregate_extra_specs = {} - host_aggregate_extra_specs["host_aggregates"] = matched_logical_group_list + host_aggregate_extra_specs["host_aggregates"] = \ + matched_logical_group_list _v.extra_specs_list.append(host_aggregate_extra_specs) return True @@ -85,13 +89,17 @@ class 
AggregateInstanceExtraSpecsFilter(object): aggregate_vals = _metadata.get(key, None) if not aggregate_vals: - self.logger.debug("key (" + key + ") not exists in logical_group (" + _lg_name + ") " + " of host (" + _h_name + ")") + self.logger.debug("key (" + key + ") not exists in logical_" + "group (" + _lg_name + ") " + + " of host (" + _h_name + ")") return False for aggregate_val in aggregate_vals: if openstack_utils.match(aggregate_val, req): break else: - self.logger.debug("key (" + key + ")'s value (" + req + ") not exists in logical_group " + "(" + _lg_name + ") " + " of host (" + _h_name + ")") + self.logger.debug("key (" + key + ")'s value (" + req + ") not " + "exists in logical_group " + "(" + _lg_name + + ") " + " of host (" + _h_name + ")") return False return True @@ -99,9 +107,9 @@ class AggregateInstanceExtraSpecsFilter(object): # NOTE: originally, OpenStack used the metadata of host_aggregate class AvailabilityZoneFilter(object): - """ Filters Hosts by availability zone. 
+ """AvailabilityZoneFilter filters Hosts by availability zone.""" - Works with aggregate metadata availability zones, using the key + """Work with aggregate metadata availability zones, using the key 'availability_zone' Note: in theory a compute node can be part of multiple availability_zones """ @@ -110,9 +118,11 @@ class AvailabilityZoneFilter(object): run_filter_once_per_request = True def __init__(self, _logger): + """Initialization.""" self.logger = _logger def host_passes(self, _level, _host, _v): + """Return True if all availalibility zones in _v exist in the host.""" az_request_list = [] if isinstance(_v, VM): az_request_list.append(_v.availability_zone) @@ -123,43 +133,54 @@ class AvailabilityZoneFilter(object): if len(az_request_list) == 0: return True - availability_zone_list = openstack_utils.availability_zone_get_by_host(_level, _host) + availability_zone_list = \ + openstack_utils.availability_zone_get_by_host(_level, _host) for azr in az_request_list: if azr not in availability_zone_list: - self.logger.debug("AZ (" + azr + ") not exists in host " + "(" + _host.get_resource_name(_level) + ")") + self.logger.debug("AZ (" + azr + ") not exists in host " + "(" + + _host.get_resource_name(_level) + ")") return False return True class RamFilter(object): + """RamFilter.""" def __init__(self, _logger): + """Initialization.""" self.logger = _logger def host_passes(self, _level, _host, _v): - """Only return hosts with sufficient available RAM.""" + """Return True if host has sufficient available RAM.""" requested_ram = _v.mem # MB (total_ram, usable_ram) = _host.get_mem(_level) - # Do not allow an instance to overcommit against itself, only against other instances. + # Do not allow an instance to overcommit against itself, only against + # other instances. 
if not total_ram >= requested_ram: - self.logger.debug("requested mem (" + str(requested_ram) + ") more than total mem (" + - str(total_ram) + ") in host (" + _host.get_resource_name(_level) + ")") + self.logger.debug("requested mem (" + str(requested_ram) + + ") more than total mem (" + + str(total_ram) + ") in host (" + + _host.get_resource_name(_level) + ")") return False if not usable_ram >= requested_ram: - self.logger.debug("requested mem (" + str(requested_ram) + ") more than avail mem (" + - str(usable_ram) + ") in host (" + _host.get_resource_name(_level) + ")") + self.logger.debug("requested mem (" + str(requested_ram) + + ") more than avail mem (" + + str(usable_ram) + ") in host (" + + _host.get_resource_name(_level) + ")") return False return True class CoreFilter(object): + """CoreFilter.""" def __init__(self, _logger): + """Initialization.""" self.logger = _logger def host_passes(self, _level, _host, _v): @@ -168,33 +189,42 @@ class CoreFilter(object): instance_vCPUs = _v.vCPUs - # Do not allow an instance to overcommit against itself, only against other instances. + # Do not allow an instance to overcommit against itself, only against + # other instances. 
if instance_vCPUs > vCPUs: - self.logger.debug("requested vCPUs (" + str(instance_vCPUs) + ") more than total vCPUs (" + - str(vCPUs) + ") in host (" + _host.get_resource_name(_level) + ")") + self.logger.debug("requested vCPUs (" + str(instance_vCPUs) + + ") more than total vCPUs (" + + str(vCPUs) + ") in host (" + + _host.get_resource_name(_level) + ")") return False if avail_vCPUs < instance_vCPUs: - self.logger.debug("requested vCPUs (" + str(instance_vCPUs) + ") more than avail vCPUs (" + - str(avail_vCPUs) + ") in host (" + _host.get_resource_name(_level) + ")") + self.logger.debug("requested vCPUs (" + str(instance_vCPUs) + + ") more than avail vCPUs (" + + str(avail_vCPUs) + ") in host (" + + _host.get_resource_name(_level) + ")") return False return True class DiskFilter(object): + """DiskFilter.""" def __init__(self, _logger): + """Initialization.""" self.logger = _logger def host_passes(self, _level, _host, _v): - """Filter based on disk usage.""" + """Return True if the requested disk is less than the available disk.""" requested_disk = _v.local_volume_size (_, usable_disk) = _host.get_local_disk(_level) if not usable_disk >= requested_disk: - self.logger.debug("requested disk (" + str(requested_disk) + ") more than avail disk (" + - str(usable_disk) + ") in host (" + _host.get_resource_name(_level) + ")") + self.logger.debug("requested disk (" + str(requested_disk) + + ") more than avail disk (" + + str(usable_disk) + ") in host (" + + _host.get_resource_name(_level) + ")") return False return True diff --git a/valet/engine/optimizer/ostro/openstack_utils.py b/valet/engine/optimizer/ostro/openstack_utils.py index f3743dd..a7bd899 100755 --- a/valet/engine/optimizer/ostro/openstack_utils.py +++ b/valet/engine/optimizer/ostro/openstack_utils.py @@ -1,27 +1,29 @@ # # Copyright 2014-2017 AT&T Intellectual Property -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +"""Openstack utlity functions.""" + import collections import operator # 1. The following operations are supported: -# =, s==, s!=, s>=, s>, s<=, s<, , , , ==, !=, >=, <= +# =, s==, s!=, s>=, s>, s<=, s<, , , , ==, !=, >=, <= # 2. Note that is handled in a different way below. # 3. If the first word in the extra_specs is not one of the operators, -# it is ignored. +# it is ignored. op_methods = {'=': lambda x, y: float(x) >= float(y), '': lambda x, y: y in x, '': lambda x, y: all(val in x for val in y), @@ -38,6 +40,7 @@ op_methods = {'=': lambda x, y: float(x) >= float(y), def match(value, req): + """Return True if value matches request.""" words = req.split() op = method = None @@ -70,7 +73,10 @@ def match(value, req): def aggregate_metadata_get_by_host(_level, _host, _key=None): - """Returns a dict of all metadata based on a metadata key for a specific host. If the key is not provided, returns a dict of all metadata.""" + """Return a dict of metadata for a specific host.""" + """Base dict on a metadata key. If the key is not provided, + return a dict of all metadata. 
+ """ metadatas = {} @@ -90,6 +96,7 @@ def aggregate_metadata_get_by_host(_level, _host, _key=None): # NOTE: this function not exist in OpenStack def availability_zone_get_by_host(_level, _host): + """Return a list of availability zones for a specific host.""" availability_zone_list = [] logical_groups = _host.get_memberships(_level) diff --git a/valet/engine/optimizer/ostro/optimizer.py b/valet/engine/optimizer/ostro/optimizer.py index 2e10bb9..462fa91 100755 --- a/valet/engine/optimizer/ostro/optimizer.py +++ b/valet/engine/optimizer/ostro/optimizer.py @@ -1,27 +1,32 @@ # # Copyright 2014-2017 AT&T Intellectual Property -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+"""Optimizer.""" + import time -from valet.engine.optimizer.app_manager.app_topology_base import VGroup, VM, Volume +from valet.engine.optimizer.app_manager.app_topology_base \ + import VGroup, VM, Volume from valet.engine.optimizer.ostro.search import Search class Optimizer(object): + """Optimizer.""" def __init__(self, _resource, _logger): + """Initialization.""" self.resource = _resource self.logger = _logger @@ -30,6 +35,8 @@ class Optimizer(object): self.status = "success" def place(self, _app_topology): + """Perform a replan, migration, or create operation.""" + """Return a placement map for VMs, Volumes, and VGroups.""" success = False uuid_map = None @@ -59,7 +66,8 @@ class Optimizer(object): uuid_map = self._delete_old_vms(_app_topology.old_vm_map) self.resource.update_topology(store=False) - self.logger.debug("Optimizer: remove old placements for replan") + self.logger.debug("Optimizer: remove old placements for " + "replan") else: success = self.search.place_nodes(_app_topology, self.resource) @@ -67,26 +75,35 @@ class Optimizer(object): if success is True: - self.logger.debug("Optimizer: search running time = " + str(end_ts - start_ts) + " sec") - self.logger.debug("Optimizer: total bandwidth = " + str(self.search.bandwidth_usage)) - self.logger.debug("Optimizer: total number of hosts = " + str(self.search.num_of_hosts)) + self.logger.debug("Optimizer: search running time = " + + str(end_ts - start_ts) + " sec") + self.logger.debug("Optimizer: total bandwidth = " + + str(self.search.bandwidth_usage)) + self.logger.debug("Optimizer: total number of hosts = " + + str(self.search.num_of_hosts)) placement_map = {} for v in self.search.node_placements.keys(): if isinstance(v, VM): placement_map[v] = self.search.node_placements[v].host_name elif isinstance(v, Volume): - placement_map[v] = self.search.node_placements[v].host_name + "@" - placement_map[v] += self.search.node_placements[v].storage.storage_name + placement_map[v] = \ + 
self.search.node_placements[v].host_name + "@" + placement_map[v] += \ + self.search.node_placements[v].storage.storage_name elif isinstance(v, VGroup): if v.level == "host": - placement_map[v] = self.search.node_placements[v].host_name + placement_map[v] = \ + self.search.node_placements[v].host_name elif v.level == "rack": - placement_map[v] = self.search.node_placements[v].rack_name + placement_map[v] = \ + self.search.node_placements[v].rack_name elif v.level == "cluster": - placement_map[v] = self.search.node_placements[v].cluster_name + placement_map[v] = \ + self.search.node_placements[v].cluster_name - self.logger.debug(" " + v.name + " placed in " + placement_map[v]) + self.logger.debug(" " + v.name + " placed in " + + placement_map[v]) self._update_resource_status(uuid_map) @@ -104,7 +121,8 @@ class Optimizer(object): if uuid is not None: uuid_map[h_uuid] = uuid - self.resource.remove_vm_by_h_uuid_from_host(info[0], h_uuid, info[1], info[2], info[3]) + self.resource.remove_vm_by_h_uuid_from_host( + info[0], h_uuid, info[1], info[2], info[3]) self.resource.update_host_time(info[0]) host = self.resource.hosts[info[0]] @@ -123,58 +141,75 @@ class Optimizer(object): self.resource.add_vm_to_host(np.host_name, (v.uuid, v.name, uuid), - v.vCPUs, v.mem, v.local_volume_size) + v.vCPUs, v.mem, + v.local_volume_size) for vl in v.vm_list: tnp = self.search.node_placements[vl.node] placement_level = np.get_common_placement(tnp) - self.resource.deduct_bandwidth(np.host_name, placement_level, vl.nw_bandwidth) + self.resource.deduct_bandwidth(np.host_name, + placement_level, + vl.nw_bandwidth) for voll in v.volume_list: tnp = self.search.node_placements[voll.node] placement_level = np.get_common_placement(tnp) - self.resource.deduct_bandwidth(np.host_name, placement_level, voll.io_bandwidth) + self.resource.deduct_bandwidth(np.host_name, + placement_level, + voll.io_bandwidth) - self._update_logical_grouping(v, self.search.avail_hosts[np.host_name], uuid) + 
self._update_logical_grouping( + v, self.search.avail_hosts[np.host_name], uuid) self.resource.update_host_time(np.host_name) elif isinstance(v, Volume): - self.resource.add_vol_to_host(np.host_name, np.storage.storage_name, v.name, v.volume_size) + self.resource.add_vol_to_host(np.host_name, + np.storage.storage_name, v.name, + v.volume_size) for vl in v.vm_list: tnp = self.search.node_placements[vl.node] placement_level = np.get_common_placement(tnp) - self.resource.deduct_bandwidth(np.host_name, placement_level, vl.io_bandwidth) + self.resource.deduct_bandwidth(np.host_name, + placement_level, + vl.io_bandwidth) self.resource.update_storage_time(np.storage.storage_name) def _update_logical_grouping(self, _v, _avail_host, _uuid): for lgk, lg in _avail_host.host_memberships.iteritems(): - if lg.group_type == "EX" or lg.group_type == "AFF" or lg.group_type == "DIV": + if lg.group_type == "EX" or lg.group_type == "AFF" or \ + lg.group_type == "DIV": lg_name = lgk.split(":") if lg_name[0] == "host" and lg_name[1] != "any": - self.resource.add_logical_group(_avail_host.host_name, lgk, lg.group_type) + self.resource.add_logical_group(_avail_host.host_name, + lgk, lg.group_type) if _avail_host.rack_name != "any": for lgk, lg in _avail_host.rack_memberships.iteritems(): - if lg.group_type == "EX" or lg.group_type == "AFF" or lg.group_type == "DIV": + if lg.group_type == "EX" or lg.group_type == "AFF" or \ + lg.group_type == "DIV": lg_name = lgk.split(":") if lg_name[0] == "rack" and lg_name[1] != "any": - self.resource.add_logical_group(_avail_host.rack_name, lgk, lg.group_type) + self.resource.add_logical_group(_avail_host.rack_name, + lgk, lg.group_type) if _avail_host.cluster_name != "any": for lgk, lg in _avail_host.cluster_memberships.iteritems(): - if lg.group_type == "EX" or lg.group_type == "AFF" or lg.group_type == "DIV": + if lg.group_type == "EX" or lg.group_type == "AFF" or \ + lg.group_type == "DIV": lg_name = lgk.split(":") if lg_name[0] == "cluster" and 
lg_name[1] != "any": - self.resource.add_logical_group(_avail_host.cluster_name, lgk, lg.group_type) + self.resource.add_logical_group( + _avail_host.cluster_name, lgk, lg.group_type) vm_logical_groups = [] self._collect_logical_groups_of_vm(_v, vm_logical_groups) host = self.resource.hosts[_avail_host.host_name] - self.resource.add_vm_to_logical_groups(host, (_v.uuid, _v.name, _uuid), vm_logical_groups) + self.resource.add_vm_to_logical_groups(host, (_v.uuid, _v.name, _uuid), + vm_logical_groups) def _collect_logical_groups_of_vm(self, _v, _vm_logical_groups): if isinstance(_v, VM): diff --git a/valet/engine/optimizer/ostro/ostro.py b/valet/engine/optimizer/ostro/ostro.py index dc6ce8c..6d563d8 100755 --- a/valet/engine/optimizer/ostro/ostro.py +++ b/valet/engine/optimizer/ostro/ostro.py @@ -1,18 +1,20 @@ # # Copyright 2014-2017 AT&T Intellectual Property -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+"""Valet Engine.""" + from oslo_config import cfg import threading import time @@ -30,21 +32,25 @@ CONF = cfg.CONF class Ostro(object): + """Valet Engine.""" def __init__(self, _config, _logger): + """Initialization.""" self.config = _config self.logger = _logger self.db = MusicHandler(self.config, self.logger) if self.db.init_db() is False: - self.logger.error("Ostro.__init__: error while initializing MUSIC database") + self.logger.error("Ostro.__init__: error while initializing MUSIC " + "database") else: self.logger.debug("Ostro.__init__: done init music") self.resource = Resource(self.db, self.config, self.logger) self.logger.debug("done init resource") - self.app_handler = AppHandler(self.resource, self.db, self.config, self.logger) + self.app_handler = AppHandler(self.resource, self.db, self.config, + self.logger) self.logger.debug("done init apphandler") self.optimizer = Optimizer(self.resource, self.logger) @@ -53,10 +59,13 @@ class Ostro(object): self.data_lock = threading.Lock() self.thread_list = [] - self.topology = TopologyManager(1, "Topology", self.resource, self.data_lock, self.config, self.logger) + self.topology = TopologyManager(1, "Topology", self.resource, + self.data_lock, self.config, + self.logger) self.logger.debug("done init topology") - self.compute = ComputeManager(2, "Compute", self.resource, self.data_lock, self.config, self.logger) + self.compute = ComputeManager(2, "Compute", self.resource, + self.data_lock, self.config, self.logger) self.logger.debug("done init compute") self.listener = ListenerManager(3, "Listener", CONF) @@ -66,6 +75,10 @@ class Ostro(object): self.end_of_process = False def run_ostro(self): + """Start main engine process.""" + """Start topology, compute, and listener processes. Start process of + retrieving and handling events and requests from the db every 1 second. 
+ """ self.logger.info("Ostro.run_ostro: start Ostro ......") self.topology.start() @@ -102,6 +115,10 @@ class Ostro(object): self.logger.info("Ostro.run_ostro: exit Ostro") def stop_ostro(self): + """Stop main engine process.""" + """Stop process of retrieving and handling events and requests from + the db. Stop topology and compute processes. + """ self.end_of_process = True while len(self.thread_list) > 0: @@ -111,10 +128,12 @@ class Ostro(object): self.thread_list.remove(t) def bootstrap(self): + """Start bootstrap and update the engine's resource topology.""" self.logger.info("Ostro.bootstrap: start bootstrap") try: - resource_status = self.db.get_resource_status(self.resource.datacenter.name) + resource_status = self.db.get_resource_status( + self.resource.datacenter.name) if resource_status is None: return False @@ -140,7 +159,8 @@ class Ostro(object): self.resource.update_topology() except Exception: - self.logger.critical("Ostro.bootstrap failed: " + traceback.format_exc()) + self.logger.critical("Ostro.bootstrap failed: " + + traceback.format_exc()) self.logger.info("Ostro.bootstrap: done bootstrap") @@ -173,6 +193,7 @@ class Ostro(object): return True def place_app(self, _app_data): + """Place results of query and placement requests in the db.""" self.data_lock.acquire() start_time = time.time() @@ -190,7 +211,8 @@ class Ostro(object): query_results = self._query(query_request_list) - result = self._get_json_results("query", "ok", self.status, query_results) + result = self._get_json_results("query", "ok", self.status, + query_results) if self.db.put_result(result) is False: self.data_lock.release() @@ -207,9 +229,11 @@ class Ostro(object): placement_map = self._place_app(placement_request_list) if placement_map is None: - result = self._get_json_results("placement", "error", self.status, placement_map) + result = self._get_json_results("placement", "error", + self.status, placement_map) else: - result = self._get_json_results("placement", "ok", 
"success", placement_map) + result = self._get_json_results("placement", "ok", "success", + placement_map) if self.db.put_result(result) is False: self.data_lock.release() @@ -219,7 +243,8 @@ class Ostro(object): end_time = time.time() - self.logger.info("Ostro.place_app: total decision delay of request = " + str(end_time - start_time) + " sec") + self.logger.info("Ostro.place_app: total decision delay of request = " + + str(end_time - start_time) + " sec") self.data_lock.release() return True @@ -233,7 +258,8 @@ class Ostro(object): if "parameters" in q.keys(): params = q["parameters"] if "group_name" in params.keys(): - vm_list = self._get_vms_from_logical_group(params["group_name"]) + vm_list = self._get_vms_from_logical_group( + params["group_name"]) query_results[q["stack_id"]] = vm_list else: self.status = "unknown paramenter in query" @@ -261,7 +287,8 @@ class Ostro(object): vm_id_list = [] for lgk, lg in self.resource.logical_groups.iteritems(): - if lg.group_type == "EX" or lg.group_type == "AFF" or lg.group_type == "DIV": + if lg.group_type == "EX" or lg.group_type == "AFF" or \ + lg.group_type == "DIV": lg_id = lgk.split(":") if lg_id[1] == _group_name: vm_id_list = lg.vm_list @@ -282,14 +309,15 @@ class Ostro(object): return logical_groups def _place_app(self, _app_data): - ''' set application topology ''' + """Set application topology.""" app_topology = self.app_handler.add_app(_app_data) if app_topology is None: self.status = self.app_handler.status - self.logger.debug("Ostro._place_app: error while register requested apps: " + self.status) + self.logger.debug("Ostro._place_app: error while register " + "requested apps: " + self.status) return None - ''' check and set vm flavor information ''' + """Check and set vm flavor information.""" for _, vm in app_topology.vms.iteritems(): if self._set_vm_flavor_information(vm) is False: self.status = "fail to set flavor information" @@ -301,22 +329,25 @@ class Ostro(object): 
self.logger.error("Ostro._place_app: " + self.status) return None - ''' set weights for optimization ''' + """Set weights for optimization.""" app_topology.set_weight() app_topology.set_optimization_priority() - ''' perform search for optimal placement of app topology ''' + """Perform search for optimal placement of app topology.""" placement_map = self.optimizer.place(app_topology) if placement_map is None: self.status = self.optimizer.status - self.logger.debug("Ostro._place_app: error while optimizing app placement: " + self.status) + self.logger.debug("Ostro._place_app: error while optimizing app " + "placement: " + self.status) return None - ''' update resource and app information ''' + """Update resource and app information.""" if len(placement_map) > 0: self.resource.update_topology() - self.app_handler.add_placement(placement_map, self.resource.current_timestamp) - if len(app_topology.exclusion_list_map) > 0 and len(app_topology.planned_vm_map) > 0: + self.app_handler.add_placement(placement_map, + self.resource.current_timestamp) + if len(app_topology.exclusion_list_map) > 0 and \ + len(app_topology.planned_vm_map) > 0: for vk in app_topology.planned_vm_map.keys(): if vk in placement_map.keys(): del placement_map[vk] @@ -336,9 +367,10 @@ class Ostro(object): flavor = self.resource.get_flavor(_vm.flavor) if flavor is None: - self.logger.warn("Ostro._set_vm_flavor_properties: does not exist flavor (" + _vm.flavor + ") and try to refetch") + self.logger.warn("Ostro._set_vm_flavor_properties: does not exist " + "flavor (" + _vm.flavor + ") and try to refetch") - ''' reset flavor resource and try again ''' + """Reset flavor resource and try again.""" if self._set_flavors() is False: return False self.resource.update_topology() @@ -359,6 +391,10 @@ class Ostro(object): return True def handle_events(self, _event_list): + """Handle events in the event list.""" + """Update the engine's resource topology based on the properties of + each event in the event list. 
+ """ self.data_lock.acquire() resource_updated = False @@ -366,101 +402,131 @@ class Ostro(object): for e in _event_list: if e.host is not None and e.host != "none": if self._check_host(e.host) is False: - self.logger.warn("Ostro.handle_events: host (" + e.host + ") related to this event not exists") + self.logger.warn("Ostro.handle_events: host (" + e.host + + ") related to this event not exists") continue - if e.method == "build_and_run_instance": # VM is created (from stack) + if e.method == "build_and_run_instance": + # VM is created (from stack) self.logger.debug("Ostro.handle_events: got build_and_run event") if self.db.put_uuid(e) is False: self.data_lock.release() return False elif e.method == "object_action": - if e.object_name == 'Instance': # VM became active or deleted + if e.object_name == 'Instance': + # VM became active or deleted orch_id = self.db.get_uuid(e.uuid) if orch_id is None: self.data_lock.release() return False if e.vm_state == "active": - self.logger.debug("Ostro.handle_events: got instance_active event") + self.logger.debug("Ostro.handle_events: got instance_" + "active event") vm_info = self.app_handler.get_vm_info(orch_id[1], orch_id[0], e.host) if vm_info is None: - self.logger.error("Ostro.handle_events: error while getting app info from MUSIC") + self.logger.error("Ostro.handle_events: error " + "while getting app info from MUSIC") self.data_lock.release() return False if len(vm_info) == 0: - ''' - h_uuid is None or "none" because vm is not created by stack - or, stack not found because vm is created by the other stack - ''' - self.logger.warn("Ostro.handle_events: no vm_info found in app placement record") - self._add_vm_to_host(e.uuid, orch_id[0], e.host, e.vcpus, e.mem, e.local_disk) + """ + h_uuid is None or "none" because vm is not created + by stack or, stack not found because vm is created + by the other stack + """ + self.logger.warn("Ostro.handle_events: no vm_info " + "found in app placement record") + 
self._add_vm_to_host(e.uuid, orch_id[0], e.host, + e.vcpus, e.mem, e.local_disk) else: - if "planned_host" in vm_info.keys() and vm_info["planned_host"] != e.host: - ''' + if "planned_host" in vm_info.keys() and \ + vm_info["planned_host"] != e.host: + """ vm is activated in the different host - ''' - self.logger.warn("Ostro.handle_events: vm activated in the different host") - self._add_vm_to_host(e.uuid, orch_id[0], e.host, e.vcpus, e.mem, e.local_disk) + """ + self.logger.warn("Ostro.handle_events: vm " + "activated in the different " + "host") + self._add_vm_to_host( + e.uuid, orch_id[0], e.host, e.vcpus, e.mem, + e.local_disk) - self._remove_vm_from_host(e.uuid, orch_id[0], - vm_info["planned_host"], - float(vm_info["cpus"]), - float(vm_info["mem"]), - float(vm_info["local_volume"])) + self._remove_vm_from_host( + e.uuid, orch_id[0], vm_info["planned_host"], + float(vm_info["cpus"]), + float(vm_info["mem"]), + float(vm_info["local_volume"])) - self._remove_vm_from_logical_groups(e.uuid, orch_id[0], vm_info["planned_host"]) + self._remove_vm_from_logical_groups( + e.uuid, orch_id[0], vm_info["planned_host"]) else: - ''' + """ found vm in the planned host, possibly the vm deleted in the host while batch cleanup - ''' - if self._check_h_uuid(orch_id[0], e.host) is False: - self.logger.debug("Ostro.handle_events: planned vm was deleted") + """ + if self._check_h_uuid(orch_id[0], e.host) \ + is False: + self.logger.debug("Ostro.handle_events: " + "planned vm was deleted") if self._check_uuid(e.uuid, e.host) is True: - self._update_h_uuid_in_host(orch_id[0], e.uuid, e.host) - self._update_h_uuid_in_logical_groups(orch_id[0], e.uuid, e.host) + self._update_h_uuid_in_host(orch_id[0], + e.uuid, + e.host) + self._update_h_uuid_in_logical_groups( + orch_id[0], e.uuid, e.host) else: - self.logger.debug("Ostro.handle_events: vm activated as planned") - self._update_uuid_in_host(orch_id[0], e.uuid, e.host) - self._update_uuid_in_logical_groups(orch_id[0], e.uuid, e.host) + 
self.logger.debug("Ostro.handle_events: vm " + "activated as planned") + self._update_uuid_in_host(orch_id[0], + e.uuid, e.host) + self._update_uuid_in_logical_groups( + orch_id[0], e.uuid, e.host) resource_updated = True elif e.vm_state == "deleted": - self.logger.debug("Ostro.handle_events: got instance_delete event") + self.logger.debug("Ostro.handle_events: got instance_" + "delete event") - self._remove_vm_from_host(e.uuid, orch_id[0], e.host, e.vcpus, e.mem, e.local_disk) - self._remove_vm_from_logical_groups(e.uuid, orch_id[0], e.host) + self._remove_vm_from_host(e.uuid, orch_id[0], e.host, + e.vcpus, e.mem, e.local_disk) + self._remove_vm_from_logical_groups(e.uuid, orch_id[0], + e.host) - if self.app_handler.update_vm_info(orch_id[1], orch_id[0]) is False: - self.logger.error("Ostro.handle_events: error while updating app in MUSIC") + if self.app_handler.update_vm_info(orch_id[1], + orch_id[0]) is False: + self.logger.error("Ostro.handle_events: error " + "while updating app in MUSIC") self.data_lock.release() return False resource_updated = True else: - self.logger.warn("Ostro.handle_events: unknown vm_state = " + e.vm_state) + self.logger.warn("Ostro.handle_events: unknown vm_" + "state = " + e.vm_state) - elif e.object_name == 'ComputeNode': # Host resource is updated + elif e.object_name == 'ComputeNode': + # Host resource is updated self.logger.debug("Ostro.handle_events: got compute event") # NOTE: what if host is disabled? 
- if self.resource.update_host_resources(e.host, e.status, - e.vcpus, e.vcpus_used, - e.mem, e.free_mem, - e.local_disk, e.free_local_disk, - e.disk_available_least) is True: + if self.resource.update_host_resources( + e.host, e.status, e.vcpus, e.vcpus_used, e.mem, + e.free_mem, e.local_disk, e.free_local_disk, + e.disk_available_least) is True: self.resource.update_host_time(e.host) resource_updated = True else: - self.logger.warn("Ostro.handle_events: unknown object_name = " + e.object_name) + self.logger.warn("Ostro.handle_events: unknown object_" + "name = " + e.object_name) else: - self.logger.warn("Ostro.handle_events: unknown event method = " + e.method) + self.logger.warn("Ostro.handle_events: unknown event " + "method = " + e.method) if resource_updated is True: self.resource.update_topology() @@ -480,23 +546,30 @@ class Ostro(object): return True - def _add_vm_to_host(self, _uuid, _h_uuid, _host_name, _vcpus, _mem, _local_disk): + def _add_vm_to_host(self, _uuid, _h_uuid, _host_name, _vcpus, _mem, + _local_disk): vm_id = None if _h_uuid is None: vm_id = ("none", "none", _uuid) else: vm_id = (_h_uuid, "none", _uuid) - self.resource.add_vm_to_host(_host_name, vm_id, _vcpus, _mem, _local_disk) + self.resource.add_vm_to_host(_host_name, vm_id, _vcpus, _mem, + _local_disk) self.resource.update_host_time(_host_name) - def _remove_vm_from_host(self, _uuid, _h_uuid, _host_name, _vcpus, _mem, _local_disk): + def _remove_vm_from_host(self, _uuid, _h_uuid, _host_name, _vcpus, _mem, + _local_disk): if self._check_h_uuid(_h_uuid, _host_name) is True: - self.resource.remove_vm_by_h_uuid_from_host(_host_name, _h_uuid, _vcpus, _mem, _local_disk) + self.resource.remove_vm_by_h_uuid_from_host(_host_name, _h_uuid, + _vcpus, _mem, + _local_disk) self.resource.update_host_time(_host_name) else: if self._check_uuid(_uuid, _host_name) is True: - self.resource.remove_vm_by_uuid_from_host(_host_name, _uuid, _vcpus, _mem, _local_disk) + 
self.resource.remove_vm_by_uuid_from_host(_host_name, _uuid, + _vcpus, _mem, + _local_disk) self.resource.update_host_time(_host_name) def _remove_vm_from_logical_groups(self, _uuid, _h_uuid, _host_name): @@ -537,7 +610,8 @@ class Ostro(object): if host.update_uuid(_h_uuid, _uuid) is True: self.resource.update_host_time(_host_name) else: - self.logger.warn("Ostro._update_uuid_in_host: fail to update uuid in host = " + host.name) + self.logger.warn("Ostro._update_uuid_in_host: fail to update uuid " + "in host = " + host.name) def _update_h_uuid_in_host(self, _h_uuid, _uuid, _host_name): host = self.resource.hosts[_host_name] @@ -554,7 +628,8 @@ class Ostro(object): self.resource.update_h_uuid_in_logical_groups(_h_uuid, _uuid, host) - def _get_json_results(self, _request_type, _status_type, _status_message, _map): + def _get_json_results(self, _request_type, _status_type, _status_message, + _map): result = {} if _request_type == "query": diff --git a/valet/engine/optimizer/ostro/search.py b/valet/engine/optimizer/ostro/search.py index e0b282f..7dd1952 100755 --- a/valet/engine/optimizer/ostro/search.py +++ b/valet/engine/optimizer/ostro/search.py @@ -1,53 +1,60 @@ # # Copyright 2014-2017 AT&T Intellectual Property -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+"""Search.""" + import copy import operator -from valet.engine.optimizer.app_manager.app_topology_base import VGroup, VM, Volume, LEVELS +from valet.engine.optimizer.app_manager.app_topology_base \ + import VGroup, VM, Volume, LEVELS from valet.engine.optimizer.ostro.constraint_solver import ConstraintSolver from valet.engine.optimizer.ostro.search_base import compute_reservation -from valet.engine.optimizer.ostro.search_base import Node, Resource, LogicalGroupResource -from valet.engine.optimizer.ostro.search_base import SwitchResource, StorageResource +from valet.engine.optimizer.ostro.search_base \ + import Node, Resource, LogicalGroupResource +from valet.engine.optimizer.ostro.search_base \ + import SwitchResource, StorageResource from valet.engine.resource_manager.resource_base import Datacenter class Search(object): + """Search.""" def __init__(self, _logger): + """Initialization.""" self.logger = _logger - ''' search inputs ''' + """Search inputs.""" self.resource = None self.app_topology = None - ''' snapshot of current resource status ''' + """Snapshot of current resource status.""" self.avail_hosts = {} self.avail_logical_groups = {} self.avail_storage_hosts = {} self.avail_switches = {} - ''' search results ''' + """Search results.""" self.node_placements = {} self.bandwidth_usage = 0 self.num_of_hosts = 0 - ''' for replan ''' + """For replan.""" self.planned_placements = {} - ''' optimization criteria ''' + """Optimization criteria.""" self.nw_bandwidth_weight = -1 self.CPU_weight = -1 self.mem_weight = -1 @@ -77,6 +84,7 @@ class Search(object): self.disk_weight = -1 def copy_resource_status(self, _resource): + """Copy the resource status.""" self._init_placements() self.resource = _resource @@ -87,11 +95,14 @@ class Search(object): self._create_avail_hosts() def place_nodes(self, _app_topology, _resource): + """Place nodes.""" + """Copy the resource status and utilize the constraint solver + to place nodes based on the app topology.""" 
self._init_placements() self.app_topology = _app_topology - ''' ping request ''' + """Ping request.""" if self.app_topology.optimization_priority is None: return True @@ -114,11 +125,14 @@ class Search(object): self.app_topology.vgroups, init_level) - ''' start from 'rack' level ''' + """Start from 'rack' level.""" return self._run_greedy(open_node_list, level, self.avail_hosts) def re_place_nodes(self, _app_topology, _resource): + """Re-place nodes.""" + """Copy the resource status and utilize the constraint solver + to re-place nodes based on the app topology.""" self._init_placements() self.app_topology = _app_topology @@ -135,13 +149,14 @@ class Search(object): if len(self.app_topology.old_vm_map) > 0: self._adjust_resources() - self.logger.debug("Search: adjust resources by deducting prior placements") + self.logger.debug("Search: adjust resources by deducting prior " + "placements") self._compute_resource_weights() self.logger.debug("Search: first, place already-planned nodes") - ''' reconsider all vms to be migrated together ''' + """Reconsider all vms to be migrated together.""" if len(_app_topology.exclusion_list_map) > 0: self._set_no_migrated_list() @@ -170,7 +185,8 @@ class Search(object): migrated_vm_id = self.app_topology.candidate_list_map.keys()[0] if migrated_vm_id not in self.app_topology.vms.keys(): - vgroup = self._get_vgroup_of_vm(migrated_vm_id, self.app_topology.vgroups) + vgroup = self._get_vgroup_of_vm(migrated_vm_id, + self.app_topology.vgroups) if vgroup is not None: vm_list = [] self._get_child_vms(vgroup, vm_list, migrated_vm_id) @@ -178,7 +194,8 @@ class Search(object): if vk in self.app_topology.planned_vm_map.keys(): del self.app_topology.planned_vm_map[vk] else: - self.logger.error("Search: migrated " + migrated_vm_id + " is missing while replan") + self.logger.error("Search: migrated " + migrated_vm_id + + " is missing while replan") def _get_child_vms(self, _g, _vm_list, _e_vmk): for sgk, sg in _g.subvgroups.iteritems(): @@ 
-190,14 +207,16 @@ class Search(object): def _place_planned_nodes(self): init_level = LEVELS[len(LEVELS) - 1] - (planned_node_list, level) = self._open_planned_list(self.app_topology.vms, - self.app_topology.volumes, - self.app_topology.vgroups, - init_level) + (planned_node_list, level) = \ + self._open_planned_list(self.app_topology.vms, + self.app_topology.volumes, + self.app_topology.vgroups, + init_level) if len(planned_node_list) == 0: return True - return self._run_greedy_as_planned(planned_node_list, level, self.avail_hosts) + return self._run_greedy_as_planned(planned_node_list, level, + self.avail_hosts) def _open_planned_list(self, _vms, _volumes, _vgroups, _current_level): planned_node_list = [] @@ -224,7 +243,8 @@ class Search(object): vgroup.host = [] host_name = self._get_host_of_vgroup(hk, vgroup.level) if host_name is None: - self.logger.error("Search: host does not exist while replan with vgroup") + self.logger.error("Search: host does not exist while " + "replan with vgroup") else: # if len(vgroup.host) == 0: if host_name not in vgroup.host: @@ -237,10 +257,9 @@ class Search(object): if node is None: n = Node() n.node = vgroup - n.sort_base = self._set_virtual_capacity_based_sort(vgroup) + n.sort_base = \ + self._set_virtual_capacity_based_sort(vgroup) planned_node_list.append(n) - # else: - # self.logger.warn("Search: " + vmk + " is missing while replan") current_level_index = LEVELS.index(_current_level) next_level_index = current_level_index - 1 @@ -306,18 +325,21 @@ class Search(object): _node_list.sort(key=operator.attrgetter("sort_base"), reverse=True) self.logger.debug("Search: level = " + _level) for on in _node_list: - self.logger.debug(" node = {}, value = {}".format(on.node.name, on.sort_base)) + self.logger.debug(" node = {}, value = {}".format(on.node.name, + on.sort_base)) while len(_node_list) > 0: n = _node_list.pop(0) - self.logger.debug("Search: level = " + _level + ", placing node = " + n.node.name) + self.logger.debug("Search: 
level = " + _level + + ", placing node = " + n.node.name) - best_resource = self._get_best_resource_for_planned(n, _level, avail_resources) + best_resource = self._get_best_resource_for_planned(n, _level, + avail_resources) if best_resource is not None: debug_best_resource = best_resource.get_resource_name(_level) - # elif isinstance(n.node, Volume): - # debug_best_resource = best_resource.host_name + "@" + best_resource.storage.storage_name - self.logger.debug("Search: best resource = " + debug_best_resource + " for node = " + n.node.name) + self.logger.debug("Search: best resource = " + + debug_best_resource + " for node = " + + n.node.name) self._deduct_reservation(_level, best_resource, n) self._close_planned_placement(_level, best_resource, n.node) @@ -330,7 +352,8 @@ class Search(object): def _get_best_resource_for_planned(self, _n, _level, _avail_resources): best_resource = None - if _level == "host" and (isinstance(_n.node, VM) or isinstance(_n.node, Volume)): + if _level == "host" and (isinstance(_n.node, VM) or isinstance(_n.node, + Volume)): best_resource = copy.deepcopy(_avail_resources[_n.node.host[0]]) best_resource.level = "host" # storage set @@ -355,7 +378,9 @@ class Search(object): elif isinstance(_n.node, Volume): volumes[_n.node.uuid] = _n.node - (planned_node_list, level) = self._open_planned_list(vms, volumes, vgroups, _level) + (planned_node_list, level) = self._open_planned_list(vms, volumes, + vgroups, + _level) host_name = self._get_host_of_level(_n, _level) if host_name is None: @@ -374,7 +399,8 @@ class Search(object): if h.host_name == host_name: avail_hosts[hk] = h - if self._run_greedy_as_planned(planned_node_list, level, avail_hosts) is True: + if self._run_greedy_as_planned(planned_node_list, level, + avail_hosts) is True: best_resource = copy.deepcopy(_avail_resources[host_name]) best_resource.level = _level @@ -411,7 +437,8 @@ class Search(object): for hk, host in self.resource.hosts.iteritems(): if host.check_availability() is 
False: - self.logger.debug("Search: host (" + host.name + ") not available at this time") + self.logger.debug("Search: host (" + host.name + + ") not available at this time") continue r = Resource() @@ -454,7 +481,8 @@ class Search(object): for rsk in rack.storages.keys(): if rsk in self.avail_storage_hosts.keys(): - r.rack_avail_storages[rsk] = self.avail_storage_hosts[rsk] + r.rack_avail_storages[rsk] = \ + self.avail_storage_hosts[rsk] for rsk in rack.switches.keys(): if rsk in self.avail_switches.keys(): @@ -480,15 +508,18 @@ class Search(object): for mk in cluster.memberships.keys(): if mk in self.avail_logical_groups.keys(): - r.cluster_memberships[mk] = self.avail_logical_groups[mk] + r.cluster_memberships[mk] = \ + self.avail_logical_groups[mk] for csk in cluster.storages.keys(): if csk in self.avail_storage_hosts.keys(): - r.cluster_avail_storages[csk] = self.avail_storage_hosts[csk] + r.cluster_avail_storages[csk] = \ + self.avail_storage_hosts[csk] for csk in cluster.switches.keys(): if csk in self.avail_switches.keys(): - r.cluster_avail_switches[csk] = self.avail_switches[csk] + r.cluster_avail_switches[csk] = \ + self.avail_switches[csk] r.cluster_vCPUs = cluster.original_vCPUs r.cluster_avail_vCPUs = cluster.avail_vCPUs @@ -543,7 +574,8 @@ class Search(object): self.avail_logical_groups[lgk] = lgr def _adjust_resources(self): - for h_uuid, info in self.app_topology.old_vm_map.iteritems(): # info = (host, cpu, mem, disk) + for h_uuid, info in self.app_topology.old_vm_map.iteritems(): + # info = (host, cpu, mem, disk) if info[0] not in self.avail_hosts.keys(): continue @@ -565,7 +597,8 @@ class Search(object): rr.rack_avail_local_disk += info[3] for _, cr in self.avail_hosts.iteritems(): - if cr.cluster_name != "any" and cr.cluster_name == r.cluster_name: + if cr.cluster_name != "any" and \ + cr.cluster_name == r.cluster_name: cr.cluster_num_of_placed_vms -= 1 cr.cluster_avail_vCPUs += info[1] cr.cluster_avail_mem += info[2] @@ -582,11 +615,14 @@ class 
Search(object): lgr.num_of_placed_vms -= 1 if r.host_name in lgr.num_of_placed_vms_per_host.keys(): lgr.num_of_placed_vms_per_host[r.host_name] -= 1 - if lgr.group_type == "EX" or lgr.group_type == "AFF" or lgr.group_type == "DIV": + if lgr.group_type == "EX" or \ + lgr.group_type == "AFF" or \ + lgr.group_type == "DIV": if lgr.num_of_placed_vms_per_host[r.host_name] == 0: del lgr.num_of_placed_vms_per_host[r.host_name] del r.host_memberships[lgk] - if lgr.group_type == "EX" or lgr.group_type == "AFF" or lgr.group_type == "DIV": + if lgr.group_type == "EX" or lgr.group_type == "AFF" or \ + lgr.group_type == "DIV": if lgr.num_of_placed_vms == 0: del self.avail_logical_groups[lgk] @@ -596,17 +632,25 @@ class Search(object): if lgk not in self.resource.logical_groups.keys(): continue lg = self.resource.logical_groups[lgk] - if lg.group_type == "EX" or lg.group_type == "AFF" or lg.group_type == "DIV": + if lg.group_type == "EX" or lg.group_type == "AFF" or \ + lg.group_type == "DIV": if lgk.split(":")[0] == "rack": if lg.exist_vm_by_h_uuid(h_uuid) is True: lgr = r.rack_memberships[lgk] lgr.num_of_placed_vms -= 1 - if r.rack_name in lgr.num_of_placed_vms_per_host.keys(): + if r.rack_name in \ + lgr.num_of_placed_vms_per_host.keys(): lgr.num_of_placed_vms_per_host[r.rack_name] -= 1 - if lgr.num_of_placed_vms_per_host[r.rack_name] == 0: - del lgr.num_of_placed_vms_per_host[r.rack_name] + if lgr.num_of_placed_vms_per_host[ + r.rack_name + ] == 0: + del lgr.num_of_placed_vms_per_host[ + r.rack_name + ] for _, rr in self.avail_hosts.iteritems(): - if rr.rack_name != "any" and rr.rack_name == r.rack_name: + if rr.rack_name != "any" and \ + rr.rack_name == \ + r.rack_name: del rr.rack_memberships[lgk] if lgr.num_of_placed_vms == 0: del self.avail_logical_groups[lgk] @@ -617,17 +661,27 @@ class Search(object): if lgk not in self.resource.logical_groups.keys(): continue lg = self.resource.logical_groups[lgk] - if lg.group_type == "EX" or lg.group_type == "AFF" or lg.group_type 
== "DIV": + if lg.group_type == "EX" or lg.group_type == "AFF" or \ + lg.group_type == "DIV": if lgk.split(":")[0] == "cluster": if lg.exist_vm_by_h_uuid(h_uuid) is True: lgr = r.cluster_memberships[lgk] lgr.num_of_placed_vms -= 1 - if r.cluster_name in lgr.num_of_placed_vms_per_host.keys(): - lgr.num_of_placed_vms_per_host[r.cluster_name] -= 1 - if lgr.num_of_placed_vms_per_host[r.cluster_name] == 0: - del lgr.num_of_placed_vms_per_host[r.cluster_name] + if r.cluster_name in \ + lgr.num_of_placed_vms_per_host.keys(): + lgr.num_of_placed_vms_per_host[ + r.cluster_name + ] -= 1 + if lgr.num_of_placed_vms_per_host[ + r.cluster_name + ] == 0: + del lgr.num_of_placed_vms_per_host[ + r.cluster_name + ] for _, cr in self.avail_hosts.iteritems(): - if cr.cluster_name != "any" and cr.cluster_name == r.cluster_name: + if cr.cluster_name != "any" and \ + cr.cluster_name == \ + r.cluster_name: del cr.cluster_memberships[lgk] if lgr.num_of_placed_vms == 0: del self.avail_logical_groups[lgk] @@ -682,13 +736,15 @@ class Search(object): self.logger.debug("Search: placement priority weights") for (r, w) in self.app_topology.optimization_priority: if r == "bw": - self.logger.debug(" nw weight = " + str(self.nw_bandwidth_weight)) + self.logger.debug(" nw weight = " + + str(self.nw_bandwidth_weight)) elif r == "cpu": self.logger.debug(" cpu weight = " + str(self.CPU_weight)) elif r == "mem": self.logger.debug(" mem weight = " + str(self.mem_weight)) elif r == "lvol": - self.logger.debug(" local disk weight = " + str(self.local_disk_weight)) + self.logger.debug(" local disk weight = " + + str(self.local_disk_weight)) elif r == "vol": self.logger.debug(" disk weight = " + str(self.disk_weight)) @@ -756,13 +812,16 @@ class Search(object): _open_node_list.sort(key=operator.attrgetter("sort_base"), reverse=True) - self.logger.debug("Search: the order of open node list in level = " + _level) + self.logger.debug("Search: the order of open node list in level = " + + _level) for on in 
_open_node_list: - self.logger.debug(" node = {}, value = {}".format(on.node.name, on.sort_base)) + self.logger.debug(" node = {}, value = {}".format(on.node.name, + on.sort_base)) while len(_open_node_list) > 0: n = _open_node_list.pop(0) - self.logger.debug("Search: level = " + _level + ", node = " + n.node.name) + self.logger.debug("Search: level = " + _level + ", node = " + + n.node.name) best_resource = self._get_best_resource(n, _level, avail_resources) if best_resource is None: @@ -770,25 +829,27 @@ class Search(object): break debug_best_resource = best_resource.get_resource_name(_level) - # if isinstance(n.node, Volume): - # debug_best_resource = debug_best_resource + "@" + best_resource.storage.storage_name - self.logger.debug("Search: best resource = " + debug_best_resource + " for node = " + n.node.name) + self.logger.debug("Search: best resource = " + debug_best_resource + + " for node = " + n.node.name) if n.node not in self.planned_placements.keys(): - ''' for VM or Volume under host level only ''' + """for VM or Volume under host level only""" self._deduct_reservation(_level, best_resource, n) - ''' close all types of nodes under any level, but VM or Volume with above host level ''' + """close all types of nodes under any level, but VM or Volume + with above host level""" self._close_node_placement(_level, best_resource, n.node) else: - self.logger.debug("Search: node (" + n.node.name + ") is already deducted") + self.logger.debug("Search: node (" + n.node.name + + ") is already deducted") return success def _get_best_resource(self, _n, _level, _avail_resources): - ''' already planned vgroup ''' + """Already-planned vgroup.""" planned_host = None if _n.node in self.planned_placements.keys(): - self.logger.debug("Search: already determined node = " + _n.node.name) + self.logger.debug("Search: already determined node = " + + _n.node.name) copied_host = self.planned_placements[_n.node] if _level == "host": planned_host = 
_avail_resources[copied_host.host_name] @@ -799,10 +860,15 @@ class Search(object): else: if len(self.app_topology.candidate_list_map) > 0: - conflicted_vm_uuid = self.app_topology.candidate_list_map.keys()[0] - candidate_name_list = self.app_topology.candidate_list_map[conflicted_vm_uuid] - if (isinstance(_n.node, VM) and conflicted_vm_uuid == _n.node.uuid) or \ - (isinstance(_n.node, VGroup) and self._check_vm_grouping(_n.node, conflicted_vm_uuid) is True): + conflicted_vm_uuid = \ + self.app_topology.candidate_list_map.keys()[0] + candidate_name_list = \ + self.app_topology.candidate_list_map[conflicted_vm_uuid] + if (isinstance(_n.node, VM) and + conflicted_vm_uuid == _n.node.uuid) or \ + (isinstance(_n.node, VGroup) and + self._check_vm_grouping( + _n.node, conflicted_vm_uuid) is True): host_list = [] for hk in candidate_name_list: host_name = self._get_host_of_vgroup(hk, _level) @@ -810,17 +876,17 @@ class Search(object): if host_name not in host_list: host_list.append(host_name) else: - self.logger.warn("Search: cannot find candidate host while replanning") + self.logger.warn("Search: cannot find candidate " + "host while replanning") _n.node.host = host_list candidate_list = [] if planned_host is not None: candidate_list.append(planned_host) else: - candidate_list = self.constraint_solver.compute_candidate_list(_level, _n, - self.node_placements, - _avail_resources, - self.avail_logical_groups) + candidate_list = self.constraint_solver.compute_candidate_list( + _level, _n, self.node_placements, _avail_resources, + self.avail_logical_groups) if len(candidate_list) == 0: self.status = self.constraint_solver.status return None @@ -844,12 +910,14 @@ class Search(object): candidate_list.remove(c) if len(candidate_list) == 0: - self.status = "no available network bandwidth left, for node = " + _n.node.name + self.status = "no available network bandwidth left, for " \ + "node = " + _n.node.name self.logger.error("Search: " + self.status) return None 
candidate_list.sort(key=operator.attrgetter("sort_base")) - top_candidate_list = self._sort_highest_consolidation(_n, _level, candidate_list) + top_candidate_list = self._sort_highest_consolidation( + _n, _level, candidate_list) else: if target == "vol": if isinstance(_n.node, VGroup) or isinstance(_n.node, Volume): @@ -864,7 +932,8 @@ class Search(object): volume_class = vck self._set_disk_sort_base(_level, candidate_list, volume_class) - candidate_list.sort(key=operator.attrgetter("sort_base"), reverse=True) + candidate_list.sort(key=operator.attrgetter("sort_base"), + reverse=True) else: self._set_compute_sort_base(_level, candidate_list) candidate_list.sort(key=operator.attrgetter("sort_base")) @@ -873,10 +942,13 @@ class Search(object): self._set_compute_sort_base(_level, candidate_list) candidate_list.sort(key=operator.attrgetter("sort_base")) else: - self._set_disk_sort_base(_level, candidate_list, _n.node.volume_class) - candidate_list.sort(key=operator.attrgetter("sort_base"), reverse=True) + self._set_disk_sort_base(_level, candidate_list, + _n.node.volume_class) + candidate_list.sort(key=operator.attrgetter("sort_base"), + reverse=True) - top_candidate_list = self._sort_lowest_bandwidth_usage(_n, _level, candidate_list) + top_candidate_list = self._sort_lowest_bandwidth_usage( + _n, _level, candidate_list) if len(top_candidate_list) == 0: self.status = "no available network bandwidth left" @@ -884,7 +956,8 @@ class Search(object): return None best_resource = None - if _level == "host" and (isinstance(_n.node, VM) or isinstance(_n.node, Volume)): + if _level == "host" and (isinstance(_n.node, VM) or isinstance(_n.node, + Volume)): best_resource = copy.deepcopy(top_candidate_list[0]) best_resource.level = "host" if isinstance(_n.node, Volume): @@ -895,7 +968,8 @@ class Search(object): while len(top_candidate_list) > 0: cr = top_candidate_list.pop(0) - self.logger.debug("Search: try candidate = " + cr.get_resource_name(_level)) + self.logger.debug("Search: 
try candidate = " + + cr.get_resource_name(_level)) vms = {} volumes = {} @@ -917,7 +991,8 @@ class Search(object): elif isinstance(_n.node, Volume): volumes[_n.node.uuid] = _n.node - (open_node_list, level) = self._open_list(vms, volumes, vgroups, _level) + (open_node_list, level) = self._open_list(vms, volumes, + vgroups, _level) if open_node_list is None: break @@ -933,24 +1008,27 @@ class Search(object): if h.host_name == cr.host_name: avail_hosts[hk] = h - ''' recursive call ''' - if self._run_greedy(open_node_list, level, avail_hosts) is True: + """Recursive call.""" + if self._run_greedy(open_node_list, level, avail_hosts) \ + is True: best_resource = copy.deepcopy(cr) best_resource.level = _level break else: debug_candidate_name = cr.get_resource_name(_level) - self.logger.debug("Search: rollback of candidate resource = " + debug_candidate_name) + self.logger.debug("Search: rollback of candidate " + "resource = " + debug_candidate_name) if planned_host is None: - ''' recursively rollback deductions of all child VMs and Volumes of _n ''' + """Recursively rollback deductions of all child + VMs and Volumes of _n """ self._rollback_reservation(_n.node) - ''' recursively rollback closing ''' + """ recursively rollback closing """ self._rollback_node_placement(_n.node) else: break - ''' after explore top candidate list for _n ''' + """After explore top candidate list for _n.""" if best_resource is not None: break else: @@ -960,11 +1038,16 @@ class Search(object): break else: if target == "bw": - top_candidate_list = self._sort_highest_consolidation(_n, _level, candidate_list) + top_candidate_list = \ + self._sort_highest_consolidation(_n, _level, + candidate_list) else: - top_candidate_list = self._sort_lowest_bandwidth_usage(_n, _level, candidate_list) + top_candidate_list = \ + self._sort_lowest_bandwidth_usage( + _n, _level, candidate_list) if len(top_candidate_list) == 0: - self.status = "no available network bandwidth left" + self.status = "no available 
network " \ + "bandwidth left" self.logger.warn("Search: " + self.status) break @@ -974,7 +1057,8 @@ class Search(object): max_storage_size = 0 for sk in _resource.host_avail_storages.keys(): s = self.avail_storage_hosts[sk] - if _n.node.volume_class == "any" or s.storage_class == _n.node.volume_class: + if _n.node.volume_class == "any" or \ + s.storage_class == _n.node.volume_class: if s.storage_avail_disk > max_storage_size: max_storage_size = s.storage_avail_disk _resource.storage = s @@ -991,7 +1075,8 @@ class Search(object): rm_list.append(ch) else: break - _candidate_list[:] = [c for c in _candidate_list if c not in rm_list] + _candidate_list[:] = [c for c in _candidate_list + if c not in rm_list] constrained_list = [] for c in top_candidate_list: @@ -1042,8 +1127,10 @@ class Search(object): if _n.node.volume_sizes[vck] > max_size: max_size = _n.node.volume_sizes[vck] volume_class = vck - self._set_disk_sort_base(_level, top_candidate_list, volume_class) - top_candidate_list.sort(key=operator.attrgetter("sort_base"), reverse=True) + self._set_disk_sort_base(_level, top_candidate_list, + volume_class) + top_candidate_list.sort(key=operator.attrgetter("sort_base"), + reverse=True) else: self._set_compute_sort_base(_level, top_candidate_list) top_candidate_list.sort(key=operator.attrgetter("sort_base")) @@ -1052,8 +1139,10 @@ class Search(object): self._set_compute_sort_base(_level, top_candidate_list) top_candidate_list.sort(key=operator.attrgetter("sort_base")) else: - self._set_disk_sort_base(_level, top_candidate_list, _n.node.volume_class) - top_candidate_list.sort(key=operator.attrgetter("sort_base"), reverse=True) + self._set_disk_sort_base(_level, top_candidate_list, + _n.node.volume_class) + top_candidate_list.sort(key=operator.attrgetter("sort_base"), + reverse=True) return top_candidate_list @@ -1090,23 +1179,33 @@ class Search(object): mem_ratio = -1 local_disk_ratio = -1 if _level == "cluster": - CPU_ratio = float(c.cluster_avail_vCPUs) / 
float(self.resource.CPU_avail) - mem_ratio = float(c.cluster_avail_mem) / float(self.resource.mem_avail) - local_disk_ratio = float(c.cluster_avail_local_disk) / float(self.resource.local_disk_avail) + CPU_ratio = float(c.cluster_avail_vCPUs) / \ + float(self.resource.CPU_avail) + mem_ratio = float(c.cluster_avail_mem) / \ + float(self.resource.mem_avail) + local_disk_ratio = float(c.cluster_avail_local_disk) / \ + float(self.resource.local_disk_avail) elif _level == "rack": - CPU_ratio = float(c.rack_avail_vCPUs) / float(self.resource.CPU_avail) - mem_ratio = float(c.rack_avail_mem) / float(self.resource.mem_avail) - local_disk_ratio = float(c.rack_avail_local_disk) / float(self.resource.local_disk_avail) + CPU_ratio = float(c.rack_avail_vCPUs) / \ + float(self.resource.CPU_avail) + mem_ratio = float(c.rack_avail_mem) / \ + float(self.resource.mem_avail) + local_disk_ratio = float(c.rack_avail_local_disk) / \ + float(self.resource.local_disk_avail) elif _level == "host": - CPU_ratio = float(c.host_avail_vCPUs) / float(self.resource.CPU_avail) - mem_ratio = float(c.host_avail_mem) / float(self.resource.mem_avail) - local_disk_ratio = float(c.host_avail_local_disk) / float(self.resource.local_disk_avail) + CPU_ratio = float(c.host_avail_vCPUs) / \ + float(self.resource.CPU_avail) + mem_ratio = float(c.host_avail_mem) / \ + float(self.resource.mem_avail) + local_disk_ratio = float(c.host_avail_local_disk) / \ + float(self.resource.local_disk_avail) c.sort_base = (1.0 - self.CPU_weight) * CPU_ratio + \ (1.0 - self.mem_weight) * mem_ratio + \ (1.0 - self.local_disk_weight) * local_disk_ratio def _estimate_max_bandwidth(self, _level, _n, _candidate): - nw_bandwidth_penalty = self._estimate_nw_bandwidth_penalty(_level, _n, _candidate) + nw_bandwidth_penalty = self._estimate_nw_bandwidth_penalty(_level, _n, + _candidate) if nw_bandwidth_penalty >= 0: return nw_bandwidth_penalty @@ -1117,7 +1216,8 @@ class Search(object): sort_base = 0 # Set bandwidth usage penalty by 
placement # To check the bandwidth constraint at the last moment - # 3rd entry to be used for special node communicating beyond datacenter or zone + # 3rd entry to be used for special node communicating beyond datacenter + # or zone req_bandwidths = [0, 0, 0] link_list = _n.get_all_links() @@ -1129,17 +1229,21 @@ class Search(object): placed_link_list.append(vl) bandwidth = _n.get_bandwidth_of_link(vl) - placement_level = _candidate.get_common_placement(self.node_placements[v]) - if placement_level != "ANY" and LEVELS.index(placement_level) >= LEVELS.index(_level): - sort_base += compute_reservation(_level, placement_level, bandwidth) - self.constraint_solver.get_req_bandwidths(_level, - placement_level, - bandwidth, - req_bandwidths) + placement_level = _candidate.get_common_placement( + self.node_placements[v]) + if placement_level != "ANY" and \ + LEVELS.index(placement_level) >= \ + LEVELS.index(_level): + sort_base += compute_reservation(_level, + placement_level, + bandwidth) + self.constraint_solver.get_req_bandwidths( + _level, placement_level, bandwidth, req_bandwidths) candidate = copy.deepcopy(_candidate) - exclusivity_ids = self.constraint_solver.get_exclusivities(_n.node.exclusivity_groups, _level) + exclusivity_ids = self.constraint_solver.get_exclusivities( + _n.node.exclusivity_groups, _level) exclusivity_id = None if len(exclusivity_ids) > 0: exclusivity_id = exclusivity_ids[exclusivity_ids.keys()[0]] @@ -1153,7 +1257,8 @@ class Search(object): self.avail_logical_groups[exclusivity_id] = temp_lgr temp_exclusivity_insert = True - self._add_exclusivity_to_candidate(_level, candidate, exclusivity_id) + self._add_exclusivity_to_candidate(_level, candidate, + exclusivity_id) affinity_id = _n.get_affinity_id() temp_affinity_insert = False @@ -1168,7 +1273,8 @@ class Search(object): self._add_affinity_to_candidate(_level, candidate, affinity_id) - self._deduct_reservation_from_candidate(candidate, _n, req_bandwidths, _level) + 
self._deduct_reservation_from_candidate(candidate, _n, req_bandwidths, + _level) handled_vgroups = {} for vl in link_list: @@ -1179,13 +1285,16 @@ class Search(object): diversity_level = _n.get_common_diversity(vl.node.diversity_groups) if diversity_level == "ANY": - implicit_diversity = self.constraint_solver.get_implicit_diversity(_n.node, - link_list, - vl.node, - _level) + implicit_diversity = \ + self.constraint_solver.get_implicit_diversity(_n.node, + link_list, + vl.node, + _level) if implicit_diversity[0] is not None: diversity_level = implicit_diversity[1] - if diversity_level == "ANY" or LEVELS.index(diversity_level) < LEVELS.index(_level): + if diversity_level == "ANY" or \ + LEVELS.index(diversity_level) < \ + LEVELS.index(_level): vg = self._get_top_vgroup(vl.node, _level) if vg.uuid not in handled_vgroups.keys(): handled_vgroups[vg.uuid] = vg @@ -1193,20 +1302,29 @@ class Search(object): temp_n = Node() temp_n.node = vg temp_req_bandwidths = [0, 0, 0] - self.constraint_solver.get_req_bandwidths(_level, _level, bandwidth, temp_req_bandwidths) + self.constraint_solver.get_req_bandwidths( + _level, _level, bandwidth, temp_req_bandwidths) - if self._check_availability(_level, temp_n, candidate) is True: - self._deduct_reservation_from_candidate(candidate, temp_n, temp_req_bandwidths, _level) + if self._check_availability(_level, temp_n, candidate) \ + is True: + self._deduct_reservation_from_candidate( + candidate, temp_n, temp_req_bandwidths, _level) else: - sort_base += compute_reservation(_level, _level, bandwidth) + sort_base += compute_reservation(_level, _level, + bandwidth) req_bandwidths[0] += temp_req_bandwidths[0] req_bandwidths[1] += temp_req_bandwidths[1] req_bandwidths[2] += temp_req_bandwidths[2] else: - self.constraint_solver.get_req_bandwidths(_level, diversity_level, bandwidth, req_bandwidths) - sort_base += compute_reservation(_level, diversity_level, bandwidth) + self.constraint_solver.get_req_bandwidths(_level, + diversity_level, + 
bandwidth, + req_bandwidths) + sort_base += compute_reservation(_level, diversity_level, + bandwidth) - if self.constraint_solver._check_nw_bandwidth_availability(_level, req_bandwidths, _candidate) is False: + if self.constraint_solver._check_nw_bandwidth_availability( + _level, req_bandwidths, _candidate) is False: sort_base = -1 if temp_exclusivity_insert is True: @@ -1217,7 +1335,8 @@ class Search(object): return sort_base - def _add_exclusivity_to_candidate(self, _level, _candidate, _exclusivity_id): + def _add_exclusivity_to_candidate(self, _level, _candidate, + _exclusivity_id): lgr = self.avail_logical_groups[_exclusivity_id] if _level == "host": @@ -1268,44 +1387,56 @@ class Search(object): def _check_availability(self, _level, _n, _candidate): if isinstance(_n.node, VM): - if self.constraint_solver.check_cpu_capacity(_level, _n.node, _candidate) is False: + if self.constraint_solver.check_cpu_capacity(_level, _n.node, + _candidate) is False: return False - if self.constraint_solver.check_mem_capacity(_level, _n.node, _candidate) is False: + if self.constraint_solver.check_mem_capacity(_level, _n.node, + _candidate) is False: return False - if self.constraint_solver.check_local_disk_capacity(_level, _n.node, _candidate) is False: + if self.constraint_solver.check_local_disk_capacity( + _level, _n.node, _candidate) is False: return False elif isinstance(_n.node, Volume): - if self.constraint_solver.check_storage_availability(_level, _n.node, _candidate) is False: + if self.constraint_solver.check_storage_availability( + _level, _n.node, _candidate) is False: return False else: - if self.constraint_solver.check_cpu_capacity(_level, _n.node, _candidate) is False or \ - self.constraint_solver.check_mem_capacity(_level, _n.node, _candidate) is False or \ - self.constraint_solver.check_local_disk_capacity(_level, _n.node, _candidate) is False or \ - self.constraint_solver.check_storage_availability(_level, _n.node, _candidate) is False: + if 
self.constraint_solver.check_cpu_capacity( + _level, _n.node, _candidate) is False or \ + self.constraint_solver.check_mem_capacity( + _level, _n.node, _candidate) is False or \ + self.constraint_solver.check_local_disk_capacity( + _level, _n.node, _candidate) is False or \ + self.constraint_solver.check_storage_availability( + _level, _n.node, _candidate) is False: return False - if self.constraint_solver.check_nw_bandwidth_availability(_level, _n, - self.node_placements, - _candidate) is False: + if self.constraint_solver.check_nw_bandwidth_availability( + _level, _n, self.node_placements, _candidate) is False: return False if isinstance(_n.node, VM): if len(_n.node.extra_specs_list) > 0: - if self.constraint_solver.check_host_aggregates(_level, _candidate, _n.node) is False: + if self.constraint_solver.check_host_aggregates( + _level, _candidate, _n.node) is False: return False if isinstance(_n.node, VM): if _n.node.availability_zone is not None: - if self.constraint_solver.check_availability_zone(_level, _candidate, _n.node) is False: + if self.constraint_solver.check_availability_zone( + _level, _candidate, _n.node) is False: return False - if self.constraint_solver.conflict_diversity(_level, _n, self.node_placements, _candidate) is True: + if self.constraint_solver.conflict_diversity( + _level, _n, self.node_placements, _candidate) is True: return False - exclusivities = self.constraint_solver.get_exclusivities(_n.node.exclusivity_groups, _level) + exclusivities = self.constraint_solver.get_exclusivities( + _n.node.exclusivity_groups, _level) if len(exclusivities) == 1: exc_id = exclusivities[exclusivities.keys()[0]] - if self.constraint_solver.exist_group(_level, exc_id, "EX", _candidate) is False: + if self.constraint_solver.exist_group( + _level, exc_id, "EX", _candidate) is False: return False elif len(exclusivities) < 1: if self.constraint_solver.conflict_exclusivity(_level, _candidate): @@ -1314,7 +1445,8 @@ class Search(object): aff_id = 
_n.get_affinity_id() if aff_id is not None: if aff_id in self.avail_logical_groups.keys(): - if self.constraint_solver.exist_group(_level, aff_id, "AFF", _candidate) is False: + if self.constraint_solver.exist_group( + _level, aff_id, "AFF", _candidate) is False: return False return True @@ -1324,7 +1456,8 @@ class Search(object): self._deduct_candidate_vm_reservation(_level, _n.node, _candidate) if isinstance(_n.node, Volume) or isinstance(_n.node, VGroup): - self._deduct_candidate_volume_reservation(_level, _n.node, _candidate) + self._deduct_candidate_volume_reservation(_level, _n.node, + _candidate) self._deduct_candidate_nw_reservation(_candidate, _rsrv) @@ -1431,14 +1564,16 @@ class Search(object): for srk in _candidate.cluster_avail_switches.keys(): sr = _candidate.cluster_avail_switches[srk] if sr.switch_type == "spine": - sr.avail_bandwidths = [bw - _rsrv[2] for bw in sr.avail_bandwidths] + sr.avail_bandwidths = [bw - _rsrv[2] for bw + in sr.avail_bandwidths] - ''' - deduction modules - ''' + """ + Deduction modules. 
+ """ def _deduct_reservation(self, _level, _best, _n): - exclusivities = self.constraint_solver.get_exclusivities(_n.node.exclusivity_groups, _level) + exclusivities = self.constraint_solver.get_exclusivities( + _n.node.exclusivity_groups, _level) exclusivity_id = None if len(exclusivities) == 1: exclusivity_id = exclusivities[exclusivities.keys()[0]] @@ -1467,7 +1602,8 @@ class Search(object): lgr.group_type = "EX" self.avail_logical_groups[lgr.name] = lgr - self.logger.debug("Search: add new exclusivity (" + _exclusivity_id + ")") + self.logger.debug("Search: add new exclusivity (" + + _exclusivity_id + ")") else: lgr = self.avail_logical_groups[_exclusivity_id] @@ -1484,23 +1620,28 @@ class Search(object): if _exclusivity_id not in chosen_host.host_memberships.keys(): chosen_host.host_memberships[_exclusivity_id] = lgr for _, np in self.avail_hosts.iteritems(): - if chosen_host.rack_name != "any" and np.rack_name == chosen_host.rack_name: + if chosen_host.rack_name != "any" and \ + np.rack_name == chosen_host.rack_name: if _exclusivity_id not in np.rack_memberships.keys(): np.rack_memberships[_exclusivity_id] = lgr - if chosen_host.cluster_name != "any" and np.cluster_name == chosen_host.cluster_name: + if chosen_host.cluster_name != "any" and \ + np.cluster_name == chosen_host.cluster_name: if _exclusivity_id not in np.cluster_memberships.keys(): np.cluster_memberships[_exclusivity_id] = lgr elif _level == "rack": for _, np in self.avail_hosts.iteritems(): - if chosen_host.rack_name != "any" and np.rack_name == chosen_host.rack_name: + if chosen_host.rack_name != "any" and \ + np.rack_name == chosen_host.rack_name: if _exclusivity_id not in np.rack_memberships.keys(): np.rack_memberships[_exclusivity_id] = lgr - if chosen_host.cluster_name != "any" and np.cluster_name == chosen_host.cluster_name: + if chosen_host.cluster_name != "any" and \ + np.cluster_name == chosen_host.cluster_name: if _exclusivity_id not in np.cluster_memberships.keys(): 
np.cluster_memberships[_exclusivity_id] = lgr elif _level == "cluster": for _, np in self.avail_hosts.iteritems(): - if chosen_host.cluster_name != "any" and np.cluster_name == chosen_host.cluster_name: + if chosen_host.cluster_name != "any" and \ + np.cluster_name == chosen_host.cluster_name: if _exclusivity_id not in np.cluster_memberships.keys(): np.cluster_memberships[_exclusivity_id] = lgr @@ -1529,23 +1670,28 @@ class Search(object): if _affinity_id not in chosen_host.host_memberships.keys(): chosen_host.host_memberships[_affinity_id] = lgr for _, np in self.avail_hosts.iteritems(): - if chosen_host.rack_name != "any" and np.rack_name == chosen_host.rack_name: + if chosen_host.rack_name != "any" and \ + np.rack_name == chosen_host.rack_name: if _affinity_id not in np.rack_memberships.keys(): np.rack_memberships[_affinity_id] = lgr - if chosen_host.cluster_name != "any" and np.cluster_name == chosen_host.cluster_name: + if chosen_host.cluster_name != "any" and \ + np.cluster_name == chosen_host.cluster_name: if _affinity_id not in np.cluster_memberships.keys(): np.cluster_memberships[_affinity_id] = lgr elif _level == "rack": for _, np in self.avail_hosts.iteritems(): - if chosen_host.rack_name != "any" and np.rack_name == chosen_host.rack_name: + if chosen_host.rack_name != "any" and \ + np.rack_name == chosen_host.rack_name: if _affinity_id not in np.rack_memberships.keys(): np.rack_memberships[_affinity_id] = lgr - if chosen_host.cluster_name != "any" and np.cluster_name == chosen_host.cluster_name: + if chosen_host.cluster_name != "any" and \ + np.cluster_name == chosen_host.cluster_name: if _affinity_id not in np.cluster_memberships.keys(): np.cluster_memberships[_affinity_id] = lgr elif _level == "cluster": for _, np in self.avail_hosts.iteritems(): - if chosen_host.cluster_name != "any" and np.cluster_name == chosen_host.cluster_name: + if chosen_host.cluster_name != "any" and \ + np.cluster_name == chosen_host.cluster_name: if _affinity_id not in 
np.cluster_memberships.keys(): np.cluster_memberships[_affinity_id] = lgr @@ -1557,7 +1703,8 @@ class Search(object): lgr.group_type = "DIV" self.avail_logical_groups[lgr.name] = lgr - self.logger.debug("Search: add new diversity (" + _diversity_id + ")") + self.logger.debug("Search: add new diversity (" + + _diversity_id + ")") else: lgr = self.avail_logical_groups[_diversity_id] @@ -1574,23 +1721,28 @@ class Search(object): if _diversity_id not in chosen_host.host_memberships.keys(): chosen_host.host_memberships[_diversity_id] = lgr for _, np in self.avail_hosts.iteritems(): - if chosen_host.rack_name != "any" and np.rack_name == chosen_host.rack_name: + if chosen_host.rack_name != "any" and \ + np.rack_name == chosen_host.rack_name: if _diversity_id not in np.rack_memberships.keys(): np.rack_memberships[_diversity_id] = lgr - if chosen_host.cluster_name != "any" and np.cluster_name == chosen_host.cluster_name: + if chosen_host.cluster_name != "any" and \ + np.cluster_name == chosen_host.cluster_name: if _diversity_id not in np.cluster_memberships.keys(): np.cluster_memberships[_diversity_id] = lgr elif _level == "rack": for _, np in self.avail_hosts.iteritems(): - if chosen_host.rack_name != "any" and np.rack_name == chosen_host.rack_name: + if chosen_host.rack_name != "any" and \ + np.rack_name == chosen_host.rack_name: if _diversity_id not in np.rack_memberships.keys(): np.rack_memberships[_diversity_id] = lgr - if chosen_host.cluster_name != "any" and np.cluster_name == chosen_host.cluster_name: + if chosen_host.cluster_name != "any" and \ + np.cluster_name == chosen_host.cluster_name: if _diversity_id not in np.cluster_memberships.keys(): np.cluster_memberships[_diversity_id] = lgr elif _level == "cluster": for _, np in self.avail_hosts.iteritems(): - if chosen_host.cluster_name != "any" and np.cluster_name == chosen_host.cluster_name: + if chosen_host.cluster_name != "any" and \ + np.cluster_name == chosen_host.cluster_name: if _diversity_id not in 
np.cluster_memberships.keys(): np.cluster_memberships[_diversity_id] = lgr @@ -1605,12 +1757,14 @@ class Search(object): chosen_host.host_num_of_placed_vms += 1 for _, np in self.avail_hosts.iteritems(): - if chosen_host.rack_name != "any" and np.rack_name == chosen_host.rack_name: + if chosen_host.rack_name != "any" and \ + np.rack_name == chosen_host.rack_name: np.rack_avail_vCPUs -= _n.node.vCPUs np.rack_avail_mem -= _n.node.mem np.rack_avail_local_disk -= _n.node.local_volume_size np.rack_num_of_placed_vms += 1 - if chosen_host.cluster_name != "any" and np.cluster_name == chosen_host.cluster_name: + if chosen_host.cluster_name != "any" and \ + np.cluster_name == chosen_host.cluster_name: np.cluster_avail_vCPUs -= _n.node.vCPUs np.cluster_avail_mem -= _n.node.mem np.cluster_avail_local_disk -= _n.node.local_volume_size @@ -1621,14 +1775,16 @@ class Search(object): cn = self.avail_hosts[self.node_placements[vml.node].host_name] placement_level = cn.get_common_placement(chosen_host) bandwidth = vml.nw_bandwidth - self.bandwidth_usage += self._deduct_nw_reservation(placement_level, chosen_host, cn, bandwidth) + self.bandwidth_usage += self._deduct_nw_reservation( + placement_level, chosen_host, cn, bandwidth) for voll in _n.node.volume_list: if voll.node in self.node_placements.keys(): cn = self.avail_hosts[self.node_placements[voll.node].host_name] placement_level = cn.get_common_placement(chosen_host) bandwidth = voll.io_bandwidth - self.bandwidth_usage += self._deduct_nw_reservation(placement_level, chosen_host, cn, bandwidth) + self.bandwidth_usage += self._deduct_nw_reservation( + placement_level, chosen_host, cn, bandwidth) def _deduct_volume_resources(self, _best, _n): storage_host = self.avail_storage_hosts[_best.storage.storage_name] @@ -1641,7 +1797,8 @@ class Search(object): cn = self.avail_hosts[self.node_placements[vml.node].host_name] placement_level = cn.get_common_placement(chosen_host) bandwidth = vml.io_bandwidth - self.bandwidth_usage += 
self._deduct_nw_reservation(placement_level, chosen_host, cn, bandwidth) + self.bandwidth_usage += self._deduct_nw_reservation( + placement_level, chosen_host, cn, bandwidth) def _deduct_nw_reservation(self, _placement_level, _host1, _host2, _rsrv): nw_reservation = compute_reservation("host", _placement_level, _rsrv) @@ -1677,7 +1834,8 @@ class Search(object): sr.avail_bandwidths = [bw - _rsrv for bw in sr.avail_bandwidths] for _, sr in _host2.cluster_avail_switches.iteritems(): if sr.switch_type == "spine": - sr.avail_bandwidths = [bw - _rsrv for bw in sr.avail_bandwidths] + sr.avail_bandwidths = [bw - _rsrv for bw + in sr.avail_bandwidths] return nw_reservation @@ -1686,9 +1844,9 @@ class Search(object): if _level == "host" or isinstance(_v, VGroup): self.node_placements[_v] = _best - ''' - rollback modules - ''' + """ + Rollback modules. + """ def _rollback_reservation(self, _v): if isinstance(_v, VM): @@ -1712,7 +1870,8 @@ class Search(object): if _v.name != "any": self._remove_affinity(chosen_host, affinity_id, level) - exclusivities = self.constraint_solver.get_exclusivities(_v.exclusivity_groups, level) + exclusivities = self.constraint_solver.get_exclusivities( + _v.exclusivity_groups, level) if len(exclusivities) == 1: exclusivity_id = exclusivities[exclusivities.keys()[0]] self._remove_exclusivity(chosen_host, exclusivity_id, level) @@ -1742,27 +1901,35 @@ class Search(object): del _chosen_host.host_memberships[_exclusivity_id] for _, np in self.avail_hosts.iteritems(): - if _chosen_host.rack_name != "any" and np.rack_name == _chosen_host.rack_name: + if _chosen_host.rack_name != "any" and \ + np.rack_name == _chosen_host.rack_name: if _exclusivity_id in np.rack_memberships.keys(): del np.rack_memberships[_exclusivity_id] - if _chosen_host.cluster_name != "any" and np.cluster_name == _chosen_host.cluster_name: + if _chosen_host.cluster_name != "any" and \ + np.cluster_name == \ + _chosen_host.cluster_name: if _exclusivity_id in 
np.cluster_memberships.keys(): del np.cluster_memberships[_exclusivity_id] elif _level == "rack": if _chosen_host.rack_num_of_placed_vms == 0: for _, np in self.avail_hosts.iteritems(): - if _chosen_host.rack_name != "any" and np.rack_name == _chosen_host.rack_name: + if _chosen_host.rack_name != "any" and \ + np.rack_name == _chosen_host.rack_name: if _exclusivity_id in np.rack_memberships.keys(): del np.rack_memberships[_exclusivity_id] - if _chosen_host.cluster_name != "any" and np.cluster_name == _chosen_host.cluster_name: + if _chosen_host.cluster_name != "any" and \ + np.cluster_name == \ + _chosen_host.cluster_name: if _exclusivity_id in np.cluster_memberships.keys(): del np.cluster_memberships[_exclusivity_id] elif _level == "cluster": if _chosen_host.cluster_num_of_placed_vms == 0: for _, np in self.avail_hosts.iteritems(): - if _chosen_host.cluster_name != "any" and np.cluster_name == _chosen_host.cluster_name: + if _chosen_host.cluster_name != "any" and \ + np.cluster_name == \ + _chosen_host.cluster_name: if _exclusivity_id in np.cluster_memberships.keys(): del np.cluster_memberships[_exclusivity_id] @@ -1790,31 +1957,40 @@ class Search(object): exist_affinity = False if _level == "host": - if exist_affinity is False and _affinity_id in _chosen_host.host_memberships.keys(): + if exist_affinity is False and _affinity_id \ + in _chosen_host.host_memberships.keys(): del _chosen_host.host_memberships[_affinity_id] for _, np in self.avail_hosts.iteritems(): - if _chosen_host.rack_name != "any" and np.rack_name == _chosen_host.rack_name: + if _chosen_host.rack_name != "any" and \ + np.rack_name == _chosen_host.rack_name: if _affinity_id in np.rack_memberships.keys(): del np.rack_memberships[_affinity_id] - if _chosen_host.cluster_name != "any" and np.cluster_name == _chosen_host.cluster_name: + if _chosen_host.cluster_name != "any" and \ + np.cluster_name == \ + _chosen_host.cluster_name: if _affinity_id in np.cluster_memberships.keys(): del 
np.cluster_memberships[_affinity_id] elif _level == "rack": if exist_affinity is False: for _, np in self.avail_hosts.iteritems(): - if _chosen_host.rack_name != "any" and np.rack_name == _chosen_host.rack_name: + if _chosen_host.rack_name != "any" and \ + np.rack_name == _chosen_host.rack_name: if _affinity_id in np.rack_memberships.keys(): del np.rack_memberships[_affinity_id] - if _chosen_host.cluster_name != "any" and np.cluster_name == _chosen_host.cluster_name: + if _chosen_host.cluster_name != "any" and \ + np.cluster_name == \ + _chosen_host.cluster_name: if _affinity_id in np.cluster_memberships.keys(): del np.cluster_memberships[_affinity_id] elif _level == "cluster": if exist_affinity is False: for _, np in self.avail_hosts.iteritems(): - if _chosen_host.cluster_name != "any" and np.cluster_name == _chosen_host.cluster_name: + if _chosen_host.cluster_name != "any" and \ + np.cluster_name == \ + _chosen_host.cluster_name: if _affinity_id in np.cluster_memberships.keys(): del np.cluster_memberships[_affinity_id] @@ -1842,31 +2018,40 @@ class Search(object): exist_diversity = False if _level == "host": - if exist_diversity is False and _diversity_id in _chosen_host.host_memberships.keys(): + if exist_diversity is False and _diversity_id \ + in _chosen_host.host_memberships.keys(): del _chosen_host.host_memberships[_diversity_id] for _, np in self.avail_hosts.iteritems(): - if _chosen_host.rack_name != "any" and np.rack_name == _chosen_host.rack_name: + if _chosen_host.rack_name != "any" and \ + np.rack_name == _chosen_host.rack_name: if _diversity_id in np.rack_memberships.keys(): del np.rack_memberships[_diversity_id] - if _chosen_host.cluster_name != "any" and np.cluster_name == _chosen_host.cluster_name: + if _chosen_host.cluster_name != "any" and \ + np.cluster_name == \ + _chosen_host.cluster_name: if _diversity_id in np.cluster_memberships.keys(): del np.cluster_memberships[_diversity_id] elif _level == "rack": if exist_diversity is False: for _, np 
in self.avail_hosts.iteritems(): - if _chosen_host.rack_name != "any" and np.rack_name == _chosen_host.rack_name: + if _chosen_host.rack_name != "any" and \ + np.rack_name == _chosen_host.rack_name: if _diversity_id in np.rack_memberships.keys(): del np.rack_memberships[_diversity_id] - if _chosen_host.cluster_name != "any" and np.cluster_name == _chosen_host.cluster_name: + if _chosen_host.cluster_name != "any" and \ + np.cluster_name == \ + _chosen_host.cluster_name: if _diversity_id in np.cluster_memberships.keys(): del np.cluster_memberships[_diversity_id] elif _level == "cluster": if exist_diversity is False: for _, np in self.avail_hosts.iteritems(): - if _chosen_host.cluster_name != "any" and np.cluster_name == _chosen_host.cluster_name: + if _chosen_host.cluster_name != "any" and \ + np.cluster_name == \ + _chosen_host.cluster_name: if _diversity_id in np.cluster_memberships.keys(): del np.cluster_memberships[_diversity_id] @@ -1882,12 +2067,14 @@ class Search(object): self.num_of_hosts -= 1 for _, np in self.avail_hosts.iteritems(): - if chosen_host.rack_name != "any" and np.rack_name == chosen_host.rack_name: + if chosen_host.rack_name != "any" and \ + np.rack_name == chosen_host.rack_name: np.rack_avail_vCPUs += _v.vCPUs np.rack_avail_mem += _v.mem np.rack_avail_local_disk += _v.local_volume_size np.rack_num_of_placed_vms -= 1 - if chosen_host.cluster_name != "any" and np.cluster_name == chosen_host.cluster_name: + if chosen_host.cluster_name != "any" and \ + np.cluster_name == chosen_host.cluster_name: np.cluster_avail_vCPUs += _v.vCPUs np.cluster_avail_mem += _v.mem np.cluster_avail_local_disk += _v.local_volume_size @@ -1895,17 +2082,23 @@ class Search(object): for vml in _v.vm_list: if vml.node in self.node_placements.keys(): - cn = self.avail_hosts[self.node_placements[vml.node].host_name] + cn = self.avail_hosts[ + self.node_placements[vml.node].host_name + ] level = cn.get_common_placement(chosen_host) bandwidth = vml.nw_bandwidth - 
self.bandwidth_usage -= self._rollback_nw_reservation(level, chosen_host, cn, bandwidth) + self.bandwidth_usage -= self._rollback_nw_reservation( + level, chosen_host, cn, bandwidth) for voll in _v.volume_list: if voll.node in self.node_placements.keys(): - cn = self.avail_hosts[self.node_placements[voll.node].host_name] + cn = self.avail_hosts[ + self.node_placements[voll.node].host_name + ] level = cn.get_common_placement(chosen_host) bandwidth = voll.io_bandwidth - self.bandwidth_usage -= self._rollback_nw_reservation(level, chosen_host, cn, bandwidth) + self.bandwidth_usage -= self._rollback_nw_reservation( + level, chosen_host, cn, bandwidth) def _rollback_volume_reservation(self, _v): if _v in self.node_placements.keys(): @@ -1917,10 +2110,13 @@ class Search(object): for vml in _v.vm_list: if vml.node in self.node_placements.keys(): - cn = self.avail_hosts[self.node_placements[vml.node].host_name] + cn = self.avail_hosts[ + self.node_placements[vml.node].host_name + ] level = cn.get_common_placement(chosen_host) bandwidth = vml.io_bandwidth - self.bandwidth_usage -= self._rollback_nw_reservation(level, chosen_host, cn, bandwidth) + self.bandwidth_usage -= self._rollback_nw_reservation( + level, chosen_host, cn, bandwidth) def _rollback_nw_reservation(self, _level, _host1, _host2, _rsrv): nw_reservation = compute_reservation("host", _level, _rsrv) @@ -1953,17 +2149,20 @@ class Search(object): for _, sr in _host1.cluster_avail_switches.iteritems(): if sr.switch_type == "spine": - sr.avail_bandwidths = [bw + _rsrv for bw in sr.avail_bandwidths] + sr.avail_bandwidths = [bw + _rsrv for bw + in sr.avail_bandwidths] for _, sr in _host2.cluster_avail_switches.iteritems(): if sr.switch_type == "spine": - sr.avail_bandwidths = [bw + _rsrv for bw in sr.avail_bandwidths] + sr.avail_bandwidths = [bw + _rsrv for bw + in sr.avail_bandwidths] return nw_reservation def _rollback_node_placement(self, _v): if _v in self.node_placements.keys(): del self.node_placements[_v] - 
self.logger.debug("Search: node (" + _v.name + ") removed from placement") + self.logger.debug("Search: node (" + _v.name + + ") removed from placement") if isinstance(_v, VGroup): for _, sg in _v.subvgroups.iteritems(): diff --git a/valet/engine/optimizer/ostro/search_base.py b/valet/engine/optimizer/ostro/search_base.py index 099d500..15c619a 100755 --- a/valet/engine/optimizer/ostro/search_base.py +++ b/valet/engine/optimizer/ostro/search_base.py @@ -1,39 +1,67 @@ # # Copyright 2014-2017 AT&T Intellectual Property -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-from valet.engine.optimizer.app_manager.app_topology_base import VGroup, VM, Volume, LEVELS +"""Resources utlized by search engine.""" + +from valet.engine.optimizer.app_manager.app_topology_base \ + import VGroup, VM, Volume, LEVELS class Resource(object): + """Resource.""" def __init__(self): - self.level = None # level of placement + """Initialization.""" + # level of placement + self.level = None self.host_name = None - self.host_memberships = {} # all mapped logical groups to host - self.host_vCPUs = 0 # original total vCPUs before overcommit - self.host_avail_vCPUs = 0 # remaining vCPUs after overcommit - self.host_mem = 0 # original total mem cap before overcommit - self.host_avail_mem = 0 # remaining mem cap after - self.host_local_disk = 0 # original total local disk cap before overcommit - self.host_avail_local_disk = 0 # remaining local disk cap after overcommit - self.host_avail_switches = {} # all mapped switches to host - self.host_avail_storages = {} # all mapped storage_resources to host - self.host_num_of_placed_vms = 0 # the number of vms currently placed in this host - self.rack_name = None # where this host is located + # all mapped logical groups to host + self.host_memberships = {} + + # original total vCPUs before overcommit + self.host_vCPUs = 0 + + # remaining vCPUs after overcommit + self.host_avail_vCPUs = 0 + + # original total mem cap before overcommit + self.host_mem = 0 + + # remaining mem cap after + self.host_avail_mem = 0 + + # original total local disk cap before overcommit + self.host_local_disk = 0 + + # remaining local disk cap after overcommit + self.host_avail_local_disk = 0 + + # all mapped switches to host + self.host_avail_switches = {} + + # all mapped storage_resources to host + self.host_avail_storages = {} + + # the number of vms currently placed in this host + self.host_num_of_placed_vms = 0 + + # where this host is located + self.rack_name = None + self.rack_memberships = {} self.rack_vCPUs = 0 self.rack_avail_vCPUs 
= 0 @@ -41,11 +69,18 @@ class Resource(object): self.rack_avail_mem = 0 self.rack_local_disk = 0 self.rack_avail_local_disk = 0 - self.rack_avail_switches = {} # all mapped switches to rack - self.rack_avail_storages = {} # all mapped storage_resources to rack + + # all mapped switches to rack + self.rack_avail_switches = {} + + # all mapped storage_resources to rack + self.rack_avail_storages = {} + self.rack_num_of_placed_vms = 0 - self.cluster_name = None # where this host and rack are located + # where this host and rack are located + self.cluster_name = None + self.cluster_memberships = {} self.cluster_vCPUs = 0 self.cluster_avail_vCPUs = 0 @@ -53,15 +88,24 @@ class Resource(object): self.cluster_avail_mem = 0 self.cluster_local_disk = 0 self.cluster_avail_local_disk = 0 - self.cluster_avail_switches = {} # all mapped switches to cluster - self.cluster_avail_storages = {} # all mapped storage_resources to cluster + + # all mapped switches to cluster + self.cluster_avail_switches = {} + + # all mapped storage_resources to cluster + self.cluster_avail_storages = {} self.cluster_num_of_placed_vms = 0 - self.storage = None # selected best storage for volume among host_avail_storages + # selected best storage for volume among host_avail_storages + self.storage = None - self.sort_base = 0 # order to place + # order to place + self.sort_base = 0 def get_common_placement(self, _resource): + """Get common placement level.""" + """Get the common level between this resource and the one + provided.""" level = None if self.cluster_name != _resource.cluster_name: @@ -78,6 +122,7 @@ class Resource(object): return level def get_resource_name(self, _level): + """Get the name of this resource at the specified level.""" name = "unknown" if _level == "cluster": @@ -90,6 +135,7 @@ class Resource(object): return name def get_memberships(self, _level): + """Get the memberships of this resource at the specified level.""" memberships = None if _level == "cluster": @@ -102,6 +148,7 @@ 
class Resource(object): return memberships def get_num_of_placed_vms(self, _level): + """Get the number of placed vms of this resource at the specified level.""" num_of_vms = 0 if _level == "cluster": @@ -114,6 +161,7 @@ class Resource(object): return num_of_vms def get_avail_resources(self, _level): + """Get the available vCPUs, memory, local disk of this resource at the specified level.""" avail_vCPUs = 0 avail_mem = 0 avail_local_disk = 0 @@ -134,6 +182,7 @@ class Resource(object): return (avail_vCPUs, avail_mem, avail_local_disk) def get_local_disk(self, _level): + """Get the local disk and available local disk of this resource at the specified level.""" local_disk = 0 avail_local_disk = 0 @@ -150,6 +199,7 @@ class Resource(object): return (local_disk, avail_local_disk) def get_vCPUs(self, _level): + """Get the vCPUs and available vCPUs of this resource at the specified level.""" vCPUs = 0 avail_vCPUs = 0 @@ -166,6 +216,7 @@ class Resource(object): return (vCPUs, avail_vCPUs) def get_mem(self, _level): + """Get the memory and available memory of this resource at the specified level.""" mem = 0 avail_mem = 0 @@ -182,6 +233,7 @@ class Resource(object): return (mem, avail_mem) def get_avail_storages(self, _level): + """Get the available storages of this resource at the specified level.""" avail_storages = None if _level == "cluster": @@ -194,6 +246,7 @@ class Resource(object): return avail_storages def get_avail_switches(self, _level): + """Get the available switches of this resource at the specified level.""" avail_switches = None if _level == "cluster": @@ -207,20 +260,26 @@ class Resource(object): class LogicalGroupResource(object): + """LogicalGroupResource.""" def __init__(self): + """Initialization.""" self.name = None self.group_type = "AGGR" self.metadata = {} self.num_of_placed_vms = 0 - self.num_of_placed_vms_per_host = {} # key = host (i.e., id of host or rack), value = num_of_placed_vms + + # key = host (i.e., id of host or rack), value = 
num_of_placed_vms + self.num_of_placed_vms_per_host = {} class StorageResource(object): + """StorageResource.""" def __init__(self): + """Initialization.""" self.storage_name = None self.storage_class = None self.storage_avail_disk = 0 @@ -229,8 +288,10 @@ class StorageResource(object): class SwitchResource(object): + """SwitchResource.""" def __init__(self): + """Initialization.""" self.switch_name = None self.switch_type = None self.avail_bandwidths = [] # out-bound bandwidths @@ -239,13 +300,16 @@ class SwitchResource(object): class Node(object): + """Node.""" def __init__(self): + """Initialization.""" self.node = None # VM, Volume, or VGroup self.sort_base = -1 def get_all_links(self): + """Return a list of links for vms, volumes, and/or vgroups.""" link_list = [] if isinstance(self.node, VM): @@ -263,6 +327,7 @@ class Node(object): return link_list def get_bandwidth_of_link(self, _link): + """Return bandwidth of link.""" bandwidth = 0 if isinstance(self.node, VGroup) or isinstance(self.node, VM): @@ -276,6 +341,7 @@ class Node(object): return bandwidth def get_common_diversity(self, _diversity_groups): + """Return the common level of the given diversity groups.""" common_level = "ANY" for dk in self.node.diversity_groups.keys(): @@ -290,16 +356,19 @@ class Node(object): return common_level def get_affinity_id(self): + """Return the affinity id.""" aff_id = None - if isinstance(self.node, VGroup) and self.node.vgroup_type == "AFF" and \ - self.node.name != "any": + if isinstance(self.node, VGroup) and \ + self.node.vgroup_type == "AFF" and \ + self.node.name != "any": aff_id = self.node.level + ":" + self.node.name return aff_id def compute_reservation(_level, _placement_level, _bandwidth): + """Compute and return the reservation.""" reservation = 0 if _placement_level != "ANY": diff --git a/valet/engine/optimizer/ostro_server/configuration.py b/valet/engine/optimizer/ostro_server/configuration.py index c174a1b..3be8f1c 100755 --- 
a/valet/engine/optimizer/ostro_server/configuration.py +++ b/valet/engine/optimizer/ostro_server/configuration.py @@ -1,12 +1,12 @@ # # Copyright 2014-2017 AT&T Intellectual Property -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -15,6 +15,8 @@ # - Set all configurations to run Ostro +"""Valet Engine Server Configuration.""" + import os from oslo_config import cfg from valet.engine.conf import register_conf @@ -24,9 +26,10 @@ CONF = cfg.CONF class Config(object): + """Valet Engine Server Configuration.""" def __init__(self, *default_config_files): - + """Initialization.""" register_conf() if default_config_files: CONF(default_config_files=default_config_files) @@ -126,7 +129,7 @@ class Config(object): self.base_flavor_disk = 0 def configure(self): - + """Store config info extracted from oslo.""" status = self._init_system() if status != "success": return status @@ -181,17 +184,21 @@ class Config(object): self.network_control_url = CONF.engine.network_control_url - self.default_cpu_allocation_ratio = CONF.engine.default_cpu_allocation_ratio + self.default_cpu_allocation_ratio = \ + CONF.engine.default_cpu_allocation_ratio - self.default_ram_allocation_ratio = CONF.engine.default_ram_allocation_ratio + self.default_ram_allocation_ratio = \ + CONF.engine.default_ram_allocation_ratio - self.default_disk_allocation_ratio = CONF.engine.default_disk_allocation_ratio + self.default_disk_allocation_ratio = \ + CONF.engine.default_disk_allocation_ratio self.static_cpu_standby_ratio = CONF.engine.static_cpu_standby_ratio self.static_mem_standby_ratio = 
CONF.engine.static_mem_standby_ratio - self.static_local_disk_standby_ratio = CONF.engine.static_local_disk_standby_ratio + self.static_local_disk_standby_ratio = \ + CONF.engine.static_local_disk_standby_ratio self.topology_trigger_time = CONF.engine.topology_trigger_time diff --git a/valet/engine/optimizer/ostro_server/daemon.py b/valet/engine/optimizer/ostro_server/daemon.py index e4f14ac..dad15d1 100644 --- a/valet/engine/optimizer/ostro_server/daemon.py +++ b/valet/engine/optimizer/ostro_server/daemon.py @@ -1,18 +1,20 @@ # # Copyright 2014-2017 AT&T Intellectual Property -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +"""Generic Daemon.""" + import atexit import os from signal import SIGTERM @@ -21,12 +23,14 @@ import time class Daemon(object): - """ A generic daemon class. 
+ """A generic daemon class.""" - Usage: subclass the Daemon class and override the run() method + """Usage: subclass the Daemon class and override the run() method """ - def __init__(self, priority, pidfile, logger, stdin='/dev/null', stdout='/dev/null', stderr='/dev/null'): + def __init__(self, priority, pidfile, logger, stdin='/dev/null', + stdout='/dev/null', stderr='/dev/null'): + """Initialization.""" self.stdin = stdin self.stdout = stdout self.stderr = stderr @@ -35,9 +39,9 @@ class Daemon(object): self.logger = logger def daemonize(self): - """ Do the UNIX double-fork magic, see Stevens' "Advanced - - Programming in the UNIX Environment" for details (ISBN 0201563177) + """Do the UNIX double-fork magic.""" + """See Stevens' "Advanced Programming in the UNIX Environment" + for details. (ISBN 0201563177). http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16 """ try: @@ -47,7 +51,8 @@ class Daemon(object): sys.exit(0) except OSError as e: self.logger.error("Daemon error at step1: " + e.strerror) - sys.stderr.write("fork #1 failed: %d (%s)\n" % (e.errno, e.strerror)) + sys.stderr.write("fork #1 failed: %d (%s)\n" % + (e.errno, e.strerror)) sys.exit(1) # decouple from parent environment @@ -63,7 +68,8 @@ class Daemon(object): sys.exit(0) except OSError as e: self.logger.error("Daemon error at step2: " + e.strerror) - sys.stderr.write("fork #2 failed: %d (%s)\n" % (e.errno, e.strerror)) + sys.stderr.write("fork #2 failed: %d (%s)\n" % + (e.errno, e.strerror)) sys.exit(1) # redirect standard file descriptors @@ -82,10 +88,11 @@ class Daemon(object): file(self.pidfile, 'w+').write("%s\n" % pid) def delpid(self): + """Remove pidfile.""" os.remove(self.pidfile) def getpid(self): - """returns the content of pidfile or None.""" + """Return the content of pidfile or None.""" try: pf = file(self.pidfile, 'r') pid = int(pf.read().strip()) @@ -95,7 +102,7 @@ class Daemon(object): return pid def checkpid(self, pid): - """ Check For the existence of a unix pid. 
""" + """Check for the existence of a UNIX pid.""" if pid is None: return False @@ -108,7 +115,7 @@ class Daemon(object): return True def start(self): - """Start the daemon""" + """Start thedaemon.""" # Check for a pidfile to see if the daemon already runs pid = self.getpid() @@ -122,7 +129,7 @@ class Daemon(object): self.run() def stop(self): - """Stop the daemon""" + """Stop the daemon.""" # Get the pid from the pidfile pid = self.getpid() @@ -146,12 +153,12 @@ class Daemon(object): sys.exit(1) def restart(self): - """Restart the daemon""" + """Restart the daemon.""" self.stop() self.start() def status(self): - """ returns instance's priority """ + """Return instance's priority.""" # Check for a pidfile to see if the daemon already runs pid = self.getpid() @@ -161,13 +168,14 @@ class Daemon(object): message = "status: pidfile %s exist. Daemon is running\n" status = self.priority else: - message = "status: pidfile %s does not exist. Daemon is not running\n" + message = "status: pidfile %s does not exist. Daemon is not " \ + "running\n" sys.stderr.write(message % self.pidfile) return status def run(self): - """ You should override this method when you subclass Daemon. - - It will be called after the process has been daemonized by start() or restart(). + """You should override this method when you subclass Daemon.""" + """It will be called after the process has been daemonized by + start() or restart(). """ diff --git a/valet/engine/optimizer/ostro_server/db_cleaner.py b/valet/engine/optimizer/ostro_server/db_cleaner.py index 3c21680..1bfe300 100644 --- a/valet/engine/optimizer/ostro_server/db_cleaner.py +++ b/valet/engine/optimizer/ostro_server/db_cleaner.py @@ -1,12 +1,12 @@ # # Copyright 2014-2017 AT&T Intellectual Property -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -15,28 +15,38 @@ # - Handle user requests -import sys +"""Database Cleaner.""" from configuration import Config - +import sys from valet.api.db.models.music import Music class DBCleaner(object): + """Database Cleaner.""" def __init__(self, _config): + """Initialization.""" self.config = _config self.music = Music() def clean_db_tables(self): - results = self.music.read_all_rows(self.config.db_keyspace, self.config.db_resource_table) + """Clean tables in Music.""" + """Clean resource, resource_index, request, response, event, + app, app_index, and uuid tables. + """ + results = self.music.read_all_rows(self.config.db_keyspace, + self.config.db_resource_table) if len(results) > 0: print("resource table result = ", len(results)) for _, row in results.iteritems(): - self.music.delete_row_eventually(self.config.db_keyspace, self.config.db_resource_table, 'site_name', row['site_name']) + self.music.delete_row_eventually(self.config.db_keyspace, + self.config.db_resource_table, + 'site_name', row['site_name']) - results = self.music.read_all_rows(self.config.db_keyspace, self.config.db_request_table) + results = self.music.read_all_rows(self.config.db_keyspace, + self.config.db_request_table) if len(results) > 0: print("request table result = ", len(results)) for _, row in results.iteritems(): @@ -44,7 +54,8 @@ class DBCleaner(object): self.config.db_request_table, 'stack_id', row['stack_id']) - results = self.music.read_all_rows(self.config.db_keyspace, self.config.db_response_table) + results = self.music.read_all_rows(self.config.db_keyspace, + self.config.db_response_table) if len(results) > 0: print("response table result = ", len(results)) for _, row in 
results.iteritems(): @@ -52,7 +63,8 @@ class DBCleaner(object): self.config.db_response_table, 'stack_id', row['stack_id']) - results = self.music.read_all_rows(self.config.db_keyspace, self.config.db_event_table) + results = self.music.read_all_rows(self.config.db_keyspace, + self.config.db_event_table) if len(results) > 0: print("event table result = ", len(results)) for _, row in results.iteritems(): @@ -60,15 +72,18 @@ class DBCleaner(object): self.config.db_event_table, 'timestamp', row['timestamp']) - results = self.music.read_all_rows(self.config.db_keyspace, self.config.db_resource_index_table) + results = self.music.read_all_rows(self.config.db_keyspace, + self.config.db_resource_index_table) if len(results) > 0: print("resource_index table result = ", len(results)) for _, row in results.iteritems(): - self.music.delete_row_eventually(self.config.db_keyspace, - self.config.db_resource_index_table, - 'site_name', row['site_name']) + self.music.delete_row_eventually( + self.config.db_keyspace, + self.config.db_resource_index_table, + 'site_name', row['site_name']) - results = self.music.read_all_rows(self.config.db_keyspace, self.config.db_app_index_table) + results = self.music.read_all_rows(self.config.db_keyspace, + self.config.db_app_index_table) if len(results) > 0: print("app_index table result = ", len(results)) for _, row in results.iteritems(): @@ -76,7 +91,8 @@ class DBCleaner(object): self.config.db_app_index_table, 'site_name', row['site_name']) - results = self.music.read_all_rows(self.config.db_keyspace, self.config.db_app_table) + results = self.music.read_all_rows(self.config.db_keyspace, + self.config.db_app_table) if len(results) > 0: print("app table result = ", len(results)) for _, row in results.iteritems(): @@ -84,7 +100,8 @@ class DBCleaner(object): self.config.db_app_table, 'stack_id', row['stack_id']) - results = self.music.read_all_rows(self.config.db_keyspace, self.config.db_uuid_table) + results = 
self.music.read_all_rows(self.config.db_keyspace, + self.config.db_uuid_table) if len(results) > 0: print("uuid table result = ", len(results)) for _, row in results.iteritems(): @@ -93,49 +110,61 @@ class DBCleaner(object): 'uuid', row['uuid']) def check_db_tables(self): - results = self.music.read_all_rows(self.config.db_keyspace, self.config.db_resource_table) + """Log whether tables in Music have been cleaned.""" + """Check resource, resource_index, request, response, event, + app, app_index, and uuid tables. + """ + results = self.music.read_all_rows(self.config.db_keyspace, + self.config.db_resource_table) if len(results) > 0: print("resource table not cleaned ") else: print("resource table cleaned") - results = self.music.read_all_rows(self.config.db_keyspace, self.config.db_request_table) + results = self.music.read_all_rows(self.config.db_keyspace, + self.config.db_request_table) if len(results) > 0: print("request table not cleaned ") else: print("request table cleaned") - results = self.music.read_all_rows(self.config.db_keyspace, self.config.db_response_table) + results = self.music.read_all_rows(self.config.db_keyspace, + self.config.db_response_table) if len(results) > 0: print("response table not cleaned ") else: print("response table cleaned") - results = self.music.read_all_rows(self.config.db_keyspace, self.config.db_event_table) + results = self.music.read_all_rows(self.config.db_keyspace, + self.config.db_event_table) if len(results) > 0: print("event table not cleaned ") else: print("event table cleaned") - results = self.music.read_all_rows(self.config.db_keyspace, self.config.db_resource_index_table) + results = self.music.read_all_rows(self.config.db_keyspace, + self.config.db_resource_index_table) if len(results) > 0: print("resource log index table not cleaned ") else: print("resource log index table cleaned") - results = self.music.read_all_rows(self.config.db_keyspace, self.config.db_app_index_table) + results = 
self.music.read_all_rows(self.config.db_keyspace, + self.config.db_app_index_table) if len(results) > 0: print("app log index table not cleaned ") else: print("app log index table cleaned") - results = self.music.read_all_rows(self.config.db_keyspace, self.config.db_app_table) + results = self.music.read_all_rows(self.config.db_keyspace, + self.config.db_app_table) if len(results) > 0: print("app log table not cleaned ") else: print("app log table cleaned") - results = self.music.read_all_rows(self.config.db_keyspace, self.config.db_uuid_table) + results = self.music.read_all_rows(self.config.db_keyspace, + self.config.db_uuid_table) if len(results) > 0: print("uuid table not cleaned ") else: diff --git a/valet/engine/optimizer/ostro_server/ostro_daemon.py b/valet/engine/optimizer/ostro_server/ostro_daemon.py index 8591ad8..f637bfb 100755 --- a/valet/engine/optimizer/ostro_server/ostro_daemon.py +++ b/valet/engine/optimizer/ostro_server/ostro_daemon.py @@ -1,31 +1,34 @@ # # Copyright 2014-2017 AT&T Intellectual Property -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+"""Daemon foe Valet Engine.""" + import os import sys import traceback from valet.engine.optimizer.ostro.ostro import Ostro from valet.engine.optimizer.ostro_server.configuration import Config -from valet.engine.optimizer.ostro_server.daemon import Daemon # implemented for Python v2.7 +from valet.engine.optimizer.ostro_server.daemon import Daemon from valet.engine.optimizer.util.util import init_logger class OstroDaemon(Daemon): + """Daemon foe Valet Engine.""" def run(self): - + """Run the daemon.""" self.logger.info("##### Valet Engine is launched #####") try: ostro = Ostro(config, self.logger) @@ -40,6 +43,7 @@ class OstroDaemon(Daemon): def verify_dirs(list_of_dirs): + """If a directory in the list does not exist, create it.""" for d in list_of_dirs: try: if not os.path.exists(d): @@ -50,7 +54,7 @@ def verify_dirs(list_of_dirs): if __name__ == "__main__": - ''' configuration ''' + """ configuration """ # Configuration try: config = Config() @@ -59,11 +63,12 @@ if __name__ == "__main__": print(config_status) sys.exit(2) - ''' verify directories ''' - dirs_list = [config.logging_loc, config.resource_log_loc, config.app_log_loc, os.path.dirname(config.process)] + """ verify directories """ + dirs_list = [config.logging_loc, config.resource_log_loc, + config.app_log_loc, os.path.dirname(config.process)] verify_dirs(dirs_list) - ''' logger ''' + """ logger """ logger = init_logger(config) # Start daemon process diff --git a/valet/engine/optimizer/ostro_server/ostro_sim.cfg b/valet/engine/optimizer/ostro_server/ostro_sim.cfg index ef3d380..d250498 100644 --- a/valet/engine/optimizer/ostro_server/ostro_sim.cfg +++ b/valet/engine/optimizer/ostro_server/ostro_sim.cfg @@ -1,4 +1,4 @@ -# Version 2.0.2: Feb. 
9, 2016 +# Version 2.0.2 # Set simulation parameters num_of_spine_switches=0 diff --git a/valet/engine/optimizer/util/util.py b/valet/engine/optimizer/util/util.py index f3d5637..e4abfc4 100755 --- a/valet/engine/optimizer/util/util.py +++ b/valet/engine/optimizer/util/util.py @@ -1,18 +1,20 @@ # # Copyright 2014-2017 AT&T Intellectual Property -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +"""Util.""" + from os import listdir, stat from os.path import isfile, join import logging @@ -20,6 +22,7 @@ from logging.handlers import RotatingFileHandler def get_logfile(_loc, _max_log_size, _name): + """Get logfile from location and return with file mode.""" files = [f for f in listdir(_loc) if isfile(join(_loc, f))] logfile_index = 0 @@ -50,7 +53,9 @@ def get_logfile(_loc, _max_log_size, _name): return (last_logfile, mode) -def get_last_logfile(_loc, _max_log_size, _max_num_of_logs, _name, _last_index): +def get_last_logfile(_loc, _max_log_size, _max_num_of_logs, + _name, _last_index): + """Return last logfile from location with index and mode.""" last_logfile = _name + "_" + str(_last_index) + ".log" mode = None @@ -74,6 +79,7 @@ def get_last_logfile(_loc, _max_log_size, _max_num_of_logs, _name, _last_index): def adjust_json_string(_data): + """Adjust data value formatting to be consistent and return.""" _data = _data.replace("None", '"none"') _data = _data.replace("False", '"false"') _data = _data.replace("True", '"true"') @@ -85,7 +91,9 @@ def adjust_json_string(_data): 
def init_logger(config): - log_formatter = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s") + """Return an initialized logger.""" + log_formatter = logging.Formatter("%(asctime)s - %(levelname)s - " + "%(message)s") log_handler = RotatingFileHandler(config.logging_loc + config.logger_name, mode='a', maxBytes=config.max_main_log_size, @@ -94,7 +102,8 @@ def init_logger(config): delay=0) log_handler.setFormatter(log_formatter) logger = logging.getLogger(config.logger_name) - logger.setLevel(logging.DEBUG if config.logging_level == "debug" else logging.INFO) + logger.setLevel(logging.DEBUG if config.logging_level == "debug" + else logging.INFO) logger.addHandler(log_handler) return logger diff --git a/valet/engine/resource_manager/compute.py b/valet/engine/resource_manager/compute.py index 91e35b8..5585bcc 100755 --- a/valet/engine/resource_manager/compute.py +++ b/valet/engine/resource_manager/compute.py @@ -1,18 +1,20 @@ # # Copyright 2014-2017 AT&T Intellectual Property -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +"""Compute.""" + from novaclient import client as nova_client from oslo_config import cfg from resource_base import Host, LogicalGroup, Flavor @@ -25,12 +27,21 @@ CONF = cfg.CONF class Compute(object): + """Compute Class. + + This class performs functions of setting hosts, availability zones, + aggregates, placed vms, resources, flavors, etc. + + Interacts with nova client to perform these actions. 
+ """ + def __init__(self, _logger): + """Compute init.""" self.logger = _logger self.nova = None def set_hosts(self, _hosts, _logical_groups): - + """Return success if az's, aggregates, vms, resources, all set.""" self._get_nova_client() status = self._set_availability_zones(_hosts, _logical_groups) @@ -56,7 +67,7 @@ class Compute(object): return "success" def _get_nova_client(self): - '''Returns a nova client''' + """Return a nova client.""" self.nova = nova_client.Client(VERSION, CONF.identity.username, CONF.identity.password, @@ -86,7 +97,8 @@ class Compute(object): if host.name not in logical_group.vms_per_host.keys(): logical_group.vms_per_host[host.name] = [] - self.logger.info("adding Host LogicalGroup: " + str(host.__dict__)) + self.logger.info("adding Host LogicalGroup: " + + str(host.__dict__)) _hosts[host.name] = host @@ -114,7 +126,8 @@ class Compute(object): metadata[mk] = a.metadata.get(mk) aggregate.metadata = metadata - self.logger.info("adding aggregate LogicalGroup: " + str(aggregate.__dict__)) + self.logger.info("adding aggregate LogicalGroup: " + + str(aggregate.__dict__)) _logical_groups[aggregate.name] = aggregate @@ -141,7 +154,8 @@ class Compute(object): if result_status == "success": for vm_uuid in vm_uuid_list: vm_detail = [] # (vm_name, az, metadata, status) - result_status_detail = self._get_vm_detail(vm_uuid, vm_detail) + result_status_detail = self._get_vm_detail(vm_uuid, + vm_detail) if result_status_detail == "success": vm_id = ("none", vm_detail[0], vm_uuid) @@ -162,7 +176,8 @@ class Compute(object): return error_status def _get_vms_of_host(self, _hk, _vm_list): - hypervisor_list = self.nova.hypervisors.search(hypervisor_match=_hk, servers=True) + hypervisor_list = self.nova.hypervisors.search(hypervisor_match=_hk, + servers=True) try: for hv in hypervisor_list: @@ -221,6 +236,7 @@ class Compute(object): return "success" def set_flavors(self, _flavors): + """Set flavors.""" error_status = None self._get_nova_client() @@ -260,7 
+276,8 @@ class Compute(object): ephemeral_gb = 0.0 if hasattr(f, "OS-FLV-EXT-DATA:ephemeral"): - ephemeral_gb = float(getattr(f, "OS-FLV-EXT-DATA:ephemeral")) + ephemeral_gb = float(getattr(f, + "OS-FLV-EXT-DATA:ephemeral")) swap_mb = 0.0 if hasattr(f, "swap"): diff --git a/valet/engine/resource_manager/compute_manager.py b/valet/engine/resource_manager/compute_manager.py index 1135341..0c19f94 100755 --- a/valet/engine/resource_manager/compute_manager.py +++ b/valet/engine/resource_manager/compute_manager.py @@ -1,18 +1,20 @@ # # Copyright 2014-2017 AT&T Intellectual Property -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +"""Compute Manager.""" + import threading import time @@ -23,8 +25,14 @@ from valet.engine.resource_manager.resource_base import Host class ComputeManager(threading.Thread): + """Compute Manager Class. + + Threaded class to setup and manage compute for resources, hosts, + flavors, etc. Calls many functions from Resource. 
+ """ def __init__(self, _t_id, _t_name, _rsc, _data_lock, _config, _logger): + """Init Compute Manager.""" threading.Thread.__init__(self) self.thread_id = _t_id @@ -42,7 +50,9 @@ class ComputeManager(threading.Thread): self.project_token = None def run(self): - self.logger.info("ComputeManager: start " + self.thread_name + " ......") + """Start Compute Manager thread to run setup.""" + self.logger.info("ComputeManager: start " + self.thread_name + + " ......") if self.config.compute_trigger_freq > 0: period_end = time.time() + self.config.compute_trigger_freq @@ -67,7 +77,10 @@ class ComputeManager(threading.Thread): time.sleep(60) now = time.localtime() - if now.tm_year > last_trigger_year or now.tm_mon > last_trigger_mon or now.tm_mday > last_trigger_mday: + if now.tm_year > last_trigger_year or \ + now.tm_mon > last_trigger_mon or \ + now.tm_mday > last_trigger_mday: + timeout = False if timeout is False and \ @@ -82,34 +95,39 @@ class ComputeManager(threading.Thread): self.logger.info("ComputeManager: exit " + self.thread_name) def _run(self): - self.logger.info("ComputeManager: --- start compute_nodes status update ---") + self.logger.info("ComputeManager: --- start compute_nodes " + "status update ---") self.data_lock.acquire() try: triggered_host_updates = self.set_hosts() triggered_flavor_updates = self.set_flavors() - if triggered_host_updates is True and triggered_flavor_updates is True: + if triggered_host_updates is True and \ + triggered_flavor_updates is True: if self.resource.update_topology() is False: - # TODO: error in MUSIC. ignore? + # TODO(UNKNOWN): error in MUSIC. ignore? pass else: - # TODO: error handling, e.g., 3 times failure then stop Ostro? + # TODO(UNKNOWN): error handling, e.g., + # 3 times failure then stop Ostro? 
pass finally: self.data_lock.release() - self.logger.info("ComputeManager: --- done compute_nodes status update ---") + self.logger.info("ComputeManager: --- done compute_nodes " + "status update ---") return True def set_hosts(self): + """Return True if hosts set, compute avail resources, checks update.""" hosts = {} logical_groups = {} compute = None if self.config.mode.startswith("sim") is True or \ - self.config.mode.startswith("test") is True: + self.config.mode.startswith("test") is True: compute = SimCompute(self.config) else: compute = Compute(self.logger) @@ -136,25 +154,30 @@ class ComputeManager(threading.Thread): self.resource.logical_groups[lk] = deepcopy(_logical_groups[lk]) self.resource.logical_groups[lk].last_update = time.time() - self.logger.warn("ComputeManager: new logical group (" + lk + ") added") + self.logger.warn("ComputeManager: new logical group (" + + lk + ") added") for rlk in self.resource.logical_groups.keys(): rl = self.resource.logical_groups[rlk] - if rl.group_type != "EX" and rl.group_type != "AFF" and rl.group_type != "DIV": + if rl.group_type != "EX" and rl.group_type != "AFF" and \ + rl.group_type != "DIV": if rlk not in _logical_groups.keys(): self.resource.logical_groups[rlk].status = "disabled" self.resource.logical_groups[rlk].last_update = time.time() - self.logger.warn("ComputeManager: logical group (" + rlk + ") removed") + self.logger.warn("ComputeManager: logical group (" + + rlk + ") removed") for lk in _logical_groups.keys(): lg = _logical_groups[lk] rlg = self.resource.logical_groups[lk] - if lg.group_type != "EX" and lg.group_type != "AFF" and lg.group_type != "DIV": + if lg.group_type != "EX" and lg.group_type != "AFF" and \ + lg.group_type != "DIV": if self._check_logical_group_metadata_update(lg, rlg) is True: rlg.last_update = time.time() - self.logger.warn("ComputeManager: logical group (" + lk + ") updated") + self.logger.warn("ComputeManager: logical group (" + + lk + ") updated") def 
_check_logical_group_metadata_update(self, _lg, _rlg): if _lg.status != _rlg.status: @@ -183,7 +206,8 @@ class ComputeManager(threading.Thread): self.resource.hosts[new_host.name] = new_host new_host.last_update = time.time() - self.logger.warn("ComputeManager: new host (" + new_host.name + ") added") + self.logger.warn("ComputeManager: new host (" + + new_host.name + ") added") for rhk, rhost in self.resource.hosts.iteritems(): if rhk not in _hosts.keys(): @@ -191,7 +215,8 @@ class ComputeManager(threading.Thread): rhost.tag.remove("nova") rhost.last_update = time.time() - self.logger.warn("ComputeManager: host (" + rhost.name + ") disabled") + self.logger.warn("ComputeManager: host (" + + rhost.name + ") disabled") for hk in _hosts.keys(): host = _hosts[hk] @@ -202,7 +227,8 @@ class ComputeManager(threading.Thread): for hk, h in self.resource.hosts.iteritems(): if h.clean_memberships() is True: h.last_update = time.time() - self.logger.warn("ComputeManager: host (" + h.name + ") updated (delete EX/AFF/DIV membership)") + self.logger.warn("ComputeManager: host (" + h.name + + ") updated (delete EX/AFF/DIV membership)") for hk, host in self.resource.hosts.iteritems(): if host.last_update > self.resource.current_timestamp: @@ -224,17 +250,20 @@ class ComputeManager(threading.Thread): if "nova" not in _rhost.tag: _rhost.tag.append("nova") topology_updated = True - self.logger.warn("ComputeManager: host (" + _rhost.name + ") updated (tag added)") + self.logger.warn("ComputeManager: host (" + _rhost.name + + ") updated (tag added)") if _host.status != _rhost.status: _rhost.status = _host.status topology_updated = True - self.logger.warn("ComputeManager: host (" + _rhost.name + ") updated (status changed)") + self.logger.warn("ComputeManager: host (" + _rhost.name + + ") updated (status changed)") if _host.state != _rhost.state: _rhost.state = _host.state topology_updated = True - self.logger.warn("ComputeManager: host (" + _rhost.name + ") updated (state changed)") + 
self.logger.warn("ComputeManager: host (" + _rhost.name + + ") updated (state changed)") return topology_updated @@ -248,7 +277,8 @@ class ComputeManager(threading.Thread): _rhost.original_vCPUs = _host.original_vCPUs _rhost.avail_vCPUs = _host.avail_vCPUs topology_updated = True - self.logger.warn("ComputeManager: host (" + _rhost.name + ") updated (CPU updated)") + self.logger.warn("ComputeManager: host (" + _rhost.name + + ") updated (CPU updated)") if _host.mem_cap != _rhost.mem_cap or \ _host.original_mem_cap != _rhost.original_mem_cap or \ @@ -257,7 +287,8 @@ class ComputeManager(threading.Thread): _rhost.original_mem_cap = _host.original_mem_cap _rhost.avail_mem_cap = _host.avail_mem_cap topology_updated = True - self.logger.warn("ComputeManager: host (" + _rhost.name + ") updated (mem updated)") + self.logger.warn("ComputeManager: host (" + _rhost.name + + ") updated (mem updated)") if _host.local_disk_cap != _rhost.local_disk_cap or \ _host.original_local_disk_cap != _rhost.original_local_disk_cap or \ @@ -266,7 +297,8 @@ class ComputeManager(threading.Thread): _rhost.original_local_disk_cap = _host.original_local_disk_cap _rhost.avail_local_disk_cap = _host.avail_local_disk_cap topology_updated = True - self.logger.warn("ComputeManager: host (" + _rhost.name + ") updated (local disk space updated)") + self.logger.warn("ComputeManager: host (" + _rhost.name + + ") updated (local disk space updated)") if _host.vCPUs_used != _rhost.vCPUs_used or \ _host.free_mem_mb != _rhost.free_mem_mb or \ @@ -277,7 +309,8 @@ class ComputeManager(threading.Thread): _rhost.free_disk_gb = _host.free_disk_gb _rhost.disk_available_least = _host.disk_available_least topology_updated = True - self.logger.warn("ComputeManager: host (" + _rhost.name + ") updated (other resource numbers)") + self.logger.warn("ComputeManager: host (" + _rhost.name + + ") updated (other resource numbers)") return topology_updated @@ -288,15 +321,18 @@ class ComputeManager(threading.Thread): if mk not 
in _rhost.memberships.keys(): _rhost.memberships[mk] = self.resource.logical_groups[mk] topology_updated = True - self.logger.warn("ComputeManager: host (" + _rhost.name + ") updated (new membership)") + self.logger.warn("ComputeManager: host (" + _rhost.name + + ") updated (new membership)") for mk in _rhost.memberships.keys(): m = _rhost.memberships[mk] - if m.group_type != "EX" and m.group_type != "AFF" and m.group_type != "DIV": + if m.group_type != "EX" and m.group_type != "AFF" and \ + m.group_type != "DIV": if mk not in _host.memberships.keys(): del _rhost.memberships[mk] topology_updated = True - self.logger.warn("ComputeManager: host (" + _rhost.name + ") updated (delete membership)") + self.logger.warn("ComputeManager: host (" + _rhost.name + + ") updated (delete membership)") return topology_updated @@ -309,7 +345,8 @@ class ComputeManager(threading.Thread): _rhost.vm_list.remove(rvm_id) topology_updated = True - self.logger.warn("ComputeManager: host (" + _rhost.name + ") updated (none vm removed)") + self.logger.warn("ComputeManager: host (" + _rhost.name + + ") updated (none vm removed)") self.resource.clean_none_vms_from_logical_groups(_rhost) @@ -318,25 +355,29 @@ class ComputeManager(threading.Thread): _rhost.vm_list.append(vm_id) topology_updated = True - self.logger.warn("ComputeManager: host (" + _rhost.name + ") updated (new vm placed)") + self.logger.warn("ComputeManager: host (" + _rhost.name + + ") updated (new vm placed)") for rvm_id in _rhost.vm_list: if _host.exist_vm_by_uuid(rvm_id[2]) is False: _rhost.vm_list.remove(rvm_id) - self.resource.remove_vm_by_uuid_from_logical_groups(_rhost, rvm_id[2]) + self.resource.remove_vm_by_uuid_from_logical_groups(_rhost, + rvm_id[2]) topology_updated = True - self.logger.warn("ComputeManager: host (" + _rhost.name + ") updated (vm removed)") + self.logger.warn("ComputeManager: host (" + _rhost.name + + ") updated (vm removed)") return topology_updated def set_flavors(self): + """Return True if compute 
set flavors returns success.""" flavors = {} compute = None if self.config.mode.startswith("sim") is True or \ - self.config.mode.startswith("test") is True: + self.config.mode.startswith("test") is True: compute = SimCompute(self.config) else: compute = Compute(self.logger) @@ -356,14 +397,16 @@ class ComputeManager(threading.Thread): self.resource.flavors[fk] = deepcopy(_flavors[fk]) self.resource.flavors[fk].last_update = time.time() - self.logger.warn("ComputeManager: new flavor (" + fk + ") added") + self.logger.warn("ComputeManager: new flavor (" + + fk + ") added") for rfk in self.resource.flavors.keys(): if rfk not in _flavors.keys(): self.resource.flavors[rfk].status = "disabled" self.resource.flavors[rfk].last_update = time.time() - self.logger.warn("ComputeManager: flavor (" + rfk + ") removed") + self.logger.warn("ComputeManager: flavor (" + + rfk + ") removed") for fk in _flavors.keys(): f = _flavors[fk] @@ -371,7 +414,8 @@ class ComputeManager(threading.Thread): if self._check_flavor_spec_update(f, rf) is True: rf.last_update = time.time() - self.logger.warn("ComputeManager: flavor (" + fk + ") spec updated") + self.logger.warn("ComputeManager: flavor (" + + fk + ") spec updated") def _check_flavor_spec_update(self, _f, _rf): spec_updated = False @@ -380,7 +424,8 @@ class ComputeManager(threading.Thread): _rf.status = _f.status spec_updated = True - if _f.vCPUs != _rf.vCPUs or _f.mem_cap != _rf.mem_cap or _f.disk_cap != _rf.disk_cap: + if _f.vCPUs != _rf.vCPUs or _f.mem_cap != _rf.mem_cap or \ + _f.disk_cap != _rf.disk_cap: _rf.vCPUs = _f.vCPUs _rf.mem_cap = _f.mem_cap _rf.disk_cap = _f.disk_cap diff --git a/valet/engine/resource_manager/compute_simulator.py b/valet/engine/resource_manager/compute_simulator.py index 646ebf0..eb7cbc3 100644 --- a/valet/engine/resource_manager/compute_simulator.py +++ b/valet/engine/resource_manager/compute_simulator.py @@ -1,28 +1,37 @@ # # Copyright 2014-2017 AT&T Intellectual Property -# +# # Licensed under the 
Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from valet.engine.resource_manager.resource_base import Host, LogicalGroup, Flavor +"""Compute Simulator.""" + +from valet.engine.resource_manager.resource_base \ + import Host, LogicalGroup, Flavor class SimCompute(object): + """Sim Compute Class. + + This class simulates a compute datacenter using classes from resource_base. + """ def __init__(self, _config): + """Init Sim Compute class (object).""" self.config = _config self.datacenter_name = "sim" def set_hosts(self, _hosts, _logical_groups): + """Set hosts and logical groups using resource_base, return success.""" self._set_availability_zones(_hosts, _logical_groups) self._set_aggregates(_hosts, _logical_groups) @@ -40,7 +49,8 @@ class SimCompute(object): for r_num in range(0, self.config.num_of_racks): for h_num in range(0, self.config.num_of_hosts_per_rack): - host = Host(self.datacenter_name + "0r" + str(r_num) + "c" + str(h_num)) + host = Host(self.datacenter_name + "0r" + str(r_num) + + "c" + str(h_num)) host.tag.append("nova") host.memberships["nova"] = logical_group @@ -63,9 +73,11 @@ class SimCompute(object): aggregate = _logical_groups["aggregate" + str(a_num)] for r_num in range(0, self.config.num_of_racks): for h_num in range(0, self.config.num_of_hosts_per_rack): - host_name = self.datacenter_name + "0r" + str(r_num) + "c" + str(h_num) + host_name = self.datacenter_name + "0r" + str(r_num) +\ + "c" + str(h_num) if host_name in _hosts.keys(): - if (h_num % 
(self.config.aggregated_ratio + a_num)) == 0: + if (h_num % + (self.config.aggregated_ratio + a_num)) == 0: host = _hosts[host_name] host.memberships[aggregate.name] = aggregate @@ -77,23 +89,28 @@ class SimCompute(object): def _set_resources(self, _hosts): for r_num in range(0, self.config.num_of_racks): for h_num in range(0, self.config.num_of_hosts_per_rack): - host_name = self.datacenter_name + "0r" + str(r_num) + "c" + str(h_num) + host_name = self.datacenter_name + "0r" + str(r_num) +\ + "c" + str(h_num) if host_name in _hosts.keys(): host = _hosts[host_name] host.original_vCPUs = float(self.config.cpus_per_host) host.vCPUs_used = 0.0 host.original_mem_cap = float(self.config.mem_per_host) host.free_mem_mb = host.original_mem_cap - host.original_local_disk_cap = float(self.config.disk_per_host) + host.original_local_disk_cap = \ + float(self.config.disk_per_host) host.free_disk_gb = host.original_local_disk_cap host.disk_available_least = host.original_local_disk_cap def set_flavors(self, _flavors): + """Set flavors in compute sim, return success.""" for f_num in range(0, self.config.num_of_basic_flavors): flavor = Flavor("bflavor" + str(f_num)) flavor.vCPUs = float(self.config.base_flavor_cpus * (f_num + 1)) flavor.mem_cap = float(self.config.base_flavor_mem * (f_num + 1)) - flavor.disk_cap = float(self.config.base_flavor_disk * (f_num + 1)) + 10.0 + 20.0 / 1024.0 + flavor.disk_cap = \ + float(self.config.base_flavor_disk * (f_num + 1)) + \ + 10.0 + 20.0 / 1024.0 _flavors[flavor.name] = flavor diff --git a/valet/engine/resource_manager/resource.py b/valet/engine/resource_manager/resource.py index 1492e98..a4ae7ab 100755 --- a/valet/engine/resource_manager/resource.py +++ b/valet/engine/resource_manager/resource.py @@ -1,18 +1,20 @@ # # Copyright 2014-2017 AT&T Intellectual Property -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +"""Resource - Handles data, metadata, status of resources.""" + import json import sys import time @@ -20,33 +22,42 @@ import traceback from valet.engine.optimizer.app_manager.app_topology_base import LEVELS from valet.engine.optimizer.util import util as util -from valet.engine.resource_manager.resource_base import Datacenter, HostGroup, Host, LogicalGroup +from valet.engine.resource_manager.resource_base \ + import Datacenter, HostGroup, Host, LogicalGroup from valet.engine.resource_manager.resource_base import Flavor, Switch, Link class Resource(object): + """Resource Class. + + This class bootstraps the resources from the database and initializes + them using base resources (datacenter, host group, host, logical group). + Also manages aggregate status of resources and metadata and handles + updates to base resource types. 
+ """ def __init__(self, _db, _config, _logger): + """Init Resource Class.""" self.db = _db self.config = _config self.logger = _logger - ''' resource data ''' + """ resource data """ self.datacenter = Datacenter(self.config.datacenter_name) self.host_groups = {} self.hosts = {} self.switches = {} self.storage_hosts = {} - ''' metadata ''' + """ metadata """ self.logical_groups = {} self.flavors = {} self.current_timestamp = 0 self.last_log_index = 0 - ''' resource status aggregation ''' + """ resource status aggregation """ self.CPU_avail = 0 self.mem_avail = 0 self.local_disk_avail = 0 @@ -54,6 +65,7 @@ class Resource(object): self.nw_bandwidth_avail = 0 def bootstrap_from_db(self, _resource_status): + """Return True if bootsrap resource from database successful.""" try: logical_groups = _resource_status.get("logical_groups") if logical_groups: @@ -69,9 +81,11 @@ class Resource(object): self.logical_groups[lgk] = logical_group if len(self.logical_groups) > 0: - self.logger.debug("Resource.bootstrap_from_db: logical_groups loaded") + self.logger.debug("Resource.bootstrap_from_db: logical_groups " + "loaded") else: - self.logger.warn("Resource.bootstrap_from_db: no logical_groups") + self.logger.warn("Resource.bootstrap_from_db: no " + "logical_groups") flavors = _resource_status.get("flavors") if flavors: @@ -89,7 +103,8 @@ class Resource(object): if len(self.flavors) > 0: self.logger.debug("Resource.bootstrap_from_db: flavors loaded") else: - self.logger.error("Resource.bootstrap_from_db: fail loading flavors") + self.logger.error("Resource.bootstrap_from_db: fail loading " + "flavors") switches = _resource_status.get("switches") if switches: @@ -129,9 +144,11 @@ class Resource(object): switch.peer_links = peer_links - self.logger.debug("Resource.bootstrap_from_db: switch links loaded") + self.logger.debug("Resource.bootstrap_from_db: switch links " + "loaded") else: - self.logger.error("Resource.bootstrap_from_db: fail loading switches") + 
self.logger.error("Resource.bootstrap_from_db: fail loading " + "switches") # storage_hosts hosts = _resource_status.get("hosts") @@ -168,9 +185,11 @@ class Resource(object): self.hosts[hk] = host if len(self.hosts) > 0: - self.logger.debug("Resource.bootstrap_from_db: hosts loaded") + self.logger.debug("Resource.bootstrap_from_db: hosts " + "loaded") else: - self.logger.error("Resource.bootstrap_from_db: fail loading hosts") + self.logger.error("Resource.bootstrap_from_db: fail " + "loading hosts") host_groups = _resource_status.get("host_groups") if host_groups: @@ -185,7 +204,8 @@ class Resource(object): host_group.original_mem_cap = hg.get("original_mem") host_group.avail_mem_cap = hg.get("avail_mem") host_group.local_disk_cap = hg.get("local_disk") - host_group.original_local_disk_cap = hg.get("original_local_disk") + host_group.original_local_disk_cap = \ + hg.get("original_local_disk") host_group.avail_local_disk_cap = hg.get("avail_local_disk") host_group.vm_list = hg.get("vm_list") host_group.volume_list = hg.get("volume_list", []) @@ -201,9 +221,11 @@ class Resource(object): self.host_groups[hgk] = host_group if len(self.host_groups) > 0: - self.logger.debug("Resource.bootstrap_from_db: host_groups loaded") + self.logger.debug("Resource.bootstrap_from_db: host_groups " + "loaded") else: - self.logger.error("Resource.bootstrap_from_db: fail loading host_groups") + self.logger.error("Resource.bootstrap_from_db: fail " + "loading host_groups") dc = _resource_status.get("datacenter") if dc: @@ -217,7 +239,8 @@ class Resource(object): self.datacenter.original_mem_cap = dc.get("original_mem") self.datacenter.avail_mem_cap = dc.get("avail_mem") self.datacenter.local_disk_cap = dc.get("local_disk") - self.datacenter.original_local_disk_cap = dc.get("original_local_disk") + self.datacenter.original_local_disk_cap = \ + dc.get("original_local_disk") self.datacenter.avail_local_disk_cap = dc.get("avail_local_disk") self.datacenter.vm_list = dc.get("vm_list") 
self.datacenter.volume_list = dc.get("volume_list", []) @@ -237,9 +260,11 @@ class Resource(object): self.datacenter.resources[ck] = self.hosts[ck] if len(self.datacenter.resources) > 0: - self.logger.debug("Resource.bootstrap_from_db: datacenter loaded") + self.logger.debug("Resource.bootstrap_from_db: datacenter " + "loaded") else: - self.logger.error("Resource.bootstrap_from_db: fail loading datacenter") + self.logger.error("Resource.bootstrap_from_db: fail " + "loading datacenter") hgs = _resource_status.get("host_groups") if hgs: @@ -258,7 +283,8 @@ class Resource(object): elif ck in self.host_groups.keys(): host_group.child_resources[ck] = self.host_groups[ck] - self.logger.debug("Resource.bootstrap_from_db: host_groups'layout loaded") + self.logger.debug("Resource.bootstrap_from_db: " + "host_groups'layout loaded") hs = _resource_status.get("hosts") if hs: @@ -271,20 +297,24 @@ class Resource(object): elif pk in self.host_groups.keys(): host.host_group = self.host_groups[pk] - self.logger.debug("Resource.bootstrap_from_db: hosts'layout loaded") + self.logger.debug("Resource.bootstrap_from_db: " + "hosts'layout loaded") self._update_compute_avail() self._update_storage_avail() self._update_nw_bandwidth_avail() - self.logger.debug("Resource.bootstrap_from_db: resource availability updated") + self.logger.debug("Resource.bootstrap_from_db: " + "resource availability updated") except Exception: - self.logger.error("Resource.bootstrap_from_db - FAILED:" + traceback.format_exc()) + self.logger.error("Resource.bootstrap_from_db - " + "FAILED:" + traceback.format_exc()) return True def update_topology(self, store=True): + """Update Topology and return True, if store True then store update.""" self._update_topology() self._update_compute_avail() @@ -304,7 +334,8 @@ class Resource(object): def _update_topology(self): for level in LEVELS: for _, host_group in self.host_groups.iteritems(): - if host_group.host_type == level and host_group.check_availability() is True: + 
if host_group.host_type == level and \ + host_group.check_availability() is True: if host_group.last_update > self.current_timestamp: self._update_host_group_topology(host_group) @@ -326,7 +357,8 @@ class Resource(object): _host_group.original_mem_cap += host.original_mem_cap _host_group.avail_mem_cap += host.avail_mem_cap _host_group.local_disk_cap += host.local_disk_cap - _host_group.original_local_disk_cap += host.original_local_disk_cap + _host_group.original_local_disk_cap += \ + host.original_local_disk_cap _host_group.avail_local_disk_cap += host.avail_local_disk_cap for shk, storage_host in host.storages.iteritems(): @@ -362,8 +394,10 @@ class Resource(object): self.datacenter.original_mem_cap += resource.original_mem_cap self.datacenter.avail_mem_cap += resource.avail_mem_cap self.datacenter.local_disk_cap += resource.local_disk_cap - self.datacenter.original_local_disk_cap += resource.original_local_disk_cap - self.datacenter.avail_local_disk_cap += resource.avail_local_disk_cap + self.datacenter.original_local_disk_cap += \ + resource.original_local_disk_cap + self.datacenter.avail_local_disk_cap += \ + resource.avail_local_disk_cap for shk, storage_host in resource.storages.iteritems(): if storage_host.status == "enabled": @@ -413,7 +447,8 @@ class Resource(object): for sk, s in h.switches.iteritems(): if s.status == "enabled": for ulk, ul in s.up_links.iteritems(): - avail_nw_bandwidth_list.append(ul.avail_nw_bandwidth) + avail_nw_bandwidth_list.append( + ul.avail_nw_bandwidth) self.nw_bandwidth_avail += min(avail_nw_bandwidth_list) elif level == "spine": for _, hg in self.host_groups.iteritems(): @@ -422,7 +457,8 @@ class Resource(object): for _, s in hg.switches.iteritems(): if s.status == "enabled": for _, ul in s.up_links.iteritems(): - avail_nw_bandwidth_list.append(ul.avail_nw_bandwidth) + avail_nw_bandwidth_list.append( + ul.avail_nw_bandwidth) # NOTE: peer links? 
self.nw_bandwidth_avail += min(avail_nw_bandwidth_list) @@ -466,7 +502,8 @@ class Resource(object): last_update_time = s.last_update for hk, host in self.hosts.iteritems(): - if host.last_update > self.current_timestamp or host.last_link_update > self.current_timestamp: + if host.last_update > self.current_timestamp or \ + host.last_link_update > self.current_timestamp: host_updates[hk] = host.get_json_info() if host.last_update > self.current_timestamp: @@ -493,11 +530,10 @@ class Resource(object): if self.datacenter.last_link_update > self.current_timestamp: last_update_time = self.datacenter.last_link_update - (resource_logfile, last_index, mode) = util.get_last_logfile(self.config.resource_log_loc, - self.config.max_log_size, - self.config.max_num_of_logs, - self.datacenter.name, - self.last_log_index) + (resource_logfile, last_index, mode) = util.get_last_logfile( + self.config.resource_log_loc, self.config.max_log_size, + self.config.max_num_of_logs, self.datacenter.name, + self.last_log_index) self.last_log_index = last_index logging = open(self.config.resource_log_loc + resource_logfile, mode) @@ -527,17 +563,21 @@ class Resource(object): logging.close() - self.logger.info("Resource._store_topology_updates: log resource status in " + resource_logfile) + self.logger.info("Resource._store_topology_updates: log resource " + "status in " + resource_logfile) if self.db is not None: - if self.db.update_resource_status(self.datacenter.name, json_logging) is False: + if self.db.update_resource_status(self.datacenter.name, + json_logging) is False: return None - if self.db.update_resource_log_index(self.datacenter.name, self.last_log_index) is False: + if self.db.update_resource_log_index(self.datacenter.name, + self.last_log_index) is False: return None return last_update_time def update_rack_resource(self, _host): + """Update resources for rack (host), then update cluster.""" rack = _host.host_group if rack is not None: @@ -547,6 +587,7 @@ class Resource(object): 
self.update_cluster_resource(rack) def update_cluster_resource(self, _rack): + """Update cluster rack belonged to, then update datacenter.""" cluster = _rack.parent_resource if cluster is not None: @@ -556,11 +597,13 @@ class Resource(object): self.datacenter.last_update = time.time() def get_uuid(self, _h_uuid, _host_name): + """Return host uuid.""" host = self.hosts[_host_name] return host.get_uuid(_h_uuid) def add_vm_to_host(self, _host_name, _vm_id, _vcpus, _mem, _ldisk): + """Add vm to host and adjust compute resources for host.""" host = self.hosts[_host_name] host.vm_list.append(_vm_id) @@ -574,7 +617,9 @@ class Resource(object): host.free_disk_gb -= _ldisk host.disk_available_least -= _ldisk - def remove_vm_by_h_uuid_from_host(self, _host_name, _h_uuid, _vcpus, _mem, _ldisk): + def remove_vm_by_h_uuid_from_host(self, _host_name, _h_uuid, _vcpus, _mem, + _ldisk): + """Remove vm from host by h_uuid, adjust compute resources for host.""" host = self.hosts[_host_name] host.remove_vm_by_h_uuid(_h_uuid) @@ -588,7 +633,9 @@ class Resource(object): host.free_disk_gb += _ldisk host.disk_available_least += _ldisk - def remove_vm_by_uuid_from_host(self, _host_name, _uuid, _vcpus, _mem, _ldisk): + def remove_vm_by_uuid_from_host(self, _host_name, _uuid, _vcpus, _mem, + _ldisk): + """Remove vm from host by uuid, adjust compute resources for host.""" host = self.hosts[_host_name] host.remove_vm_by_uuid(_uuid) @@ -603,6 +650,7 @@ class Resource(object): host.disk_available_least += _ldisk def add_vol_to_host(self, _host_name, _storage_name, _v_id, _disk): + """Add volume to host and adjust available disk on host.""" host = self.hosts[_host_name] host.volume_list.append(_v_id) @@ -612,9 +660,11 @@ class Resource(object): storage_host.avail_disk_cap -= _disk - # NOTE: Assume the up-link of spine switch is not used except out-going from datacenter + # NOTE: Assume the up-link of spine switch is not used except out-going + # from datacenter # NOTE: What about peer-switches? 
def deduct_bandwidth(self, _host_name, _placement_level, _bandwidth): + """Deduct bandwidth at appropriate placement level.""" host = self.hosts[_host_name] if _placement_level == "host": @@ -648,26 +698,31 @@ class Resource(object): hs.last_update = time.time() - def update_host_resources(self, _hn, _st, _vcpus, _vcpus_used, _mem, _fmem, _ldisk, _fldisk, _avail_least): + def update_host_resources(self, _hn, _st, _vcpus, _vcpus_used, _mem, _fmem, + _ldisk, _fldisk, _avail_least): + """Return True if status or compute resources avail on host changed.""" updated = False host = self.hosts[_hn] if host.status != _st: host.status = _st - self.logger.debug("Resource.update_host_resources: host status changed") + self.logger.debug("Resource.update_host_resources: host status " + "changed") updated = True if host.original_vCPUs != _vcpus or \ host.vCPUs_used != _vcpus_used: - self.logger.debug("Resource.update_host_resources: host cpu changed") + self.logger.debug("Resource.update_host_resources: host cpu " + "changed") host.original_vCPUs = _vcpus host.vCPUs_used = _vcpus_used updated = True if host.free_mem_mb != _fmem or \ host.original_mem_cap != _mem: - self.logger.debug("Resource.update_host_resources: host mem changed") + self.logger.debug("Resource.update_host_resources: host mem " + "changed") host.free_mem_mb = _fmem host.original_mem_cap = _mem updated = True @@ -675,7 +730,8 @@ class Resource(object): if host.free_disk_gb != _fldisk or \ host.original_local_disk_cap != _ldisk or \ host.disk_available_least != _avail_least: - self.logger.debug("Resource.update_host_resources: host disk changed") + self.logger.debug("Resource.update_host_resources: host disk " + "changed") host.free_disk_gb = _fldisk host.original_local_disk_cap = _ldisk host.disk_available_least = _avail_least @@ -687,17 +743,20 @@ class Resource(object): return updated def update_host_time(self, _host_name): + """Update last host update time.""" host = self.hosts[_host_name] host.last_update = 
time.time() self.update_rack_resource(host) def update_storage_time(self, _storage_name): + """Update last storage update time.""" storage_host = self.storage_hosts[_storage_name] storage_host.last_cap_update = time.time() def add_logical_group(self, _host_name, _lg_name, _lg_type): + """Add logical group to host memberships and update host resource.""" host = None if _host_name in self.hosts.keys(): host = self.hosts[_host_name] @@ -720,6 +779,7 @@ class Resource(object): self.update_cluster_resource(host) def add_vm_to_logical_groups(self, _host, _vm_id, _logical_groups_of_vm): + """Add vm to logical group and update corresponding lg.""" for lgk in _host.memberships.keys(): if lgk in _logical_groups_of_vm: lg = self.logical_groups[lgk] @@ -728,17 +788,21 @@ class Resource(object): if lg.add_vm_by_h_uuid(_vm_id, _host.name) is True: lg.last_update = time.time() elif isinstance(_host, HostGroup): - if lg.group_type == "EX" or lg.group_type == "AFF" or lg.group_type == "DIV": + if lg.group_type == "EX" or lg.group_type == "AFF" or \ + lg.group_type == "DIV": if lgk.split(":")[0] == _host.host_type: if lg.add_vm_by_h_uuid(_vm_id, _host.name) is True: lg.last_update = time.time() if isinstance(_host, Host) and _host.host_group is not None: - self.add_vm_to_logical_groups(_host.host_group, _vm_id, _logical_groups_of_vm) + self.add_vm_to_logical_groups(_host.host_group, _vm_id, + _logical_groups_of_vm) elif isinstance(_host, HostGroup) and _host.parent_resource is not None: - self.add_vm_to_logical_groups(_host.parent_resource, _vm_id, _logical_groups_of_vm) + self.add_vm_to_logical_groups(_host.parent_resource, _vm_id, + _logical_groups_of_vm) def remove_vm_by_h_uuid_from_logical_groups(self, _host, _h_uuid): + """Remove vm by orchestration id from lgs. 
Update host and lgs.""" for lgk in _host.memberships.keys(): if lgk not in self.logical_groups.keys(): continue @@ -752,7 +816,8 @@ class Resource(object): _host.last_update = time.time() elif isinstance(_host, HostGroup): - if lg.group_type == "EX" or lg.group_type == "AFF" or lg.group_type == "DIV": + if lg.group_type == "EX" or lg.group_type == "AFF" or \ + lg.group_type == "DIV": if lgk.split(":")[0] == _host.host_type: if lg.remove_vm_by_h_uuid(_h_uuid, _host.name) is True: lg.last_update = time.time() @@ -760,16 +825,20 @@ class Resource(object): if _host.remove_membership(lg) is True: _host.last_update = time.time() - if lg.group_type == "EX" or lg.group_type == "AFF" or lg.group_type == "DIV": + if lg.group_type == "EX" or lg.group_type == "AFF" or \ + lg.group_type == "DIV": if len(lg.vm_list) == 0: del self.logical_groups[lgk] if isinstance(_host, Host) and _host.host_group is not None: - self.remove_vm_by_h_uuid_from_logical_groups(_host.host_group, _h_uuid) + self.remove_vm_by_h_uuid_from_logical_groups(_host.host_group, + _h_uuid) elif isinstance(_host, HostGroup) and _host.parent_resource is not None: - self.remove_vm_by_h_uuid_from_logical_groups(_host.parent_resource, _h_uuid) + self.remove_vm_by_h_uuid_from_logical_groups(_host.parent_resource, + _h_uuid) def remove_vm_by_uuid_from_logical_groups(self, _host, _uuid): + """Remove vm by uuid from lgs and update proper host and lgs.""" for lgk in _host.memberships.keys(): if lgk not in self.logical_groups.keys(): continue @@ -783,7 +852,8 @@ class Resource(object): _host.last_update = time.time() elif isinstance(_host, HostGroup): - if lg.group_type == "EX" or lg.group_type == "AFF" or lg.group_type == "DIV": + if lg.group_type == "EX" or lg.group_type == "AFF" or \ + lg.group_type == "DIV": if lgk.split(":")[0] == _host.host_type: if lg.remove_vm_by_uuid(_uuid, _host.name) is True: lg.last_update = time.time() @@ -791,16 +861,19 @@ class Resource(object): if _host.remove_membership(lg) is True: 
_host.last_update = time.time() - if lg.group_type == "EX" or lg.group_type == "AFF" or lg.group_type == "DIV": + if lg.group_type == "EX" or lg.group_type == "AFF" or \ + lg.group_type == "DIV": if len(lg.vm_list) == 0: del self.logical_groups[lgk] if isinstance(_host, Host) and _host.host_group is not None: self.remove_vm_by_uuid_from_logical_groups(_host.host_group, _uuid) elif isinstance(_host, HostGroup) and _host.parent_resource is not None: - self.remove_vm_by_uuid_from_logical_groups(_host.parent_resource, _uuid) + self.remove_vm_by_uuid_from_logical_groups(_host.parent_resource, + _uuid) def clean_none_vms_from_logical_groups(self, _host): + """Clean vms with status none from logical groups.""" for lgk in _host.memberships.keys(): if lgk not in self.logical_groups.keys(): continue @@ -814,7 +887,8 @@ class Resource(object): _host.last_update = time.time() elif isinstance(_host, HostGroup): - if lg.group_type == "EX" or lg.group_type == "AFF" or lg.group_type == "DIV": + if lg.group_type == "EX" or lg.group_type == "AFF" or \ + lg.group_type == "DIV": if lgk.split(":")[0] == _host.host_type: if lg.clean_none_vms(_host.name) is True: lg.last_update = time.time() @@ -822,7 +896,8 @@ class Resource(object): if _host.remove_membership(lg) is True: _host.last_update = time.time() - if lg.group_type == "EX" or lg.group_type == "AFF" or lg.group_type == "DIV": + if lg.group_type == "EX" or lg.group_type == "AFF" or \ + lg.group_type == "DIV": if len(lg.vm_list) == 0: del self.logical_groups[lgk] @@ -832,6 +907,7 @@ class Resource(object): self.clean_none_vms_from_logical_groups(_host.parent_resource) def update_uuid_in_logical_groups(self, _h_uuid, _uuid, _host): + """Update uuid in lgs and update lg last update time.""" for lgk in _host.memberships.keys(): lg = self.logical_groups[lgk] @@ -839,7 +915,8 @@ class Resource(object): if lg.update_uuid(_h_uuid, _uuid, _host.name) is True: lg.last_update = time.time() elif isinstance(_host, HostGroup): - if 
lg.group_type == "EX" or lg.group_type == "AFF" or lg.group_type == "DIV": + if lg.group_type == "EX" or lg.group_type == "AFF" or \ + lg.group_type == "DIV": if lgk.split(":")[0] == _host.host_type: if lg.update_uuid(_h_uuid, _uuid, _host.name) is True: lg.last_update = time.time() @@ -847,9 +924,11 @@ class Resource(object): if isinstance(_host, Host) and _host.host_group is not None: self.update_uuid_in_logical_groups(_h_uuid, _uuid, _host.host_group) elif isinstance(_host, HostGroup) and _host.parent_resource is not None: - self.update_uuid_in_logical_groups(_h_uuid, _uuid, _host.parent_resource) + self.update_uuid_in_logical_groups(_h_uuid, _uuid, + _host.parent_resource) def update_h_uuid_in_logical_groups(self, _h_uuid, _uuid, _host): + """Update orchestration id in lgs and update lg last update time.""" for lgk in _host.memberships.keys(): lg = self.logical_groups[lgk] @@ -857,17 +936,26 @@ class Resource(object): if lg.update_h_uuid(_h_uuid, _uuid, _host.name) is True: lg.last_update = time.time() elif isinstance(_host, HostGroup): - if lg.group_type == "EX" or lg.group_type == "AFF" or lg.group_type == "DIV": + if lg.group_type == "EX" or lg.group_type == "AFF" or \ + lg.group_type == "DIV": if lgk.split(":")[0] == _host.host_type: if lg.update_h_uuid(_h_uuid, _uuid, _host.name) is True: lg.last_update = time.time() if isinstance(_host, Host) and _host.host_group is not None: - self.update_h_uuid_in_logical_groups(_h_uuid, _uuid, _host.host_group) + self.update_h_uuid_in_logical_groups(_h_uuid, _uuid, + _host.host_group) elif isinstance(_host, HostGroup) and _host.parent_resource is not None: - self.update_h_uuid_in_logical_groups(_h_uuid, _uuid, _host.parent_resource) + self.update_h_uuid_in_logical_groups(_h_uuid, _uuid, + _host.parent_resource) def compute_avail_resources(self, hk, host): + """Compute avail resources for host. + + This function computes ram, cpu and disk allocation ratios for + the passed in host. 
Then uses data to compute avail memory, disk + and vCPUs. + """ ram_allocation_ratio_list = [] cpu_allocation_ratio_list = [] disk_allocation_ratio_list = [] @@ -875,11 +963,14 @@ class Resource(object): for _, lg in host.memberships.iteritems(): if lg.group_type == "AGGR": if "ram_allocation_ratio" in lg.metadata.keys(): - ram_allocation_ratio_list.append(float(lg.metadata["ram_allocation_ratio"])) + ram_allocation_ratio_list.append( + float(lg.metadata["ram_allocation_ratio"])) if "cpu_allocation_ratio" in lg.metadata.keys(): - cpu_allocation_ratio_list.append(float(lg.metadata["cpu_allocation_ratio"])) + cpu_allocation_ratio_list.append( + float(lg.metadata["cpu_allocation_ratio"])) if "disk_allocation_ratio" in lg.metadata.keys(): - disk_allocation_ratio_list.append(float(lg.metadata["disk_allocation_ratio"])) + disk_allocation_ratio_list.append( + float(lg.metadata["disk_allocation_ratio"])) ram_allocation_ratio = 1.0 if len(ram_allocation_ratio_list) > 0: @@ -890,12 +981,15 @@ class Resource(object): static_ram_standby_ratio = 0 if self.config.static_mem_standby_ratio > 0: - static_ram_standby_ratio = float(self.config.static_mem_standby_ratio) / float(100) + static_ram_standby_ratio = \ + float(self.config.static_mem_standby_ratio) / float(100) host.compute_avail_mem(ram_allocation_ratio, static_ram_standby_ratio) - self.logger.debug("Resource.compute_avail_resources: host (" + hk + ")'s total_mem = " + - str(host.mem_cap) + ", avail_mem = " + str(host.avail_mem_cap)) + self.logger.debug("Resource.compute_avail_resources: host (" + + hk + ")'s total_mem = " + + str(host.mem_cap) + ", avail_mem = " + + str(host.avail_mem_cap)) cpu_allocation_ratio = 1.0 if len(cpu_allocation_ratio_list) > 0: @@ -906,30 +1000,39 @@ class Resource(object): static_cpu_standby_ratio = 0 if self.config.static_cpu_standby_ratio > 0: - static_cpu_standby_ratio = float(self.config.static_cpu_standby_ratio) / float(100) + static_cpu_standby_ratio = \ + 
float(self.config.static_cpu_standby_ratio) / float(100) host.compute_avail_vCPUs(cpu_allocation_ratio, static_cpu_standby_ratio) - self.logger.debug("Resource.compute_avail_resources: host (" + hk + ")'s total_vCPUs = " + - str(host.vCPUs) + ", avail_vCPUs = " + str(host.avail_vCPUs)) + self.logger.debug("Resource.compute_avail_resources: host (" + + hk + ")'s total_vCPUs = " + + str(host.vCPUs) + ", avail_vCPUs = " + + str(host.avail_vCPUs)) disk_allocation_ratio = 1.0 if len(disk_allocation_ratio_list) > 0: disk_allocation_ratio = min(disk_allocation_ratio_list) else: if self.config.default_disk_allocation_ratio > 0: - disk_allocation_ratio = self.config.default_disk_allocation_ratio + disk_allocation_ratio = \ + self.config.default_disk_allocation_ratio static_disk_standby_ratio = 0 if self.config.static_local_disk_standby_ratio > 0: - static_disk_standby_ratio = float(self.config.static_local_disk_standby_ratio) / float(100) + static_disk_standby_ratio = \ + float(self.config.static_local_disk_standby_ratio) / float(100) - host.compute_avail_disk(disk_allocation_ratio, static_disk_standby_ratio) + host.compute_avail_disk(disk_allocation_ratio, + static_disk_standby_ratio) - self.logger.debug("Resource.compute_avail_resources: host (" + hk + ")'s total_local_disk = " + - str(host.local_disk_cap) + ", avail_local_disk = " + str(host.avail_local_disk_cap)) + self.logger.debug("Resource.compute_avail_resources: host (" + + hk + ")'s total_local_disk = " + + str(host.local_disk_cap) + ", avail_local_disk = " + + str(host.avail_local_disk_cap)) def get_flavor(self, _name): + """Return flavor according to name passed in.""" flavor = None if _name in self.flavors.keys(): diff --git a/valet/engine/resource_manager/resource_base.py b/valet/engine/resource_manager/resource_base.py index ed7536f..dbf98f9 100755 --- a/valet/engine/resource_manager/resource_base.py +++ b/valet/engine/resource_manager/resource_base.py @@ -1,31 +1,45 @@ # # Copyright 2014-2017 AT&T 
Intellectual Property -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +"""Resource Base. + +File contains resource datatype objects from base type of a flavor and +builds all the way up to a datacenter object. +""" + from valet.engine.optimizer.app_manager.app_topology_base import LEVELS class Datacenter(object): + """Datacenter Class. + + This object represents a datacenter. It contains all memberships or + logical groups in the datacenter, all resources available, placed vms, + and more throughout the datacenter. 
+ """ def __init__(self, _name): + """Init Datacenter object.""" self.name = _name self.region_code_list = [] self.status = "enabled" - self.memberships = {} # all available logical groups (e.g., aggregate) in the datacenter + # all available logical groups (e.g., aggregate) in the datacenter + self.memberships = {} self.vCPUs = 0 self.original_vCPUs = 0 @@ -42,13 +56,17 @@ class Datacenter(object): self.resources = {} - self.vm_list = [] # a list of placed vms, (ochestration_uuid, vm_name, physical_uuid) - self.volume_list = [] # a list of placed volumes + # a list of placed vms, (ochestration_uuid, vm_name, physical_uuid) + self.vm_list = [] + + # a list of placed volumes + self.volume_list = [] self.last_update = 0 self.last_link_update = 0 def init_resources(self): + """Init datacenter resources to 0.""" self.vCPUs = 0 self.original_vCPUs = 0 self.avail_vCPUs = 0 @@ -60,6 +78,7 @@ class Datacenter(object): self.avail_local_disk_cap = 0 def get_json_info(self): + """Return JSON info for datacenter object.""" membership_list = [] for lgk in self.memberships.keys(): membership_list.append(lgk) @@ -100,14 +119,24 @@ class Datacenter(object): # data container for rack or cluster class HostGroup(object): + """Class for Host Group Object. + + This Class represents a group of hosts. If a single host is a single server + then host group is a rack or cluster of servers. This class contains all + memberships and resources for the group of hosts. 
+ """ def __init__(self, _id): + """Init for Host Group Class.""" self.name = _id - self.host_type = "rack" # rack or cluster(e.g., power domain, zone) + + # rack or cluster(e.g., power domain, zone) + self.host_type = "rack" self.status = "enabled" - self.memberships = {} # all available logical groups (e.g., aggregate) in this group + # all available logical groups (e.g., aggregate) in this group + self.memberships = {} self.vCPUs = 0 self.original_vCPUs = 0 @@ -125,13 +154,17 @@ class HostGroup(object): self.parent_resource = None # e.g., datacenter self.child_resources = {} # e.g., hosting servers - self.vm_list = [] # a list of placed vms, (ochestration_uuid, vm_name, physical_uuid) - self.volume_list = [] # a list of placed volumes + # a list of placed vms, (ochestration_uuid, vm_name, physical_uuid) + self.vm_list = [] + + # a list of placed volumes + self.volume_list = [] self.last_update = 0 self.last_link_update = 0 def init_resources(self): + """Init all host group resources to 0.""" self.vCPUs = 0 self.original_vCPUs = 0 self.avail_vCPUs = 0 @@ -143,19 +176,24 @@ class HostGroup(object): self.avail_local_disk_cap = 0 def init_memberships(self): + """Init Host Group memberships.""" for lgk in self.memberships.keys(): lg = self.memberships[lgk] - if lg.group_type == "EX" or lg.group_type == "AFF" or lg.group_type == "DIV": + if lg.group_type == "EX" or lg.group_type == "AFF" or \ + lg.group_type == "DIV": level = lg.name.split(":")[0] - if LEVELS.index(level) < LEVELS.index(self.host_type) or self.name not in lg.vms_per_host.keys(): + if LEVELS.index(level) < LEVELS.index(self.host_type) or \ + self.name not in lg.vms_per_host.keys(): del self.memberships[lgk] else: del self.memberships[lgk] def remove_membership(self, _lg): + """Return True if membership to group _lg removed.""" cleaned = False - if _lg.group_type == "EX" or _lg.group_type == "AFF" or _lg.group_type == "DIV": + if _lg.group_type == "EX" or _lg.group_type == "AFF" or \ + _lg.group_type == 
"DIV": if self.name not in _lg.vms_per_host.keys(): del self.memberships[_lg.name] cleaned = True @@ -163,12 +201,14 @@ class HostGroup(object): return cleaned def check_availability(self): + """Return True if Host Group status is 'enabled'.""" if self.status == "enabled": return True else: return False def get_json_info(self): + """Return JSON info for Host Group object.""" membership_list = [] for lgk in self.memberships.keys(): membership_list.append(lgk) @@ -208,15 +248,25 @@ class HostGroup(object): class Host(object): + """Class for Host Object. + + This class is for a Host Object, imagine a server. This means + information about the groups the host is a part of, all the hardware + parameters (vCPUs, local disk, memory) as well as the list of vms and + volumes placed on the host. + """ def __init__(self, _name): + """Init for Host object.""" self.name = _name - self.tag = [] # mark if this is synch'ed by multiple sources + # mark if this is synch'ed by multiple sources + self.tag = [] self.status = "enabled" self.state = "up" - self.memberships = {} # logical group (e.g., aggregate) this hosting server is involved in + # logical group (e.g., aggregate) this hosting server is involved in + self.memberships = {} self.vCPUs = 0 self.original_vCPUs = 0 @@ -238,13 +288,17 @@ class Host(object): self.host_group = None # e.g., rack - self.vm_list = [] # a list of placed vms, (ochestration_uuid, vm_name, physical_uuid) - self.volume_list = [] # a list of placed volumes + # a list of placed vms, (ochestration_uuid, vm_name, physical_uuid) + self.vm_list = [] + + # a list of placed volumes + self.volume_list = [] self.last_update = 0 self.last_link_update = 0 def clean_memberships(self): + """Return True if host cleaned from logical group membership.""" cleaned = False for lgk in self.memberships.keys(): @@ -256,9 +310,11 @@ class Host(object): return cleaned def remove_membership(self, _lg): + """Return True if host removed from logical group _lg passed in.""" cleaned 
= False - if _lg.group_type == "EX" or _lg.group_type == "AFF" or _lg.group_type == "DIV": + if _lg.group_type == "EX" or _lg.group_type == "AFF" or \ + _lg.group_type == "DIV": if self.name not in _lg.vms_per_host.keys(): del self.memberships[_lg.name] cleaned = True @@ -266,12 +322,15 @@ class Host(object): return cleaned def check_availability(self): - if self.status == "enabled" and self.state == "up" and ("nova" in self.tag) and ("infra" in self.tag): + """Return True if host is up, enabled and tagged as nova infra.""" + if self.status == "enabled" and self.state == "up" and \ + ("nova" in self.tag) and ("infra" in self.tag): return True else: return False def get_uuid(self, _h_uuid): + """Return uuid of vm with matching orchestration id(_h_uuid).""" uuid = None for vm_id in self.vm_list: @@ -282,6 +341,7 @@ class Host(object): return uuid def exist_vm_by_h_uuid(self, _h_uuid): + """Return True if vm with orchestration id(_h_uuid) exists on host.""" exist = False for vm_id in self.vm_list: @@ -292,6 +352,7 @@ class Host(object): return exist def exist_vm_by_uuid(self, _uuid): + """Return True if vm with physical id(_uuid) exists on host.""" exist = False for vm_id in self.vm_list: @@ -302,6 +363,7 @@ class Host(object): return exist def remove_vm_by_h_uuid(self, _h_uuid): + """Return True if vm removed with matching _h_uuid.""" success = False for vm_id in self.vm_list: @@ -313,6 +375,7 @@ class Host(object): return success def remove_vm_by_uuid(self, _uuid): + """Return True if vm removed with matching _uuid.""" success = False for vm_id in self.vm_list: @@ -324,6 +387,7 @@ class Host(object): return success def update_uuid(self, _h_uuid, _uuid): + """Return True if vm physical id updated.""" success = False vm_name = "none" @@ -341,6 +405,7 @@ class Host(object): return success def update_h_uuid(self, _h_uuid, _uuid): + """Return True if vm orchestration id (_h_uuid) updated.""" success = False vm_name = "none" @@ -358,19 +423,27 @@ class Host(object): 
return success def compute_avail_vCPUs(self, _overcommit_ratio, _standby_ratio): - self.vCPUs = self.original_vCPUs * _overcommit_ratio * (1.0 - _standby_ratio) + """Calc avail_vCPUs by calculating vCPUs and subtracting in use.""" + self.vCPUs = \ + self.original_vCPUs * _overcommit_ratio * (1.0 - _standby_ratio) self.avail_vCPUs = self.vCPUs - self.vCPUs_used def compute_avail_mem(self, _overcommit_ratio, _standby_ratio): - self.mem_cap = self.original_mem_cap * _overcommit_ratio * (1.0 - _standby_ratio) + """Calc avail_mem by calculating mem_cap and subtract used mem.""" + self.mem_cap = \ + self.original_mem_cap * _overcommit_ratio * (1.0 - _standby_ratio) used_mem_mb = self.original_mem_cap - self.free_mem_mb self.avail_mem_cap = self.mem_cap - used_mem_mb def compute_avail_disk(self, _overcommit_ratio, _standby_ratio): - self.local_disk_cap = self.original_local_disk_cap * _overcommit_ratio * (1.0 - _standby_ratio) + """Calc avail_disk by calc local_disk_cap and subtract used disk.""" + self.local_disk_cap = \ + self.original_local_disk_cap * \ + _overcommit_ratio * \ + (1.0 - _standby_ratio) free_disk_cap = self.free_disk_gb if self.disk_available_least > 0: @@ -381,6 +454,7 @@ class Host(object): self.avail_local_disk_cap = self.local_disk_cap - used_disk_cap def get_json_info(self): + """Return JSON info for Host object.""" membership_list = [] for lgk in self.memberships.keys(): membership_list.append(lgk) @@ -418,23 +492,37 @@ class Host(object): class LogicalGroup(object): + """Logical Group class. + + This class contains info about grouped vms, such as metadata when placing + nodes, list of placed vms, list of placed volumes and group type. 
+ """ def __init__(self, _name): + """Init Logical Group object.""" self.name = _name - self.group_type = "AGGR" # AGGR, AZ, INTG, EX, DIV, or AFF + + # AGGR, AZ, INTG, EX, DIV, or AFF + self.group_type = "AGGR" self.status = "enabled" - self.metadata = {} # any metadata to be matched when placing nodes + # any metadata to be matched when placing nodes + self.metadata = {} - self.vm_list = [] # a list of placed vms, (ochestration_uuid, vm_name, physical_uuid) - self.volume_list = [] # a list of placed volumes + # a list of placed vms, (ochestration_uuid, vm_name, physical_uuid) + self.vm_list = [] - self.vms_per_host = {} # key = host_id, value = a list of placed vms + # a list of placed volumes + self.volume_list = [] + + # key = host_id, value = a list of placed vms + self.vms_per_host = {} self.last_update = 0 def exist_vm_by_h_uuid(self, _h_uuid): + """Return True if h_uuid exist in vm_list as an orchestration_uuid.""" exist = False for vm_id in self.vm_list: @@ -445,6 +533,7 @@ class LogicalGroup(object): return exist def exist_vm_by_uuid(self, _uuid): + """Return True if uuid exist in vm_list as physical_uuid.""" exist = False for vm_id in self.vm_list: @@ -455,6 +544,7 @@ class LogicalGroup(object): return exist def update_uuid(self, _h_uuid, _uuid, _host_id): + """Return True if _uuid and/or _host_id successfully updated.""" success = False vm_name = "none" @@ -481,6 +571,7 @@ class LogicalGroup(object): return success def update_h_uuid(self, _h_uuid, _uuid, _host_id): + """Return True physical_uuid and/or _host_id successfully updated.""" success = False vm_name = "none" @@ -507,12 +598,14 @@ class LogicalGroup(object): return success def add_vm_by_h_uuid(self, _vm_id, _host_id): + """Return True if vm added with id _vm_id(orchestration id).""" success = False if self.exist_vm_by_h_uuid(_vm_id[0]) is False: self.vm_list.append(_vm_id) - if self.group_type == "EX" or self.group_type == "AFF" or self.group_type == "DIV": + if self.group_type == "EX" or 
self.group_type == "AFF" or \ + self.group_type == "DIV": if _host_id not in self.vms_per_host.keys(): self.vms_per_host[_host_id] = [] self.vms_per_host[_host_id].append(_vm_id) @@ -522,6 +615,7 @@ class LogicalGroup(object): return success def remove_vm_by_h_uuid(self, _h_uuid, _host_id): + """Return True if vm removed with id _h_uuid(orchestration id).""" success = False for vm_id in self.vm_list: @@ -537,13 +631,16 @@ class LogicalGroup(object): success = True break - if self.group_type == "EX" or self.group_type == "AFF" or self.group_type == "DIV": - if (_host_id in self.vms_per_host.keys()) and len(self.vms_per_host[_host_id]) == 0: + if self.group_type == "EX" or self.group_type == "AFF" or \ + self.group_type == "DIV": + if (_host_id in self.vms_per_host.keys()) and \ + len(self.vms_per_host[_host_id]) == 0: del self.vms_per_host[_host_id] return success def remove_vm_by_uuid(self, _uuid, _host_id): + """Return True if vm with matching uuid found and removed.""" success = False for vm_id in self.vm_list: @@ -559,13 +656,16 @@ class LogicalGroup(object): success = True break - if self.group_type == "EX" or self.group_type == "AFF" or self.group_type == "DIV": - if (_host_id in self.vms_per_host.keys()) and len(self.vms_per_host[_host_id]) == 0: + if self.group_type == "EX" or self.group_type == "AFF" or \ + self.group_type == "DIV": + if (_host_id in self.vms_per_host.keys()) and \ + len(self.vms_per_host[_host_id]) == 0: del self.vms_per_host[_host_id] return success def clean_none_vms(self, _host_id): + """Return True if vm's or host vm's removed with physical id none.""" success = False for vm_id in self.vm_list: @@ -579,13 +679,16 @@ class LogicalGroup(object): self.vms_per_host[_host_id].remove(vm_id) success = True - if self.group_type == "EX" or self.group_type == "AFF" or self.group_type == "DIV": - if (_host_id in self.vms_per_host.keys()) and len(self.vms_per_host[_host_id]) == 0: + if self.group_type == "EX" or self.group_type == "AFF" or \ + 
self.group_type == "DIV": + if (_host_id in self.vms_per_host.keys()) and \ + len(self.vms_per_host[_host_id]) == 0: del self.vms_per_host[_host_id] return success def get_json_info(self): + """Return JSON info for Logical Group object.""" return {'status': self.status, 'group_type': self.group_type, 'metadata': self.metadata, @@ -596,8 +699,10 @@ class LogicalGroup(object): class Switch(object): + """Switch class.""" def __init__(self, _switch_id): + """Init Switch object.""" self.name = _switch_id self.switch_type = "ToR" # root, spine, ToR, or leaf @@ -610,6 +715,7 @@ class Switch(object): self.last_update = 0 def get_json_info(self): + """Return JSON info on Switch object.""" ulinks = {} for ulk, ul in self.up_links.iteritems(): ulinks[ulk] = ul.get_json_info() @@ -626,8 +732,10 @@ class Switch(object): class Link(object): + """Link class.""" def __init__(self, _name): + """Init Link object.""" self.name = _name # format: source + "-" + target self.resource = None # switch beging connected to @@ -635,29 +743,33 @@ class Link(object): self.avail_nw_bandwidth = 0 def get_json_info(self): + """Return JSON info on Link object.""" return {'resource': self.resource.name, 'bandwidth': self.nw_bandwidth, 'avail_bandwidth': self.avail_nw_bandwidth} class StorageHost(object): + """Storage Host class.""" def __init__(self, _name): + """Init Storage Host object.""" self.name = _name - self.storage_class = None # tiering, e.g., platinum, gold, silver + self.storage_class = None # tiering, e.g., platinum, gold, silver self.status = "enabled" self.host_list = [] - self.disk_cap = 0 # GB + self.disk_cap = 0 # GB self.avail_disk_cap = 0 - self.volume_list = [] # list of volume names placed in this host + self.volume_list = [] # list of volume names placed in this host self.last_update = 0 self.last_cap_update = 0 def get_json_info(self): + """Return JSON info on Storage Host object.""" return {'status': self.status, 'class': self.storage_class, 'host_list': self.host_list, @@ 
-669,8 +781,10 @@ class StorageHost(object): class Flavor(object): + """Flavor class.""" def __init__(self, _name): + """Init flavor object.""" self.name = _name self.flavor_id = None @@ -685,6 +799,7 @@ class Flavor(object): self.last_update = 0 def get_json_info(self): + """Return JSON info of Flavor Object.""" return {'status': self.status, 'flavor_id': self.flavor_id, 'vCPUs': self.vCPUs, diff --git a/valet/engine/resource_manager/simulation/compute_simulator.py b/valet/engine/resource_manager/simulation/compute_simulator.py index 896a4ee..96a7310 100644 --- a/valet/engine/resource_manager/simulation/compute_simulator.py +++ b/valet/engine/resource_manager/simulation/compute_simulator.py @@ -1,36 +1,33 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# Copyright 2014-2017 AT&T Intellectual Property +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+"""Simulate hosts and flavors.""" -################################################################################################################# -# Author: Gueyoung Jung -# Contact: gjung@research.att.com -# Version 2.0.2: Feb. 9, 2016 -# -# Functions -# - Simulate hosts and flavors -# -################################################################################################################# - - -from valet.engine.resource_manager.resource_base import Host, LogicalGroup, Flavor +from valet.engine.resource_manager.resource_base \ + import Host, LogicalGroup, Flavor class SimCompute(object): + """Simulate Compute class.""" def __init__(self, _config): + """Init.""" self.config = _config def set_hosts(self, _hosts, _logical_groups): + """Return success after setting sim hosts and flavors.""" self._set_availability_zones(_hosts, _logical_groups) self._set_aggregates(_hosts, _logical_groups) @@ -47,18 +44,9 @@ class SimCompute(object): _logical_groups[logical_group.name] = logical_group for r_num in range(0, self.config.num_of_racks): - - # for test - ''' - num_of_hosts = 0 - if r_num == 1: - num_of_hosts = 1 - else: - num_of_hosts = 2 - for h_num in range(0, num_of_hosts): - ''' for h_num in range(0, self.config.num_of_hosts_per_rack): - host = Host(self.config.mode + "0r" + str(r_num) + "c" + str(h_num)) + host = Host(self.config.mode + "0r" + str(r_num) + "c" + + str(h_num)) host.tag.append("nova") host.memberships["nova"] = logical_group @@ -81,9 +69,11 @@ class SimCompute(object): aggregate = _logical_groups["aggregate" + str(a_num)] for r_num in range(0, self.config.num_of_racks): for h_num in range(0, self.config.num_of_hosts_per_rack): - host_name = self.config.mode + "0r" + str(r_num) + "c" + str(h_num) + host_name = self.config.mode + "0r" + str(r_num) + "c" + \ + str(h_num) if host_name in _hosts.keys(): - if (h_num % (self.config.aggregated_ratio + a_num)) == 0: + if (h_num % + (self.config.aggregated_ratio + a_num)) == 0: host = 
_hosts[host_name] host.memberships[aggregate.name] = aggregate @@ -94,40 +84,29 @@ class SimCompute(object): def _set_resources(self, _hosts): for r_num in range(0, self.config.num_of_racks): - - # for test - ''' - num_of_hosts = 0 - if r_num == 1: - num_of_hosts = 1 - else: - num_of_hosts = 2 - for h_num in range(0, num_of_hosts): - ''' for h_num in range(0, self.config.num_of_hosts_per_rack): - host_name = self.config.mode + "0r" + str(r_num) + "c" + str(h_num) + host_name = self.config.mode + "0r" + str(r_num) + "c" + \ + str(h_num) if host_name in _hosts.keys(): host = _hosts[host_name] - # for test - ''' - if r_num == 1: - host.status = "disabled" - host.state = "down" - ''' host.original_vCPUs = float(self.config.cpus_per_host) host.vCPUs_used = 0.0 host.original_mem_cap = float(self.config.mem_per_host) host.free_mem_mb = host.original_mem_cap - host.original_local_disk_cap = float(self.config.disk_per_host) + host.original_local_disk_cap = \ + float(self.config.disk_per_host) host.free_disk_gb = host.original_local_disk_cap host.disk_available_least = host.original_local_disk_cap def set_flavors(self, _flavors): + """Return success after setting passed in flavors.""" for f_num in range(0, self.config.num_of_basic_flavors): flavor = Flavor("bflavor" + str(f_num)) flavor.vCPUs = float(self.config.base_flavor_cpus * (f_num + 1)) flavor.mem_cap = float(self.config.base_flavor_mem * (f_num + 1)) - flavor.disk_cap = float(self.config.base_flavor_disk * (f_num + 1)) + 10.0 + 20.0 / 1024.0 + flavor.disk_cap = \ + float(self.config.base_flavor_disk * (f_num + 1)) + \ + 10.0 + 20.0 / 1024.0 _flavors[flavor.name] = flavor @@ -137,7 +116,6 @@ class SimCompute(object): flavor.mem_cap = self.config.base_flavor_mem * (a_num + 1) flavor.disk_cap = self.config.base_flavor_disk * (a_num + 1) - # flavor.extra_specs["availability_zone"] = "nova" flavor.extra_specs["cpu_allocation_ratio"] = "0.5" _flavors[flavor.name] = flavor diff --git 
a/valet/engine/resource_manager/simulation/topology_simulator.py b/valet/engine/resource_manager/simulation/topology_simulator.py index 96ae223..d839f65 100644 --- a/valet/engine/resource_manager/simulation/topology_simulator.py +++ b/valet/engine/resource_manager/simulation/topology_simulator.py @@ -1,36 +1,33 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# Copyright 2014-2017 AT&T Intellectual Property +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Simulate datacenter configurations (i.e., layout, cabling).""" -################################################################################################################# -# Author: Gueyoung Jung -# Contact: gjung@research.att.com -# Version 2.0.2: Feb. 
9, 2016 -# -# Functions -# - Simulate datacenter configurations (i.e., layout, cabling) -# -################################################################################################################# - - -from valet.engine.resource_manager.resource_base import HostGroup, Host, Switch, Link +from valet.engine.resource_manager.resource_base \ + import HostGroup, Host, Switch, Link class SimTopology(object): + """Simulate Network and Host Topology class.""" def __init__(self, _config): + """Init.""" self.config = _config def set_topology(self, _datacenter, _host_groups, _hosts, _switches): + """Return success string after setting network and host topology.""" self._set_network_topology(_switches) self._set_host_topology(_datacenter, _host_groups, _hosts, _switches) @@ -71,7 +68,8 @@ class SimTopology(object): ps = None if (s_num % 2) == 0: if (s_num + 1) < self.config.num_of_spine_switches: - ps = _switches[root_switch.name + "s" + str(s_num + 1)] + ps = _switches[root_switch.name + "s" + + str(s_num + 1)] else: ps = _switches[root_switch.name + "s" + str(s_num - 1)] if ps is not None: @@ -87,7 +85,8 @@ class SimTopology(object): parent_switch_list = [] if self.config.num_of_spine_switches > 0: for s_num in range(0, self.config.num_of_spine_switches): - parent_switch_list.append(_switches[root_switch.name + "s" + str(s_num)]) + parent_switch_list.append(_switches[root_switch.name + + "s" + str(s_num)]) else: parent_switch_list.append(_switches[root_switch.name]) diff --git a/valet/engine/resource_manager/topology.py b/valet/engine/resource_manager/topology.py index 0a0b3fc..94b1b52 100755 --- a/valet/engine/resource_manager/topology.py +++ b/valet/engine/resource_manager/topology.py @@ -1,18 +1,20 @@ # # Copyright 2014-2017 AT&T Intellectual Property -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +"""Topology class - performs actual setting up of Topology object.""" + import copy import sys @@ -21,18 +23,24 @@ from valet.engine.resource_manager.resource_base import HostGroup, Switch, Link class Topology(object): + """Topology class.""" def __init__(self, _config, _logger): + """Init config and logger.""" self.config = _config self.logger = _logger # Triggered by rhosts change - def set_topology(self, _datacenter, _host_groups, _hosts, _rhosts, _switches): - result_status = self._set_host_topology(_datacenter, _host_groups, _hosts, _rhosts) + def set_topology(self, _datacenter, _host_groups, _hosts, _rhosts, + _switches): + """Return result status if setting host or network topology fails.""" + result_status = self._set_host_topology(_datacenter, _host_groups, + _hosts, _rhosts) if result_status != "success": return result_status - result_status = self._set_network_topology(_datacenter, _host_groups, _hosts, _switches) + result_status = self._set_network_topology(_datacenter, _host_groups, + _hosts, _switches) if result_status != "success": return result_status @@ -80,7 +88,8 @@ class Topology(object): return "success" # NOTE: this is just muck-ups - def _set_network_topology(self, _datacenter, _host_groups, _hosts, _switches): + def _set_network_topology(self, _datacenter, _host_groups, _hosts, + _switches): root_switch = Switch(_datacenter.name) root_switch.switch_type = "root" @@ -134,7 +143,8 @@ class Topology(object): if index >= self.config.num_of_region_chars: if not isdigit(c): if index == self.config.num_of_region_chars: - status = "invalid 
region name = " + _host_name[:index] + c + status = "invalid region name = " + \ + _host_name[:index] + c validated_name = False break @@ -152,7 +162,9 @@ class Topology(object): validated_name = False break - if end_of_rack_index == 0 and index > (end_of_region_index + 1): + if end_of_rack_index == 0 and \ + index > (end_of_region_index + 1): + end_of_rack_index = index num_of_fields += 1 @@ -179,7 +191,8 @@ class Topology(object): validated_name = False if num_of_fields != 3: - status = "invalid number of identification fields = " + str(num_of_fields) + status = "invalid number of identification fields = " + \ + str(num_of_fields) validated_name = False if validated_name is False: diff --git a/valet/engine/resource_manager/topology_manager.py b/valet/engine/resource_manager/topology_manager.py index e2ae896..90015aa 100755 --- a/valet/engine/resource_manager/topology_manager.py +++ b/valet/engine/resource_manager/topology_manager.py @@ -1,28 +1,38 @@ # # Copyright 2014-2017 AT&T Intellectual Property -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +"""Topology Manager. + +Actions involved in setting up and managing topology. This includes setting +topology, checking updates, creating new switches( also hosts and links), as +well as updating them. 
+""" + import threading import time -from valet.engine.resource_manager.resource_base import Datacenter, HostGroup, Host, Switch, Link +from valet.engine.resource_manager.resource_base \ + import Datacenter, HostGroup, Host, Switch, Link from valet.engine.resource_manager.topology import Topology class TopologyManager(threading.Thread): + """Topology Manager Class.""" def __init__(self, _t_id, _t_name, _resource, _data_lock, _config, _logger): + """Init Topology Manager.""" threading.Thread.__init__(self) self.thread_id = _t_id @@ -37,7 +47,9 @@ class TopologyManager(threading.Thread): self.logger = _logger def run(self): - self.logger.info("TopologyManager: start " + self.thread_name + " ......") + """Function starts and tracks Topology Manager Thread.""" + self.logger.info("TopologyManager: start " + + self.thread_name + " ......") if self.config.topology_trigger_freq > 0: period_end = time.time() + self.config.topology_trigger_freq @@ -61,7 +73,10 @@ class TopologyManager(threading.Thread): time.sleep(70) now = time.localtime() - if now.tm_year > last_trigger_year or now.tm_mon > last_trigger_mon or now.tm_mday > last_trigger_mday: + if now.tm_year > last_trigger_year or \ + now.tm_mon > last_trigger_mon or \ + now.tm_mday > last_trigger_mday: + timeout = False if timeout is False and \ @@ -77,13 +92,14 @@ class TopologyManager(threading.Thread): def _run(self): - self.logger.info("TopologyManager: --- start topology status update ---") + self.logger.info("TopologyManager: --- start topology " + "status update ---") self.data_lock.acquire() try: if self.set_topology() is True: if self.resource.update_topology() is False: - # TODO: ignore? + # TODO(UNKOWN): ignore? 
pass finally: self.data_lock.release() @@ -91,6 +107,7 @@ class TopologyManager(threading.Thread): self.logger.info("TopologyManager: --- done topology status update ---") def set_topology(self): + """Return True if datacenter topology successfully setup.""" datacenter = None host_groups = {} hosts = {} @@ -105,7 +122,8 @@ class TopologyManager(threading.Thread): topology = Topology(self.config, self.logger) - status = topology.set_topology(datacenter, host_groups, hosts, self.resource.hosts, switches) + status = topology.set_topology(datacenter, host_groups, hosts, + self.resource.hosts, switches) if status != "success": self.logger.error("TopologyManager: " + status) return False @@ -122,7 +140,8 @@ class TopologyManager(threading.Thread): new_switch.last_update = time.time() - self.logger.warn("TopologyManager: new switch (" + new_switch.name + ") added") + self.logger.warn("TopologyManager: new switch (" + + new_switch.name + ") added") for rsk in self.resource.switches.keys(): if rsk not in _switches.keys(): @@ -131,7 +150,8 @@ class TopologyManager(threading.Thread): switch.last_update = time.time() - self.logger.warn("TopologyManager: switch (" + switch.name + ") disabled") + self.logger.warn("TopologyManager: switch (" + + switch.name + ") disabled") for hk in _hosts.keys(): if hk not in self.resource.hosts.keys(): @@ -140,7 +160,8 @@ class TopologyManager(threading.Thread): new_host.last_update = time.time() - self.logger.warn("TopologyManager: new host (" + new_host.name + ") added from configuration") + self.logger.warn("TopologyManager: new host (" + + new_host.name + ") added from configuration") for rhk in self.resource.hosts.keys(): if rhk not in _hosts.keys(): @@ -150,7 +171,8 @@ class TopologyManager(threading.Thread): host.last_update = time.time() - self.logger.warn("TopologyManager: host (" + host.name + ") removed from configuration") + self.logger.warn("TopologyManager: host (" + + host.name + ") removed from configuration") for hgk in 
_host_groups.keys(): if hgk not in self.resource.host_groups.keys(): @@ -159,7 +181,8 @@ class TopologyManager(threading.Thread): new_host_group.last_update = time.time() - self.logger.warn("TopologyManager: new host_group (" + new_host_group.name + ") added") + self.logger.warn("TopologyManager: new host_group (" + + new_host_group.name + ") added") for rhgk in self.resource.host_groups.keys(): if rhgk not in _host_groups.keys(): @@ -168,7 +191,8 @@ class TopologyManager(threading.Thread): host_group.last_update = time.time() - self.logger.warn("TopologyManager: host_group (" + host_group.name + ") disabled") + self.logger.warn("TopologyManager: host_group (" + + host_group.name + ") disabled") for sk in _switches.keys(): switch = _switches[sk] @@ -180,7 +204,8 @@ class TopologyManager(threading.Thread): for hk in _hosts.keys(): host = _hosts[hk] rhost = self.resource.hosts[hk] - (topology_updated, link_updated) = self._check_host_update(host, rhost) + (topology_updated, link_updated) = \ + self._check_host_update(host, rhost) if topology_updated is True: rhost.last_update = time.time() if link_updated is True: @@ -189,13 +214,15 @@ class TopologyManager(threading.Thread): for hgk in _host_groups.keys(): hg = _host_groups[hgk] rhg = self.resource.host_groups[hgk] - (topology_updated, link_updated) = self._check_host_group_update(hg, rhg) + (topology_updated, link_updated) = \ + self._check_host_group_update(hg, rhg) if topology_updated is True: rhg.last_update = time.time() if link_updated is True: rhg.last_link_update = time.time() - (topology_updated, link_updated) = self._check_datacenter_update(_datacenter) + (topology_updated, link_updated) = \ + self._check_datacenter_update(_datacenter) if topology_updated is True: self.resource.datacenter.last_update = time.time() if link_updated is True: @@ -242,12 +269,14 @@ class TopologyManager(threading.Thread): if _switch.switch_type != _rswitch.switch_type: _rswitch.switch_type = _switch.switch_type updated = True - 
self.logger.warn("TopologyManager: switch (" + _rswitch.name + ") updated (switch type)") + self.logger.warn("TopologyManager: switch (" + _rswitch.name + + ") updated (switch type)") if _rswitch.status == "disabled": _rswitch.status = "enabled" updated = True - self.logger.warn("TopologyManager: switch (" + _rswitch.name + ") updated (enabled)") + self.logger.warn("TopologyManager: switch (" + _rswitch.name + + ") updated (enabled)") for ulk in _switch.up_links.keys(): exist = False @@ -259,7 +288,8 @@ class TopologyManager(threading.Thread): new_link = self._create_new_link(_switch.up_links[ulk]) _rswitch.up_links[new_link.name] = new_link updated = True - self.logger.warn("TopologyManager: switch (" + _rswitch.name + ") updated (new link)") + self.logger.warn("TopologyManager: switch (" + _rswitch.name + + ") updated (new link)") for rulk in _rswitch.up_links.keys(): exist = False @@ -270,14 +300,16 @@ class TopologyManager(threading.Thread): if exist is False: del _rswitch.up_links[rulk] updated = True - self.logger.warn("TopologyManager: switch (" + _rswitch.name + ") updated (link removed)") + self.logger.warn("TopologyManager: switch (" + _rswitch.name + + ") updated (link removed)") for ulk in _rswitch.up_links.keys(): link = _switch.up_links[ulk] rlink = _rswitch.up_links[ulk] if self._check_link_update(link, rlink) is True: updated = True - self.logger.warn("TopologyManager: switch (" + _rswitch.name + ") updated (bandwidth)") + self.logger.warn("TopologyManager: switch (" + _rswitch.name + + ") updated (bandwidth)") for plk in _switch.peer_links.keys(): exist = False @@ -289,7 +321,8 @@ class TopologyManager(threading.Thread): new_link = self._create_new_link(_switch.peer_links[plk]) _rswitch.peer_links[new_link.name] = new_link updated = True - self.logger.warn("TopologyManager: switch (" + _rswitch.name + ") updated (new link)") + self.logger.warn("TopologyManager: switch (" + _rswitch.name + + ") updated (new link)") for rplk in 
_rswitch.peer_links.keys(): exist = False @@ -300,14 +333,16 @@ class TopologyManager(threading.Thread): if exist is False: del _rswitch.peer_links[rplk] updated = True - self.logger.warn("TopologyManager: switch (" + _rswitch.name + ") updated (link removed)") + self.logger.warn("TopologyManager: switch (" + _rswitch.name + + ") updated (link removed)") for plk in _rswitch.peer_links.keys(): link = _switch.peer_links[plk] rlink = _rswitch.peer_links[plk] if self._check_link_update(link, rlink) is True: updated = True - self.logger.warn("TopologyManager: switch (" + _rswitch.name + ") updated (bandwidth)") + self.logger.warn("TopologyManager: switch (" + _rswitch.name + + ") updated (bandwidth)") return updated @@ -327,15 +362,20 @@ class TopologyManager(threading.Thread): if "infra" not in _rhost.tag: _rhost.tag.append("infra") updated = True - self.logger.warn("TopologyManager: host (" + _rhost.name + ") updated (tag)") + self.logger.warn("TopologyManager: host (" + _rhost.name + + ") updated (tag)") + + if _rhost.host_group is None or \ + _host.host_group.name != _rhost.host_group.name: - if _rhost.host_group is None or _host.host_group.name != _rhost.host_group.name: if _host.host_group.name in self.resource.host_groups.keys(): - _rhost.host_group = self.resource.host_groups[_host.host_group.name] + _rhost.host_group = \ + self.resource.host_groups[_host.host_group.name] else: _rhost.host_group = self.resource.datacenter updated = True - self.logger.warn("TopologyManager: host (" + _rhost.name + ") updated (host_group)") + self.logger.warn("TopologyManager: host (" + _rhost.name + + ") updated (host_group)") for sk in _host.switches.keys(): exist = False @@ -346,7 +386,8 @@ class TopologyManager(threading.Thread): if exist is False: _rhost.switches[sk] = self.resource.switches[sk] link_updated = True - self.logger.warn("TopologyManager: host (" + _rhost.name + ") updated (new switch)") + self.logger.warn("TopologyManager: host (" + _rhost.name + + ") updated 
(new switch)") for rsk in _rhost.switches.keys(): exist = False @@ -357,7 +398,8 @@ class TopologyManager(threading.Thread): if exist is False: del _rhost.switches[rsk] link_updated = True - self.logger.warn("TopologyManager: host (" + _rhost.name + ") updated (switch removed)") + self.logger.warn("TopologyManager: host (" + _rhost.name + + ") updated (switch removed)") return (updated, link_updated) @@ -368,20 +410,26 @@ class TopologyManager(threading.Thread): if _hg.host_type != _rhg.host_type: _rhg.host_type = _hg.host_type updated = True - self.logger.warn("TopologyManager: host_group (" + _rhg.name + ") updated (hosting type)") + self.logger.warn("TopologyManager: host_group (" + _rhg.name + + ") updated (hosting type)") if _rhg.status == "disabled": _rhg.status = "enabled" updated = True - self.logger.warn("TopologyManager: host_group (" + _rhg.name + ") updated (enabled)") + self.logger.warn("TopologyManager: host_group (" + _rhg.name + + ") updated (enabled)") + + if _rhg.parent_resource is None or \ + _hg.parent_resource.name != _rhg.parent_resource.name: - if _rhg.parent_resource is None or _hg.parent_resource.name != _rhg.parent_resource.name: if _hg.parent_resource.name in self.resource.host_groups.keys(): - _rhg.parent_resource = self.resource.host_groups[_hg.parent_resource.name] + _rhg.parent_resource = \ + self.resource.host_groups[_hg.parent_resource.name] else: _rhg.parent_resource = self.resource.datacenter updated = True - self.logger.warn("TopologyManager: host_group (" + _rhg.name + ") updated (parent host_group)") + self.logger.warn("TopologyManager: host_group (" + _rhg.name + + ") updated (parent host_group)") for rk in _hg.child_resources.keys(): exist = False @@ -395,7 +443,8 @@ class TopologyManager(threading.Thread): elif _rhg.host_type == "cluster": _rhg.child_resources[rk] = self.resource.host_groups[rk] updated = True - self.logger.warn("TopologyManager: host_group (" + _rhg.name + ") updated (new child host)") + 
self.logger.warn("TopologyManager: host_group (" + _rhg.name + + ") updated (new child host)") for rrk in _rhg.child_resources.keys(): exist = False @@ -406,7 +455,8 @@ class TopologyManager(threading.Thread): if exist is False: del _rhg.child_resources[rrk] updated = True - self.logger.warn("TopologyManager: host_group (" + _rhg.name + ") updated (child host removed)") + self.logger.warn("TopologyManager: host_group (" + _rhg.name + + ") updated (child host removed)") for sk in _hg.switches.keys(): exist = False @@ -417,7 +467,8 @@ class TopologyManager(threading.Thread): if exist is False: _rhg.switches[sk] = self.resource.switches[sk] link_updated = True - self.logger.warn("TopologyManager: host_group (" + _rhg.name + ") updated (new switch)") + self.logger.warn("TopologyManager: host_group (" + _rhg.name + + ") updated (new switch)") for rsk in _rhg.switches.keys(): exist = False @@ -428,7 +479,8 @@ class TopologyManager(threading.Thread): if exist is False: del _rhg.switches[rsk] link_updated = True - self.logger.warn("TopologyManager: host_group (" + _rhg.name + ") updated (switch removed)") + self.logger.warn("TopologyManager: host_group (" + _rhg.name + + ") updated (switch removed)") return (updated, link_updated) @@ -440,13 +492,15 @@ class TopologyManager(threading.Thread): if rc not in self.resource.datacenter.region_code_list: self.resource.datacenter.region_code_list.append(rc) updated = True - self.logger.warn("TopologyManager: datacenter updated (new region code, " + rc + ")") + self.logger.warn("TopologyManager: datacenter updated " + "(new region code, " + rc + ")") for rrc in self.resource.datacenter.region_code_list: if rrc not in _datacenter.region_code_list: self.resource.datacenter.region_code_list.remove(rrc) updated = True - self.logger.warn("TopologyManager: datacenter updated (region code, " + rrc + ", removed)") + self.logger.warn("TopologyManager: datacenter updated " + "(region code, " + rrc + ", removed)") for rk in 
_datacenter.resources.keys(): exist = False @@ -457,11 +511,14 @@ class TopologyManager(threading.Thread): if exist is False: r = _datacenter.resources[rk] if isinstance(r, HostGroup): - self.resource.datacenter.resources[rk] = self.resource.host_groups[rk] + self.resource.datacenter.resources[rk] = \ + self.resource.host_groups[rk] elif isinstance(r, Host): - self.resource.datacenter.resources[rk] = self.resource.hosts[rk] + self.resource.datacenter.resources[rk] = \ + self.resource.hosts[rk] updated = True - self.logger.warn("TopologyManager: datacenter updated (new resource)") + self.logger.warn("TopologyManager: datacenter updated " + "(new resource)") for rrk in self.resource.datacenter.resources.keys(): exist = False @@ -472,7 +529,8 @@ class TopologyManager(threading.Thread): if exist is False: del self.resource.datacenter.resources[rrk] updated = True - self.logger.warn("TopologyManager: datacenter updated (resource removed)") + self.logger.warn("TopologyManager: datacenter updated " + "(resource removed)") for sk in _datacenter.root_switches.keys(): exist = False @@ -481,9 +539,11 @@ class TopologyManager(threading.Thread): exist = True break if exist is False: - self.resource.datacenter.root_switches[sk] = self.resource.switches[sk] + self.resource.datacenter.root_switches[sk] = \ + self.resource.switches[sk] link_updated = True - self.logger.warn("TopologyManager: datacenter updated (new switch)") + self.logger.warn("TopologyManager: datacenter updated " + "(new switch)") for rsk in self.resource.datacenter.root_switches.keys(): exist = False @@ -494,6 +554,7 @@ class TopologyManager(threading.Thread): if exist is False: del self.resource.datacenter.root_switches[rsk] link_updated = True - self.logger.warn("TopologyManager: datacenter updated (switch removed)") + self.logger.warn("TopologyManager: datacenter updated " + "(switch removed)") return (updated, link_updated) diff --git a/valet/engine/resource_manager/topology_simulator.py 
b/valet/engine/resource_manager/topology_simulator.py index ebe5b9d..b837da1 100644 --- a/valet/engine/resource_manager/topology_simulator.py +++ b/valet/engine/resource_manager/topology_simulator.py @@ -1,29 +1,36 @@ # # Copyright 2014-2017 AT&T Intellectual Property -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -# - Simulate datacenter configurations (i.e., layout, cabling) +"""Simulate datacenter configurations (i.e., layout, cabling).""" -from valet.engine.resource_manager.resource_base import HostGroup, Host, Switch, Link +from valet.engine.resource_manager.resource_base \ + import HostGroup, Host, Switch, Link class SimTopology(object): + """Simulate Topology class. + + Sim network and host topology for datacenters. 
+ """ def __init__(self, _config): + """Init.""" self.config = _config def set_topology(self, _datacenter, _host_groups, _hosts, _switches): + """Return success after setting network and host topology.""" self._set_network_topology(_switches) self._set_host_topology(_datacenter, _host_groups, _hosts, _switches) @@ -64,7 +71,8 @@ class SimTopology(object): ps = None if (s_num % 2) == 0: if (s_num + 1) < self.config.num_of_spine_switches: - ps = _switches[root_switch.name + "s" + str(s_num + 1)] + ps = _switches[root_switch.name + "s" + + str(s_num + 1)] else: ps = _switches[root_switch.name + "s" + str(s_num - 1)] if ps is not None: @@ -80,7 +88,8 @@ class SimTopology(object): parent_switch_list = [] if self.config.num_of_spine_switches > 0: for s_num in range(0, self.config.num_of_spine_switches): - parent_switch_list.append(_switches[root_switch.name + "s" + str(s_num)]) + parent_switch_list.append(_switches[root_switch.name + + "s" + str(s_num)]) else: parent_switch_list.append(_switches[root_switch.name]) diff --git a/valet/ha/ha_valet.py b/valet/ha/ha_valet.py index 7c47610..f15a431 100644 --- a/valet/ha/ha_valet.py +++ b/valet/ha/ha_valet.py @@ -1,5 +1,3 @@ -#!/usr/bin/env python -# vi: sw=4 ts=4: # # Copyright 2014-2017 AT&T Intellectual Property # @@ -15,25 +13,25 @@ # See the License for the specific language governing permissions and # limitations under the License. -""" +"""HA Valet. - Mnemonic: ha_valet.py - Abstract: High availability script for valet processes. - starts it's configured processes, and pings for their availability. - If local instances are not running, then makes the - current instances start. If it finds multiple instances running, then - determines which instance should be shut down based on priorities. +Mnemonic: ha_valet.py +Abstract: High availability script for valet processes. Starts it's + configured processes, and pings for their availability. If local + instances are not running, then makes the current instances + start. 
If it finds multiple instances running, then determines + which instance should be shut down based on priorities. - Author: Amnon Sagiv based on ha_tegu by Kaustubh Joshi +Author: Amnon Sagiv based on ha_tegu by Kaustubh Joshi ------------------------------------------------------------------------------ Algorithm ----------- The ha_valet script runs on each valet node in a continuous loop checking for - heartbeats from all the valet nodes found in the "stand_by_list" conf property once - every 5 secs (default). A heartbeat is obtained by invoking the "test_command" - conf property. + heartbeats from all the valet nodes found in the "stand_by_list" conf property + once every 5 secs (default). A heartbeat is obtained by invoking the + "test_command" conf property. If exactly one monitored process instance is running, the script does nothing. If no instance is running, then the local instance is activated after waiting for 5*priority seconds to let a higher priority valet take over @@ -70,10 +68,10 @@ max_num_of_logs = 10 PRIMARY_SETUP = 1 -RETRY_COUNT = 3 # How many times to retry ping command -CONNECT_TIMEOUT = 3 # Ping timeout -MAX_QUICK_STARTS = 10 # we stop if there are > 10 restarts in quick succession -QUICK_RESTART_SEC = 150 # we consider it a quick restart if less than this +RETRY_COUNT = 3 # How many times to retry ping command +CONNECT_TIMEOUT = 3 # Ping timeout +MAX_QUICK_STARTS = 10 # we stop if there are > 10 restart in quick succession +QUICK_RESTART_SEC = 150 # we consider it a quick restart if less than this # HA Configuration HEARTBEAT_SEC = 5 # Heartbeat interval in seconds @@ -111,7 +109,7 @@ CONF.register_opts(havalet_opts, ostro_group) def read_conf(): - """returns dictionary of configured processes""" + """Return dictionary of configured processes.""" return dict([ ('Ostro', { NAME: 'Ostro', @@ -143,7 +141,8 @@ def prepare_log(obj, name): obj.log.setLevel(logging.DEBUG) # logging.register_options(CONF) # logging.setup(CONF, 'valet') - handler = 
logging.handlers.RotatingFileHandler(LOG_DIR + name + '.log', maxBytes=max_log_size, + handler = logging.handlers.RotatingFileHandler(LOG_DIR + name + '.log', + maxBytes=max_log_size, backupCount=max_num_of_logs) fmt = logging.Formatter('%(asctime)s %(levelname)s %(message)s') handler.setFormatter(fmt) @@ -153,14 +152,16 @@ def prepare_log(obj, name): class HaValetThread (threading.Thread): def __init__(self, data, exit_event): + """Initialize HAValetThread.""" threading.Thread.__init__(self) self.data = data self.log = None def run(self): - """Main function""" + """Main function.""" prepare_log(self, self.data[NAME]) - self.log.info('HA Valet - ' + self.data[NAME] + ' Watcher Thread - starting') + self.log.info('HA Valet - ' + self.data[NAME] + + ' Watcher Thread - starting') fqdn_list = [] this_node = socket.getfqdn() @@ -181,7 +182,8 @@ class HaValetThread (threading.Thread): self.data[STAND_BY_LIST] = standby_list self.log.debug("modified stand by list: " + str(standby_list)) except ValueError: - self.log.debug("host " + this_node + " is not in standby list: %s - continue" + self.log.debug("host " + this_node + + " is not in standby list: %s - continue" % str(standby_list)) break @@ -193,7 +195,7 @@ class HaValetThread (threading.Thread): pass def _main_loop(self, this_node): - """ Main heartbeat and liveness check loop + """Main heartbeat and liveness check loop. :param this_node: host name :type this_node: string @@ -225,16 +227,19 @@ class HaValetThread (threading.Thread): # No valet running. Wait for higher priority valet to activate. 
time.sleep(HEARTBEAT_SEC * my_priority) - self.log.info('checking status here - ' + host + ', my priority: ' + str(my_priority)) + self.log.info('checking status here - ' + host + + ', my priority: ' + str(my_priority)) i_am_active, priority = self._is_active(eval(test_command)) - self.log.info(host + ': host_active = ' + str(i_am_active) + ', ' + str(priority)) + self.log.info(host + ': host_active = ' + str(i_am_active) + + ', ' + str(priority)) any_active = i_am_active self.log.info('any active = ' + str(any_active)) # Check for active valets standby_list_is_empty = not standby_list if not standby_list_is_empty: - self.log.debug('main loop: standby_list is not empty ' + str(standby_list)) + self.log.debug('main loop: standby_list is not empty ' + + str(standby_list)) for host_in_list in standby_list: if host_in_list == this_node: self.log.info('host_in_list is this_node - skipping') @@ -242,39 +247,51 @@ class HaValetThread (threading.Thread): self.log.info('checking status on - ' + host_in_list) host = host_in_list - host_active, host_priority = self._is_active(eval(test_command)) + host_active, host_priority = \ + self._is_active(eval(test_command)) host = self.data.get(HOST, 'localhost') - self.log.info(host_in_list + ' - host_active = ' + str(host_active) + ', ' + str(host_priority)) + self.log.info(host_in_list + ' - host_active = ' + + str(host_active) + ', ' + str(host_priority)) # Check for split brain: 2 valets active if i_am_active and host_active: - self.log.info('found two live instances, checking priorities') + self.log.info('found two live instances, ' + 'checking priorities') should_be_active = self._should_be_active(host_priority, my_priority) if should_be_active: - self.log.info('deactivate myself, ' + host_in_list + ' already running') - self._deactivate_process(eval(stop_command)) # Deactivate myself + self.log.info('deactivate myself, ' + host_in_list + + ' already running') + # Deactivate myself + self._deactivate_process(eval(stop_command)) 
i_am_active = False else: - self.log.info('deactivate ' + self.data[NAME] + ' on ' + host_in_list + + self.log.info('deactivate ' + self.data[NAME] + + ' on ' + host_in_list + ', already running here') host = host_in_list - self._deactivate_process(eval(stop_command)) # Deactivate other valet + # Deactivate other valet + self._deactivate_process(eval(stop_command)) host = self.data.get(HOST, 'localhost') # Track that at-least one valet is active any_active = any_active or host_active # If no active process or I'm primary, then we must try to start one - if not any_active or (not i_am_active and my_priority == PRIMARY_SETUP): + if not any_active or \ + (not i_am_active and my_priority == PRIMARY_SETUP): self.log.warn('there is no instance up') - self.log.info('Im primary instance: ' + str(my_priority is PRIMARY_SETUP)) + self.log.info('Im primary instance: ' + + str(my_priority is PRIMARY_SETUP)) if priority_wait or my_priority == PRIMARY_SETUP: now = int(time.time()) - if now - last_start < QUICK_RESTART_SEC: # quick restart (crash?) + # quick restart (crash?) 
+ if now - last_start < QUICK_RESTART_SEC: quick_start += 1 if quick_start > MAX_QUICK_STARTS: - self.log.critical("too many restarts in quick succession.") + self.log.critical("too many restarts " + "in quick succession.") else: - quick_start = 0 # reset if it's been a while since last restart + # reset if it's been a while since last restart + quick_start = 0 if last_start == 0: diff = "never by this instance" @@ -283,12 +300,16 @@ class HaValetThread (threading.Thread): last_start = now priority_wait = False - if (not i_am_active and my_priority == PRIMARY_SETUP) or (standby_list is not None): - self.log.info('no running instance found, starting here; last start %s' % diff) + if (not i_am_active and my_priority == PRIMARY_SETUP) or \ + (standby_list is not None): + self.log.info('no running instance found, ' + 'starting here; last start %s' % diff) self._activate_process(start_command, my_priority) else: - host = standby_list[0] # LIMITATION - supporting only 1 stand by host - self.log.info('no running instances found, starting on %s; last start %s' % (host, diff)) + # LIMITATION - supporting only 1 stand by host + host = standby_list[0] + self.log.info('no running instances found, starting ' + 'on %s; last start %s' % (host, diff)) self._activate_process(start_command, my_priority) host = self.data.get(HOST, 'localhost') else: @@ -298,10 +319,13 @@ class HaValetThread (threading.Thread): # end loop def _should_be_active(self, host_priority, my_priority): - """ Returns True if host should be active as opposed to current node, based on the hosts priorities. + """Should Be Active. - Lower value means higher Priority, - 0 (zero) - invalid priority (e.g. process is down) + Returns True if host should be active as opposed to current node, + based on the hosts priorities. + + Lower value means higher Priority, + 0 (zero) - invalid priority (e.g. 
process is down) :param host_priority: other host's priority :type host_priority: int @@ -310,38 +334,42 @@ class HaValetThread (threading.Thread): :return: True/False :rtype: bool """ - self.log.info('my priority is %d, remote priority is %d' % (my_priority, host_priority)) + self.log.info('my priority is %d, remote priority is %d' % + (my_priority, host_priority)) return host_priority < my_priority def _is_active(self, call): - """ Return 'True, Priority' if valet is running on host + """_is_active. - 'False, None' Otherwise. + Return 'True, Priority' if valet is running on host + 'False, None' Otherwise. """ - # must use no-proxy to avoid proxy servers gumming up the works for i in xrange(RETRY_COUNT): try: self.log.info('ping (retry %d): %s' % (i, call)) - proc = subprocess.Popen(call, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) + proc = subprocess.Popen(call, stdout=subprocess.PIPE, + stderr=subprocess.PIPE, shell=True) priority = proc.wait() if priority == 255: # no route to host priority = 0 out, err = proc.communicate() self.log.debug('out: ' + out + ', err: ' + err) - self.log.info('ping result (should be > 0): %s' % (str(priority))) + self.log.info('ping result (should be > 0): %s' + % (str(priority))) return (priority > 0), priority except subprocess.CalledProcessError: - self.log.error('ping error: ' + str(subprocess.CalledProcessError)) + self.log.error('ping error: ' + + str(subprocess.CalledProcessError)) continue return False, None def _deactivate_process(self, deactivate_command): - """ Deactivate valet on a given host. If host is omitted, local + """Deactivate Process. - valet is stopped. Returns True if successful, False on error. + Deactivate valet on a given host. If host is omitted, local + valet is stopped. Returns True if successful, False on error. 
""" - try: # call = "'" + deactivate_command % (PROTO, host, port) + "'" self.log.info('deactivate_command: ' + deactivate_command) @@ -352,11 +380,11 @@ class HaValetThread (threading.Thread): return False def _activate_process(self, activate_command, priority): - """ Activate valet on a given host. If host is omitted, local + """Activate Process. - valet is started. Returns True if successful, False on error. + Activate valet on a given host. If host is omitted, local + valet is started. Returns True if successful, False on error. """ - try: self.log.info('activate_command: ' + activate_command) subprocess.check_call(activate_command, shell=True) @@ -368,27 +396,31 @@ class HaValetThread (threading.Thread): class HAValet(object): + """""" def __init__(self): + """Init HAValet object.""" if not os.path.exists(LOG_DIR): os.makedirs(LOG_DIR) self.log = None @DeprecationWarning - def _parse_valet_conf_v010(self, conf_file_name=DEFAULT_CONF_FILE, process=''): - """ This function reads the valet config file and returns configuration + def _parse_valet_conf_v010(self, conf_file_name=DEFAULT_CONF_FILE, + process=''): + """Parse Valet Conf v010. - attributes in key/value format + This function reads the valet config file and returns configuration + attributes in key/value format :param conf_file_name: config file name :type conf_file_name: string :param process: specific process name - when not supplied - the module launches all the processes in the configuration + when not supplied - the module launches all the + processes in the configuration :type process: string :return: dictionary of configured monitored processes :rtype: dict """ - cdata = {} section = '' @@ -423,14 +455,16 @@ class HAValet(object): return cdata def _valid_process_conf_data(self, process_data): - """ verify all mandatory parameters are found in the monitored process configuration only standby_list is optional + """Valid Process conf data. 
+ + verify all mandatory parameters are found in the monitored process + configuration only standby_list is optional :param process_data: specific process configuration parameters :type process_data: dict :return: are all mandatory parameters are found :rtype: bool """ - if (process_data.get(HOST) is not None and process_data.get(PRIORITY) is not None and process_data.get(ORDER) is not None and @@ -442,7 +476,7 @@ class HAValet(object): return False def start(self): - """Start valet HA - Main function""" + """Start valet HA - Main function.""" prepare_log(self, 'havalet') self.log.info('ha_valet v1.1 starting') @@ -460,13 +494,15 @@ class HAValet(object): for proc in proc_sorted: if self._valid_process_conf_data(proc): - self.log.info('Launching: ' + proc[NAME] + ' - parameters: ' + str(proc)) + self.log.info('Launching: ' + proc[NAME] + ' - parameters: ' + + str(proc)) thread = HaValetThread(proc, exit_event) time.sleep(HEARTBEAT_SEC) thread.start() threads.append(thread) else: - self.log.info(proc[NAME] + " section is missing mandatory parameter.") + self.log.info(proc[NAME] + + " section is missing mandatory parameter.") continue self.log.info('on air.') diff --git a/valet/tests/api/config.py b/valet/tests/api/config.py index 74d490e..408ab56 100644 --- a/valet/tests/api/config.py +++ b/valet/tests/api/config.py @@ -13,6 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +"""Config.""" + from pecan.hooks import TransactionHook from valet.api.db import models diff --git a/valet/tests/api/conftest.py b/valet/tests/api/conftest.py index c071b7f..bf80e39 100644 --- a/valet/tests/api/conftest.py +++ b/valet/tests/api/conftest.py @@ -13,6 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+"""Conf Test - Test config file (app, connection, session, etc).""" + from copy import deepcopy import os from pecan import conf @@ -33,12 +35,14 @@ BIND = 'mysql+pymysql://root:password@127.0.0.1' def config_file(): + """Return config file.""" here = os.path.abspath(os.path.dirname(__file__)) return os.path.join(here, 'config.py') @pytest.fixture(scope='session') def app(request): + """Return test app based on config file.""" config = configuration.conf_from_file(config_file()).to_dict() # Add the appropriate connection string to the app config. @@ -60,8 +64,10 @@ def connection(app, request): print("=" * 80) print("CREATING TEMPORARY DATABASE FOR TESTS") print("=" * 80) - subprocess.call(['mysqladmin', '-f', '-uroot', '-ppassword', 'drop', DBNAME]) - subprocess.call(['mysqladmin', '-f', '-uroot', '-ppassword', 'create', DBNAME]) + subprocess.call(['mysqladmin', '-f', '-uroot', '-ppassword', 'drop', + DBNAME]) + subprocess.call(['mysqladmin', '-f', '-uroot', '-ppassword', 'create', + DBNAME]) # Bind and create the database tables _db.clear() @@ -94,7 +100,7 @@ def connection(app, request): @pytest.fixture(scope='function') def session(connection, request): - """Creates a new database session for a test.""" + """Create new database session for a test.""" _config = configuration.conf_from_file(config_file()).to_dict() config = deepcopy(_config) @@ -137,11 +143,16 @@ def session(connection, request): class TestApp(object): - """ A controller test starts a database transaction and creates a fake WSGI app. """ + """Test App Class. + + A controller test starts a database transaction + and creates a fake WSGI app. + """ __headers__ = {} def __init__(self, app): + """Init Test App.""" self.app = app def _do_request(self, url, method='GET', **kwargs): @@ -156,7 +167,7 @@ class TestApp(object): return methods.get(method, self.app.get)(str(url), **kwargs) def post_json(self, url, **kwargs): - """ note: + """Post json. 
@param (string) url - The URL to emulate a POST request to @returns (paste.fixture.TestResponse) @@ -164,7 +175,7 @@ class TestApp(object): return self._do_request(url, 'POSTJ', **kwargs) def post(self, url, **kwargs): - """ note: + """Post. @param (string) url - The URL to emulate a POST request to @returns (paste.fixture.TestResponse) @@ -172,7 +183,7 @@ class TestApp(object): return self._do_request(url, 'POST', **kwargs) def get(self, url, **kwargs): - """ note: + """Get. @param (string) url - The URL to emulate a GET request to @returns (paste.fixture.TestResponse) @@ -180,7 +191,7 @@ class TestApp(object): return self._do_request(url, 'GET', **kwargs) def put(self, url, **kwargs): - """ note: + """Put. @param (string) url - The URL to emulate a PUT request to @returns (paste.fixture.TestResponse) @@ -188,7 +199,7 @@ class TestApp(object): return self._do_request(url, 'PUT', **kwargs) def delete(self, url, **kwargs): - """ note: + """Delete. @param (string) url - The URL to emulate a DELETE request to @returns (paste.fixture.TestResponse) diff --git a/valet/tests/api/controllers/__init__.py b/valet/tests/api/controllers/__init__.py index 43a3e5d..e178959 100644 --- a/valet/tests/api/controllers/__init__.py +++ b/valet/tests/api/controllers/__init__.py @@ -1,19 +1,25 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# Copyright 2014-2017 AT&T Intellectual Property +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Init.""" + from uuid import UUID def is_valid_uuid4(uuid_string): - """ Validate that a UUID string is in fact a valid uuid4. + """Validate that a UUID string is in fact a valid uuid4. Happily, the uuid module does the actual checking for us. @@ -22,7 +28,6 @@ def is_valid_uuid4(uuid_string): to the UUID() call, otherwise any 32-character hex string is considered valid. """ - try: val = UUID(uuid_string, version=4) except ValueError: diff --git a/valet/tests/api/controllers/test_plans.py b/valet/tests/api/controllers/test_plans.py index c552a1f..783dc5a 100644 --- a/valet/tests/api/controllers/test_plans.py +++ b/valet/tests/api/controllers/test_plans.py @@ -13,6 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+"""Test Plans.""" + from uuid import uuid4 from valet.api.db.models import Plan, Placement @@ -24,18 +26,23 @@ PLAN_NAME = 'ihaveaplan' class TestPlansController(object): + """Test Plans Controller Class.""" + def test_get_index_no_plans(self, session): + """Test getting plans where there are none, should be empty.""" result = session.app.get('/v1/plans/') assert result.status_int == 200 assert result.json == [] def test_get_index_a_plan(self, session): + """Test get a plan using an index, should get a plan name.""" Plan(PLAN_NAME, STACK_ID) session.commit() result = session.app.get('/v1/plans/').json assert result == [PLAN_NAME] def test_single_plan_should_have_one_item(self, session): + """Test getting a single plan with one item.""" Plan(PLAN_NAME, STACK_ID) session.commit() result = session.app.get('/v1/plans/') @@ -43,6 +50,7 @@ class TestPlansController(object): assert len(result.json) == 1 def test_list_a_few_plans(self, session): + """Test returning a list of plans.""" for plan_number in range(20): stack_id = str(uuid4()) Plan('foo_%s' % plan_number, stack_id) @@ -55,21 +63,26 @@ class TestPlansController(object): class TestPlansItemController(object): + """Test Plans Item Controller Class.""" + def test_get_index_single_plan(self, session): + """Test get index of a single plan.""" Plan(PLAN_NAME, STACK_ID) session.commit() - result = session.app.get('/v1/plans/%s/' % (STACK_ID)) + result = session.app.get('/v1/plans/%s/' % STACK_ID) assert result.status_int == 200 def test_get_index_no_plan(self, session): - result = session.app.get('/v1/plans/%s/' % (STACK_ID), + """Test getting index of no plan, should return 404.""" + result = session.app.get('/v1/plans/%s/' % STACK_ID, expect_errors=True) assert result.status_int == 404 def test_get_index_single_plan_data(self, session): + """Test getting a single plan data.""" Plan(PLAN_NAME, STACK_ID) session.commit() - result = session.app.get('/v1/plans/%s/' % (STACK_ID)) + result = session.app.get('/v1/plans/%s/' 
% STACK_ID) json = result.json assert is_valid_uuid4(json['id']) assert json['name'] == PLAN_NAME @@ -77,6 +90,7 @@ class TestPlansItemController(object): assert json['stack_id'] == STACK_ID def test_get_plan_refs(self, session): + """Test get plan refs by getting app json result.""" plan = Plan(PLAN_NAME, STACK_ID) Placement( 'placement_1', str(uuid4()), @@ -89,7 +103,7 @@ class TestPlansItemController(object): location='foo_2' ) session.commit() - result = session.app.get('/v1/plans/%s/' % (STACK_ID)) + result = session.app.get('/v1/plans/%s/' % STACK_ID) json = result.json assert is_valid_uuid4(json['id']) assert json['name'] == PLAN_NAME diff --git a/valet/tests/base.py b/valet/tests/base.py index 5740973..7b94b12 100644 --- a/valet/tests/base.py +++ b/valet/tests/base.py @@ -1,18 +1,20 @@ # # Copyright 2014-2017 AT&T Intellectual Property -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+"""Base.""" + from oslo_config import fixture as fixture_config from oslo_log import log as logging from oslotest.base import BaseTestCase @@ -26,24 +28,31 @@ class Base(BaseTestCase): """Test case base class for all unit tests.""" def __init__(self, *args, **kwds): - ''' ''' + """Init Base.""" super(Base, self).__init__(*args, **kwds) self.CONF = self.useFixture(fixture_config.Config()).conf init.prepare(self.CONF) def setUp(self): + """Setup.""" super(Base, self).setUp() def run_test(self, stack_name, template_path): - ''' main function ''' + """Main Function.""" pass def validate(self, result): + """Validate.""" + # TODO(CM): Maybe fix unnecessary obfuscation of assertEqual code. self.assertEqual(True, result.ok, result.message) def validate_test(self, result): + """Validate Test.""" + # TODO(CM): Maybe fix unnecessary obfuscation of assertTrue code. self.assertTrue(result) def get_name(self): + """Get Name.""" + # TODO(CM): Make this function actually do something. pass diff --git a/valet/tests/functional/etc/valet_validator.cfg b/valet/tests/functional/etc/valet_validator.cfg index 2d9182a..53111b4 100644 --- a/valet/tests/functional/etc/valet_validator.cfg +++ b/valet/tests/functional/etc/valet_validator.cfg @@ -5,7 +5,7 @@ default_log_levels="valet_validator=DEBUG,tests=DEBUG,compute=DEBUG,common=DEBUG [auth] OS_AUTH_URL_WITH_VERSION=http://controller:5000/v2.0 OS_USERNAME=admin -OS_PASSWORD=qwer4321 +OS_PASSWORD=PASSWORD OS_TENANT_NAME=demo TOKEN_EXPIRATION=600 @@ -20,7 +20,7 @@ VALUE=output_value VERSION=1 [valet] -HOST=http://192.168.10.18:8090/v1 +HOST=http://127.0.0.1:8090/v1 DELAY_DURATION=30 PAUSE=10 TRIES_TO_CREATE=5 @@ -32,7 +32,7 @@ TEMPLATE_NAME=affinity_basic_2_instances [test_affinity_3] STACK_NAME=affinity_3_stack -TEMPLATE_NAME=affinity_ 3_Instances +TEMPLATE_NAME=affinity_3_Instances [test_diversity] STACK_NAME=basic_diversity_stack @@ -45,4 +45,3 @@ TEMPLATE_NAME=diversity_between_2_affinity [test_exclusivity] 
STACK_NAME=basic_exclusivity_stack TEMPLATE_NAME=exclusivity_basic_2_instances - diff --git a/valet/tests/functional/valet_validator/common/__init__.py b/valet/tests/functional/valet_validator/common/__init__.py index 80f46f4..73523c8 100644 --- a/valet/tests/functional/valet_validator/common/__init__.py +++ b/valet/tests/functional/valet_validator/common/__init__.py @@ -1,14 +1,20 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# Copyright 2014-2017 AT&T Intellectual Property +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Init.""" + from oslo_log import log as logging import time from valet.tests.functional.valet_validator.common.init import CONF, COLORS @@ -17,32 +23,42 @@ LOG = logging.getLogger(__name__) class Result(object): + """Class consisting of ok (bool) and a string message.""" + ok = False message = "" def __init__(self, ok=True, msg=""): + """Init a Result.""" self.ok = ok self.message = msg class GeneralLogger(object): + """Class consisting of different logging functions.""" + @staticmethod def delay(duration=None): + """Delay method by performing time sleep.""" time.sleep(duration or CONF.heat.DELAY_DURATION) @staticmethod def log_info(msg): + """Generic log info method.""" LOG.info("%s %s %s" % (COLORS["L_GREEN"], msg, COLORS["WHITE"])) @staticmethod def log_error(msg, trc_back=""): + """Log error mthd with msg and trace back.""" LOG.error("%s %s %s" % (COLORS["L_RED"], msg, COLORS["WHITE"])) LOG.error("%s %s %s" % (COLORS["L_RED"], trc_back, COLORS["WHITE"])) @staticmethod def log_debug(msg): + """Log debug method.""" LOG.debug("%s %s %s" % (COLORS["L_BLUE"], msg, COLORS["WHITE"])) @staticmethod def log_group(msg): + """Log info method for group.""" LOG.info("%s %s %s" % (COLORS["Yellow"], msg, COLORS["WHITE"])) diff --git a/valet/tests/functional/valet_validator/common/auth.py b/valet/tests/functional/valet_validator/common/auth.py index ca156c6..0c7d220 100644 --- a/valet/tests/functional/valet_validator/common/auth.py +++ b/valet/tests/functional/valet_validator/common/auth.py @@ -13,6 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+"""Auth.""" + from keystoneclient.auth.identity import v2 as identity from keystoneclient import session from oslo_log import log as logging @@ -24,37 +26,46 @@ MIN_TOKEN_LIFE_SECONDS = 120 class Auth(object): - ''' Singleton class for authentication token ''' + """Singleton class for authentication token.""" + auth = None session = None @staticmethod def _init(): if Auth.is_auth_invalid(): - Auth.auth = identity.Password(auth_url=CONF.auth.OS_AUTH_URL_WITH_VERSION, - username=CONF.auth.OS_USERNAME, - password=CONF.auth.OS_PASSWORD, - tenant_name=CONF.auth.OS_TENANT_NAME) + Auth.auth = identity.Password( + auth_url=CONF.auth.OS_AUTH_URL_WITH_VERSION, + username=CONF.auth.OS_USERNAME, + password=CONF.auth.OS_PASSWORD, + tenant_name=CONF.auth.OS_TENANT_NAME) Auth.session = session.Session(auth=Auth.auth) @staticmethod def get_password_plugin(): + """Return auth after init.""" Auth._init() return Auth.auth @staticmethod def get_auth_token(): + """Return auth token for session.""" return Auth.get_password_plugin().get_token(Auth.get_auth_session()) @staticmethod def get_auth_session(): + """Return auth session.""" Auth._init() return Auth.session @staticmethod def get_project_id(): - return Auth.get_password_plugin().get_project_id(Auth.get_auth_session()) + """Return auth_session based on project_id.""" + return Auth.get_password_plugin().get_project_id( + Auth.get_auth_session()) @staticmethod def is_auth_invalid(): - return Auth.auth is None or Auth.auth.get_auth_ref(Auth.session).will_expire_soon(CONF.auth.TOKEN_EXPIRATION) + """Return True/False based on status of auth.""" + return Auth.auth is None or Auth.auth.get_auth_ref( + Auth.session).will_expire_soon(CONF.auth.TOKEN_EXPIRATION) diff --git a/valet/tests/functional/valet_validator/common/init.py b/valet/tests/functional/valet_validator/common/init.py index cb3d067..086209f 100644 --- a/valet/tests/functional/valet_validator/common/init.py +++ b/valet/tests/functional/valet_validator/common/init.py @@ -13,6 
+13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +"""Init.""" + import os from oslo_config import cfg from oslo_log import log as logging @@ -45,7 +47,8 @@ COLORS = \ opts_auth = \ [ - cfg.StrOpt('OS_AUTH_URL_WITH_VERSION', default='http://controller:5000/v2.0'), + cfg.StrOpt('OS_AUTH_URL_WITH_VERSION', + default='http://controller:5000/v2.0'), cfg.StrOpt('OS_USERNAME', default="addddmin"), cfg.StrOpt('OS_PASSWORD', default="qwer4321"), cfg.StrOpt('OS_TENANT_NAME', default="demo"), @@ -87,6 +90,7 @@ _initialized = False def prepare(CONF): + """Prepare config options.""" global _initialized try: if _initialized is False: @@ -94,9 +98,12 @@ def prepare(CONF): _initialized = True # Adding config file - possible_topdir = os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir, os.pardir, os.pardir)) + possible_topdir = os.path.normpath( + os.path.join(os.path.abspath(__file__), os.pardir, + os.pardir, os.pardir)) conf_file = os.path.join(possible_topdir, 'etc', DOMAIN + '.cfg') - CONF([], project=DOMAIN, default_config_files=[conf_file] or None, validate_default_values=True) + CONF([], project=DOMAIN, default_config_files=[conf_file] or None, + validate_default_values=True) logging.setup(CONF, DOMAIN) diff --git a/valet/tests/functional/valet_validator/common/resources.py b/valet/tests/functional/valet_validator/common/resources.py index cf0accf..3fe9f16 100644 --- a/valet/tests/functional/valet_validator/common/resources.py +++ b/valet/tests/functional/valet_validator/common/resources.py @@ -13,6 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+"""Resources.""" + from oslo_log import log as logging import traceback import yaml @@ -23,8 +25,10 @@ TEMPLATE_RES = "resources" class TemplateResources(object): - ''' Heat template parser ''' + """Heat template parser.""" + def __init__(self, template): + """Init Template resources.""" self.instances = [] self.groups = {} self.template_data = None @@ -47,7 +51,10 @@ class TemplateResources(object): class Instance(object): + """Contains instance details from template (name, image, flavor, etc).""" + def __init__(self, doc, instance_name): + """Init Instance Object.""" self.resource_name = instance_name self.name = None self.image = None @@ -57,6 +64,7 @@ class Instance(object): self.fill(doc, instance_name) def fill(self, doc, instance_name): + """Fill Instance details from template properties.""" try: template_property = doc[TEMPLATE_RES][instance_name]["properties"] @@ -69,12 +77,17 @@ class Instance(object): LOG.error(traceback.format_exc()) def get_ins(self): + """Return instance data.""" return("type: %s, name: %s, image: %s, flavor: %s, resource_name: %s " - % (self.type, self.name, self.image, self.flavor, self.resource_name)) + % (self.type, self.name, self.image, + self.flavor, self.resource_name)) class Group(object): + """Class containing group details (type, name, resources) from template.""" + def __init__(self, doc, group_name): + """Init Group Object.""" self.group_type = None self.group_name = None self.level = None @@ -83,11 +96,13 @@ class Group(object): self.fill(doc, group_name) def fill(self, doc, group_name): + """Fill group from template properties.""" try: template_property = doc[TEMPLATE_RES][group_name]["properties"] self.group_type = template_property["group_type"] - self.group_name = template_property["group_name"] if "group_name" in template_property else None + self.group_name = template_property["group_name"] \ + if "group_name" in template_property else None self.level = template_property["level"] for res in 
template_property[TEMPLATE_RES]: self.group_resources.append(res["get_resource"]) diff --git a/valet/tests/functional/valet_validator/compute/analyzer.py b/valet/tests/functional/valet_validator/compute/analyzer.py index 15ab574..a298ba7 100644 --- a/valet/tests/functional/valet_validator/compute/analyzer.py +++ b/valet/tests/functional/valet_validator/compute/analyzer.py @@ -13,6 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +"""Analyzer.""" + from novaclient import client import traceback from valet.tests.functional.valet_validator.common import Result, GeneralLogger @@ -21,24 +23,27 @@ from valet.tests.functional.valet_validator.common.init import CONF class Analyzer(object): + """Methods to perform analysis on hosts, vms, racks.""" def __init__(self): - ''' initializing the analyzer - connecting to nova ''' + """Initializing the analyzer - connecting to nova.""" GeneralLogger.log_info("Initializing Analyzer") - self.nova = client.Client(CONF.nova.VERSION, session=Auth.get_auth_session()) + self.nova = client.Client(CONF.nova.VERSION, + session=Auth.get_auth_session()) def get_host_name(self, instance_name): - ''' Returning host by instance name ''' + """Returning host by instance name.""" serv = self.nova.servers.find(name=instance_name) return self.get_hostname(serv) def get_all_hosts(self, instances_list): - ''' Returning all hosts of all instances ''' + """Returning all hosts of all instances.""" GeneralLogger.log_debug("Getting hosts names") - return [self.get_host_name(instance.name) for instance in instances_list] + return [self.get_host_name(instance.name) + for instance in instances_list] def check(self, resources): - ''' Checking if all instances are on the Appropriate hosts and racks ''' + """Check if all instances are on the Appropriate hosts and racks.""" GeneralLogger.log_debug("Starting to check instances location") result = True @@ -46,39 +51,51 @@ class Analyzer(object): for key 
in resources.groups: group = resources.groups[key] - resources_to_compare = self.get_resources_to_compare(resources, group.group_resources) or group.group_resources - instances_for_group = self.get_group_instances(resources, resources_to_compare) + resources_to_compare = self.get_resources_to_compare( + resources, group.group_resources) or group.group_resources + instances_for_group = self.get_group_instances( + resources, resources_to_compare) hosts_list = self.get_all_hosts(instances_for_group) # switch case result = result and \ { - "affinity": self.are_the_same(hosts_list, group.level), - "diversity": self.are_different(hosts_list, group.level), - "exclusivity": self.are_we_alone(hosts_list, instances_for_group) + "affinity": self.are_the_same(hosts_list, + group.level), + "diversity": self.are_different(hosts_list, + group.level), + "exclusivity": self.are_we_alone(hosts_list, + instances_for_group) }[group.group_type] except Exception as ex: - GeneralLogger.log_error("Exception at method check: %s" % ex, traceback.format_exc()) + GeneralLogger.log_error("Exception at method check: %s" % ex, + traceback.format_exc()) result = False return Result(result) def get_resources_to_compare(self, resources, group_resources): + """Return resources to compare.""" resources_to_compare = [] try: - for group_name in group_resources: # ['test-affinity-group1', 'test-affinity-group2'] + # ['test-affinity-group1', 'test-affinity-group2'] + for group_name in group_resources: if "test" in group_name: - resources_to_compare.append(resources.groups[group_name].group_resources) + resources_to_compare.append( + resources.groups[group_name].group_resources) else: return None return resources_to_compare except Exception as ex: - GeneralLogger.log_error("Exception at method get_resources_to_compare: %s" % ex, traceback.format_exc()) + GeneralLogger.log_error("Exception at method " + "get_resources_to_compare: %s" + % ex, traceback.format_exc()) def are_we_alone(self, hosts_list, 
ins_for_group): + """Return result of whether any instances on host.""" try: # instances is all the instances on this host all_instances_on_host = self.get_instances_per_host(hosts_list) @@ -88,10 +105,11 @@ class Analyzer(object): return not all_instances_on_host except Exception as ex: - GeneralLogger.log_error("Exception at method are_we_alone: %s" % ex, traceback.format_exc()) + GeneralLogger.log_error("Exception at method are_we_alone: %s" + % ex, traceback.format_exc()) def get_instances_per_host(self, hosts_list): - ''' get_instances_per_host ''' + """Get number of instances per host.""" instances = [] try: for host in set(hosts_list): @@ -100,39 +118,50 @@ class Analyzer(object): return instances except Exception as ex: - GeneralLogger.log_error("Exception at method get_instances_per_host: %s" % ex, traceback.format_exc()) + GeneralLogger.log_error("Exception at method " + "get_instances_per_host: %s" + % ex, traceback.format_exc()) def are_different(self, hosts_list, level): - ''' Checking if all hosts (and racks) are different for all instances ''' + """Check if all hosts (and racks) are different for all instances.""" diction = {} try: for h in hosts_list: - if self.is_already_exists(diction, self.get_host_or_rack(level, h)): + if self.is_already_exists(diction, + self.get_host_or_rack(level, h)): return False return True except Exception as ex: - GeneralLogger.log_error("Exception at method are_all_hosts_different: %s" % ex, traceback.format_exc()) + GeneralLogger.log_error("Exception at method " + "are_all_hosts_different: %s" + % ex, traceback.format_exc()) return False def are_the_same(self, hosts_list, level): + """Check if all hosts (and racks) are the same for all instances.""" GeneralLogger.log_debug("Hosts are:") try: for h in hosts_list: - if self.compare_host(self.get_host_or_rack(level, h), self.get_host_or_rack(level, hosts_list[0])) is False: + if self.compare_host( + self.get_host_or_rack(level, h), + self.get_host_or_rack(level, 
hosts_list[0])) is False: return False return True except Exception as ex: - GeneralLogger.log_error("Exception at method are_all_hosts_different: %s" % ex, traceback.format_exc()) + GeneralLogger.log_error("Exception at method " + "are_all_hosts_different: %s" + % ex, traceback.format_exc()) return False def get_group_instances(self, resources, group_ins): - ''' gets the instance object according to the group_ins + """Get the instance object according to the group_ins. - group_ins - the group_resources name of the instances belong to this group (['my-instance-1', 'my-instance-2']) - ''' + group_ins - the group_resources name of the instances belong to + this group (['my-instance-1', 'my-instance-2']). + """ ins_for_group = [] try: for instance in resources.instances: @@ -141,13 +170,17 @@ class Analyzer(object): return ins_for_group except Exception as ex: - GeneralLogger.log_error("Exception at method get_group_instances: %s" % ex, traceback.format_exc()) + GeneralLogger.log_error("Exception at method " + "get_group_instances: %s" + % ex, traceback.format_exc()) return None def get_hostname(self, vm): + """Get hostname of vm.""" return str(getattr(vm, CONF.nova.ATTR)) def is_already_exists(self, diction, item): + """If item exists, return True, otherwise return False.""" if item in diction: return True @@ -155,18 +188,24 @@ class Analyzer(object): return False def compare_rack(self, current_host, first_host): + """Return True if racks of current and first host are equal.""" GeneralLogger.log_debug(current_host) return self.get_rack(current_host) == self.get_rack(first_host) def compare_host(self, current_host, first_host): + """Compare current host to first host.""" GeneralLogger.log_debug(current_host) return current_host == first_host def get_rack(self, host): + """Get rack from host.""" return (host.split("r")[1])[:2] def get_host_or_rack(self, level, host): + """Return host if current level is host, otherwise return rack.""" return host if level == "host" 
else self.get_rack(host) def get_vms_by_hypervisor(self, host): - return [vm for vm in self.nova.servers.list(search_opts={"all_tenants": True}) if self.get_hostname(vm) == host] + """Return vms based on hypervisor(host).""" + return [vm for vm in self.nova.servers.list( + search_opts={"all_tenants": True}) if self.get_hostname(vm) == host] diff --git a/valet/tests/functional/valet_validator/group_api/valet_group.py b/valet/tests/functional/valet_validator/group_api/valet_group.py index f53d26a..2be7d01 100644 --- a/valet/tests/functional/valet_validator/group_api/valet_group.py +++ b/valet/tests/functional/valet_validator/group_api/valet_group.py @@ -13,6 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +"""Valet Group.""" + import json import requests import traceback @@ -22,61 +24,73 @@ from valet.tests.functional.valet_validator.common.init import CONF class ValetGroup(object): + """Class of helpers and basic functions for Valet Groups.""" def __init__(self): + """Init groups_url and headers for Valet Group.""" self.groups_url = "%s/groups" % CONF.valet.HOST self.headers = {"X-Auth-Token": Auth.get_auth_token(), "Content-Type": "application/json"} def create_group(self, group_name, group_type): + """Create group given name and type.""" grp_data = {"name": group_name, "type": group_type} - return requests.post(self.groups_url, data=json.dumps(grp_data), headers=self.headers) + return requests.post(self.groups_url, data=json.dumps(grp_data), + headers=self.headers) def get_list_groups(self): + """Return a list of groups.""" list_response = requests.get(self.groups_url, headers=self.headers) return list_response.json()["groups"] def get_group_details(self, group_id): + """Return Group Details based on group_id.""" url = self.groups_url + "/" + group_id return requests.get(url, headers=self.headers) def update_group_members(self, group_id, members=None): + """Update group members based on group_id.""" 
add_member_url = self.groups_url + "/%s/members" % group_id data = json.dumps({"members": [members or Auth.get_project_id()]}) return requests.put(add_member_url, data=data, headers=self.headers) def update_group(self, group_id, new_description): + """Update group based on its id with a new description.""" url = self.groups_url + "/" + group_id new_data = json.dumps({"description": new_description}) return requests.put(url, new_data, headers=self.headers) def delete_group_member(self, group_id, member_id): + """Delete a single group member based on its member_id.""" url = self.groups_url + "/%s/members/%s" % (group_id, member_id) return requests.delete(url, headers=self.headers) def delete_all_group_member(self, group_id): + """Delete all members of a group based on group_id.""" url = self.groups_url + "/%s/members" % group_id return requests.delete(url, headers=self.headers) def delete_group(self, group_id): + """Delete group based on its id.""" url = self.groups_url + "/%s" % group_id return requests.delete(url, headers=self.headers) def get_group_id_and_members(self, group_name, group_type="exclusivity"): - ''' Checks if group name exists, if not - creates it + """Check if group name exists, if not - creates it. - returns group's id and members list - ''' + Returns group's id and members list. 
+ """ group_details = self.check_group_exists(group_name) try: if group_details is None: GeneralLogger.log_info("Creating group") create_response = self.create_group(group_name, group_type) - return create_response.json()["id"], create_response.json()["members"] + return create_response.json()["id"], \ + create_response.json()["members"] else: GeneralLogger.log_info("Group exists") @@ -86,17 +100,18 @@ class ValetGroup(object): GeneralLogger.log_error(traceback.format_exc()) def add_group_member(self, group_details): - ''' Checks if member exists in group, if not - adds it ''' + """Check if member exists in group, if not - adds it.""" # group_details - group id, group members try: if Auth.get_project_id() not in group_details[1]: GeneralLogger.log_info("Adding member to group") self.update_group_members(group_details[0]) except Exception: - GeneralLogger.log_error("Failed to add group member", traceback.format_exc()) + GeneralLogger.log_error("Failed to add group member", + traceback.format_exc()) def check_group_exists(self, group_name): - ''' Checks if group exists in group list, if not returns None ''' + """Check if group exists in group list, if not returns None.""" for grp in self.get_list_groups(): if grp["name"] == group_name: return grp["id"], grp["members"] @@ -104,12 +119,14 @@ class ValetGroup(object): return None def delete_all_groups(self): - DELETED = 204 + """Return deleted code 204 if all groups deleted.""" + deleted = 204 for group in self.get_list_groups(): - codes = [self.delete_all_group_member(group["id"]).status_code, self.delete_group(group["id"]).status_code] + codes = [self.delete_all_group_member(group["id"]).status_code, + self.delete_group(group["id"]).status_code] - res = filter(lambda a: a != DELETED, codes) + res = filter(lambda a: a != deleted, codes) if res: return res[0] - return DELETED + return deleted diff --git a/valet/tests/functional/valet_validator/orchestration/loader.py 
b/valet/tests/functional/valet_validator/orchestration/loader.py index 3940e79..0c9892a 100644 --- a/valet/tests/functional/valet_validator/orchestration/loader.py +++ b/valet/tests/functional/valet_validator/orchestration/loader.py @@ -13,6 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +"""Loader.""" + from heatclient.client import Client import sys import time @@ -24,9 +26,10 @@ from valet.tests.functional.valet_validator.group_api.valet_group import ValetGr class Loader(object): + """Class is responsible for loading stacks and groups.""" def __init__(self): - ''' initializing the loader - connecting to heat ''' + """Initializing the loader - connecting to heat.""" GeneralLogger.log_info("Initializing Loader") heat_url = CONF.heat.HEAT_URL + str(Auth.get_project_id()) @@ -36,6 +39,7 @@ class Loader(object): self.stacks = heat.stacks def create_stack(self, stack_name, template_resources): + """Create stack from template resources.""" GeneralLogger.log_info("Starting to create stacks") groups = template_resources.groups @@ -44,40 +48,50 @@ class Loader(object): if groups[key].group_type == "exclusivity": self.create_valet_group(groups[key].group_name) - self.stacks.create(stack_name=stack_name, template=template_resources.template_data) + self.stacks.create(stack_name=stack_name, + template=template_resources.template_data) return self.wait(stack_name, operation="create") except Exception: - GeneralLogger.log_error("Failed to create stack", traceback.format_exc()) + GeneralLogger.log_error("Failed to create stack", + traceback.format_exc()) sys.exit(1) def create_valet_group(self, group_name): + """Create valet group.""" try: v_group = ValetGroup() - group_details = v_group.get_group_id_and_members(group_name) # (group_name, group_type) + # (group_name, group_type) + group_details = v_group.get_group_id_and_members(group_name) v_group.add_group_member(group_details) except Exception: - 
GeneralLogger.log_error("Failed to create valet group", traceback.format_exc()) + GeneralLogger.log_error("Failed to create valet group", + traceback.format_exc()) sys.exit(1) def delete_stack(self, stack_name): + """Delete stack according to stack_name.""" self.stacks.delete(stack_id=stack_name) return self.wait(stack_name, operation="delete") def delete_all_stacks(self): + """Delete all stacks.""" GeneralLogger.log_info("Starting to delete stacks") try: for stack in self.stacks.list(): self.delete_stack(stack.id) except Exception: - GeneralLogger.log_error("Failed to delete stacks", traceback.format_exc()) + GeneralLogger.log_error("Failed to delete stacks", + traceback.format_exc()) - def wait(self, stack_name, count=CONF.valet.TIME_CAP, operation="Operation"): - ''' Checking the result of the process (create/delete) and writing the result to log ''' - while str(self.stacks.get(stack_name).status) == "IN_PROGRESS" and count > 0: + def wait(self, stack_name, count=CONF.valet.TIME_CAP, + operation="Operation"): + """Check result of process (create/delete) and write result to log.""" + while str(self.stacks.get(stack_name).status) == "IN_PROGRESS" \ + and count > 0: count -= 1 time.sleep(1) @@ -85,7 +99,8 @@ class Loader(object): GeneralLogger.log_info(operation + " Successfully completed") return Result() elif str(self.stacks.get(stack_name).status) == "FAILED": - msg = operation + " failed - " + self.stacks.get(stack_name).stack_status_reason + msg = operation + " failed - " + \ + self.stacks.get(stack_name).stack_status_reason else: msg = operation + " timed out" GeneralLogger.log_error(msg) diff --git a/valet/tests/functional/valet_validator/tests/functional_base.py b/valet/tests/functional/valet_validator/tests/functional_base.py index 191bd10..981971f 100644 --- a/valet/tests/functional/valet_validator/tests/functional_base.py +++ b/valet/tests/functional/valet_validator/tests/functional_base.py @@ -13,6 +13,7 @@ # See the License for the specific language 
governing permissions and # limitations under the License. +"""Functional Base.""" import os from oslo_log import log as logging @@ -31,24 +32,32 @@ class FunctionalTestCase(Base): """Test case base class for all unit tests.""" def __init__(self, *args, **kwds): - ''' initializing the FunctionalTestCase - loading the logger, loader and analyzer ''' + """Init. + + Initializing the FunctionalTestCase - loading the + logger, loader and analyzer. + """ super(FunctionalTestCase, self).__init__(*args, **kwds) def setUp(self): + """Start loader and analyzer.""" super(FunctionalTestCase, self).setUp() self.load = Loader() self.compute = Analyzer() - LOG.info("%s %s is starting... %s" % (COLORS["L_BLUE"], self.get_name(), COLORS["WHITE"])) + LOG.info("%s %s is starting... %s" % (COLORS["L_BLUE"], + self.get_name(), + COLORS["WHITE"])) def run_test(self, stack_name, template_path): - ''' scenario - + """Run Test. + scenario - deletes all stacks create new stack checks if host (or rack) is the same for all instances - ''' + """ # delete all stacks self.load.delete_all_stacks() @@ -60,17 +69,21 @@ class FunctionalTestCase(Base): res = self.try_again(res, stack_name, my_resources) self.validate(res) - LOG.info("%s stack creation is done successfully %s" % (COLORS["L_PURPLE"], COLORS["WHITE"])) + LOG.info("%s stack creation is done successfully %s" + % (COLORS["L_PURPLE"], COLORS["WHITE"])) time.sleep(self.CONF.valet.DELAY_DURATION) # validation self.validate(self.compute.check(my_resources)) - LOG.info("%s validation is done successfully %s" % (COLORS["L_PURPLE"], COLORS["WHITE"])) + LOG.info("%s validation is done successfully %s" + % (COLORS["L_PURPLE"], COLORS["WHITE"])) def try_again(self, res, stack_name, my_resources): + """Try creating stack again.""" tries = CONF.valet.TRIES_TO_CREATE while "Ostro error" in res.message and tries > 0: - LOG.error("Ostro error - try number %d" % (CONF.valet.TRIES_TO_CREATE - tries + 2)) + LOG.error("Ostro error - try number %d" + % 
(CONF.valet.TRIES_TO_CREATE - tries + 2)) self.load.delete_all_stacks() res = self.load.create_stack(stack_name, my_resources) tries -= 1 @@ -79,9 +92,13 @@ class FunctionalTestCase(Base): return res def get_template_path(self, template_name): - possible_topdir = os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir, os.pardir)) - return os.path.join(possible_topdir, 'tests/templates', template_name + '.yml') + """Return template path for the template name given.""" + possible_topdir = os.path.normpath(os.path.join( + os.path.abspath(__file__), os.pardir, os.pardir)) + return os.path.join(possible_topdir, 'tests/templates', + template_name + '.yml') def init_template(self, test): + """Init template, call get path for test template.""" self.stack_name = test.STACK_NAME self.template_path = self.get_template_path(test.TEMPLATE_NAME) diff --git a/valet/tests/functional/valet_validator/tests/test_affinity.py b/valet/tests/functional/valet_validator/tests/test_affinity.py index 8a5d189..9e930be 100644 --- a/valet/tests/functional/valet_validator/tests/test_affinity.py +++ b/valet/tests/functional/valet_validator/tests/test_affinity.py @@ -13,6 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+"""Test Affinity.""" + from oslo_config import cfg from oslo_log import log as logging from valet.tests.functional.valet_validator.common.init import CONF @@ -30,14 +32,17 @@ LOG = logging.getLogger(__name__) class TestAffinity(FunctionalTestCase): + """Test Affinity Functional Tests.""" def setUp(self): - ''' Adding configuration and logging mechanism ''' + """Adding configuration and logging mechanism.""" super(TestAffinity, self).setUp() self.init_template(CONF.test_affinity) def test_affinity(self): + """Test Affinity.""" self.run_test(self.stack_name, self.template_path) def get_name(self): + """Return Name.""" return __name__ diff --git a/valet/tests/functional/valet_validator/tests/test_affinity_3_Instances.py b/valet/tests/functional/valet_validator/tests/test_affinity_3_Instances.py index d8e63a5..ce51187 100644 --- a/valet/tests/functional/valet_validator/tests/test_affinity_3_Instances.py +++ b/valet/tests/functional/valet_validator/tests/test_affinity_3_Instances.py @@ -13,6 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+"""Test Affinity 3 Instances.""" + from oslo_config import cfg from oslo_log import log as logging from valet.tests.functional.valet_validator.common.init import CONF @@ -29,14 +31,17 @@ LOG = logging.getLogger(__name__) class TestAffinity_3(FunctionalTestCase): + """Test Affinity 3 Functional Test.""" def setUp(self): - ''' Adding configuration and logging mechanism ''' + """Adding configuration and logging mechanism.""" super(TestAffinity_3, self).setUp() self.init_template(CONF.test_affinity_3) def test_affinity(self): + """Test Affinity.""" self.run_test(self.stack_name, self.template_path) def get_name(self): + """Return Name.""" return __name__ diff --git a/valet/tests/functional/valet_validator/tests/test_diversity.py b/valet/tests/functional/valet_validator/tests/test_diversity.py index 8249f86..db21b42 100644 --- a/valet/tests/functional/valet_validator/tests/test_diversity.py +++ b/valet/tests/functional/valet_validator/tests/test_diversity.py @@ -13,6 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+"""Test Diversity.""" + from oslo_config import cfg from oslo_log import log as logging from valet.tests.functional.valet_validator.common.init import CONF @@ -30,15 +32,17 @@ LOG = logging.getLogger(__name__) class TestDiversity(FunctionalTestCase): + """Test Diversity Functional Test.""" def setUp(self): - ''' Initiating template ''' + """Initiating template.""" super(TestDiversity, self).setUp() self.init_template(CONF.test_diversity) def test_diversity(self): - + """Test diversity.""" self.run_test(self.stack_name, self.template_path) def get_name(self): + """Return Name.""" return __name__ diff --git a/valet/tests/functional/valet_validator/tests/test_exclusivity.py b/valet/tests/functional/valet_validator/tests/test_exclusivity.py index ffbbab0..fba109a 100644 --- a/valet/tests/functional/valet_validator/tests/test_exclusivity.py +++ b/valet/tests/functional/valet_validator/tests/test_exclusivity.py @@ -13,6 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+"""Test Exclusivity.""" + from oslo_config import cfg from oslo_log import log as logging from valet.tests.functional.valet_validator.common.init import CONF @@ -30,14 +32,17 @@ LOG = logging.getLogger(__name__) class TestExclusivity(FunctionalTestCase): + """Test Exclusivity Function Test.""" def setUp(self): - ''' Initiating template ''' + """Initiating template.""" super(TestExclusivity, self).setUp() self.init_template(CONF.test_exclusivity) def test_exclusivity(self): + """Nested run test on stack_name and template_path.""" self.run_test(self.stack_name, self.template_path) def get_name(self): + """Return name.""" return __name__ diff --git a/valet/tests/functional/valet_validator/tests/test_groups.py b/valet/tests/functional/valet_validator/tests/test_groups.py index da55ff7..bb3a8f9 100644 --- a/valet/tests/functional/valet_validator/tests/test_groups.py +++ b/valet/tests/functional/valet_validator/tests/test_groups.py @@ -13,6 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+"""Test Groups.""" + from valet.tests.functional.valet_validator.common.auth import Auth from valet.tests.functional.valet_validator.common import GeneralLogger from valet.tests.functional.valet_validator.group_api.valet_group import ValetGroup @@ -20,32 +22,42 @@ from valet.tests.functional.valet_validator.tests.functional_base import Functio class TestGroups(FunctionalTestCase): + """Test valet groups functional.""" def setUp(self): - ''' Adding configuration and logging mechanism ''' + """Add configuration and logging mechanism.""" super(TestGroups, self).setUp() self.groups = ValetGroup() self.group_name = "test_group" self.group_type = "exclusivity" def test_groups(self): + """Test groups using multiple methods and checking response codes.""" GeneralLogger.log_group("Delete all stacks") self.load.delete_all_stacks() GeneralLogger.log_group("Delete all members and groups") respose_code = self.groups.delete_all_groups() - self.assertEqual(204, respose_code, "delete_all_groups failed with code %s" % respose_code) + self.assertEqual(204, respose_code, + "delete_all_groups failed with code %s" + % respose_code) - self.assertEqual([], self.groups.get_list_groups(), "delete_all_groups failed") + self.assertEqual([], self.groups.get_list_groups(), + "delete_all_groups failed") GeneralLogger.log_group("Try to delete not existing group") - response = self.groups.delete_group("d68f62b1-4758-4ea5-a93a-8f9d9c0ae912") - self.assertEqual(404, response.status_code, "delete_group failed with code %s" % response.status_code) + response = self.groups.delete_group( + "d68f62b1-4758-4ea5-a93a-8f9d9c0ae912") + self.assertEqual(404, response.status_code, + "delete_group failed with code %s" + % response.status_code) GeneralLogger.log_group("Create test_group") group_info = self.groups.create_group(self.group_name, self.group_type) - self.assertEqual(201, group_info.status_code, "create_group failed with code %s" % group_info.status_code) + self.assertEqual(201, 
group_info.status_code, + "create_group failed with code %s" + % group_info.status_code) grp_id = group_info.json()["id"] @@ -53,30 +65,43 @@ class TestGroups(FunctionalTestCase): GeneralLogger.log_group(str(self.groups.get_list_groups())) GeneralLogger.log_group("Create test member (NOT tenant ID)") - member_respone = self.groups.update_group_members(grp_id, members="test_member") - self.assertEqual(409, member_respone.status_code, "update_group_members failed with code %s" % member_respone.status_code) + member_respone = self.groups.update_group_members(grp_id, + members="test_member") + self.assertEqual(409, member_respone.status_code, + "update_group_members failed with code %s" + % member_respone.status_code) GeneralLogger.log_group("Add description to group") desc_response = self.groups.update_group(grp_id, "new_description") - self.assertEqual(201, desc_response.status_code, "update_group failed with code %s" % desc_response.status_code) + self.assertEqual(201, desc_response.status_code, + "update_group failed with code %s" + % desc_response.status_code) GeneralLogger.log_group("Create member (tenant ID)") member_respone = self.groups.update_group_members(grp_id) - self.assertEqual(201, member_respone.status_code, "update_group_members failed with code %s" % member_respone.status_code) + self.assertEqual(201, member_respone.status_code, + "update_group_members failed with code %s" + % member_respone.status_code) GeneralLogger.log_group("Return list of groups") GeneralLogger.log_group(self.groups.get_group_details(grp_id).json()) GeneralLogger.log_group("Delete test member (NOT tenant ID)") member_respone = self.groups.delete_group_member(grp_id, "test_member") - self.assertEqual(404, member_respone.status_code, "delete_group_member failed with code %s" % member_respone.status_code) + self.assertEqual(404, member_respone.status_code, + "delete_group_member failed with code %s" + % member_respone.status_code) GeneralLogger.log_group("Delete member (tenant 
ID)") - member_respone = self.groups.delete_group_member(grp_id, Auth.get_project_id()) - self.assertEqual(204, member_respone.status_code, "delete_group_member failed with code %s" % member_respone.status_code) + member_respone = self.groups.delete_group_member(grp_id, + Auth.get_project_id()) + self.assertEqual(204, member_respone.status_code, + "delete_group_member failed with code %s" + % member_respone.status_code) GeneralLogger.log_group("Return list of groups") GeneralLogger.log_group(self.groups.get_group_details(grp_id).json()) def get_name(self): + """Return name.""" return __name__ diff --git a/valet/tests/functional/valet_validator/tests/test_nested.py b/valet/tests/functional/valet_validator/tests/test_nested.py index 66239b9..6b64ba1 100644 --- a/valet/tests/functional/valet_validator/tests/test_nested.py +++ b/valet/tests/functional/valet_validator/tests/test_nested.py @@ -13,6 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+"""Test Nested.""" + from oslo_config import cfg from oslo_log import log as logging from valet.tests.functional.valet_validator.common.init import CONF @@ -30,14 +32,17 @@ LOG = logging.getLogger(__name__) class TestNested(FunctionalTestCase): + """Basic setup and functions for nested tests.""" def setUp(self): - ''' Adding configuration and logging mechanism ''' + """Adding configuration and logging mechanism.""" super(TestNested, self).setUp() self.init_template(CONF.test_nested) def test_nested(self): + """Call run_test on stack and give it the path to the template.""" self.run_test(self.stack_name, self.template_path) def get_name(self): + """Return name.""" return __name__ diff --git a/valet/tests/tempest/api/base.py b/valet/tests/tempest/api/base.py index 82504a5..16d9431 100644 --- a/valet/tests/tempest/api/base.py +++ b/valet/tests/tempest/api/base.py @@ -1,19 +1,20 @@ -#!/usr/bin/env python # # Copyright 2014-2017 AT&T Intellectual Property -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+"""Base.""" + from tempest import config from tempest import test @@ -23,11 +24,13 @@ CONF = config.CONF class BaseValetTest(test.BaseTestCase): + """Vase Valet Tempest Test Class.""" credentials = ['primary'] @classmethod def skip_checks(cls): + """Skp Checks, if CONF service not available, raise exception.""" super(BaseValetTest, cls).skip_checks() if not CONF.service_available.valet: skip_msg = ("%s skipped as valet is not available" % cls.__name__) @@ -35,6 +38,7 @@ class BaseValetTest(test.BaseTestCase): @classmethod def setup_clients(cls): + """Setup Valet Clients.""" super(BaseValetTest, cls).setup_clients() cls.valet_client = client.ValetClient( cls.os.auth_provider, @@ -44,9 +48,11 @@ class BaseValetTest(test.BaseTestCase): @classmethod def resource_setup(cls): + """Resource Setup.""" super(BaseValetTest, cls).resource_setup() cls.catalog_type = CONF.placement.catalog_type @classmethod def resource_cleanup(cls): + """Resource Cleanup.""" super(BaseValetTest, cls).resource_cleanup() diff --git a/valet/tests/tempest/api/disabled_test_plan.py b/valet/tests/tempest/api/disabled_test_plan.py index aa37fca..8a37041 100644 --- a/valet/tests/tempest/api/disabled_test_plan.py +++ b/valet/tests/tempest/api/disabled_test_plan.py @@ -1,28 +1,31 @@ -#!/usr/bin/env python # # Copyright 2014-2017 AT&T Intellectual Property -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+"""Disabled Test Plan.""" + from tempest import test from tempest_lib.common.utils import data_utils from valet.tests.tempest.api import base class ValetPlanTest(base.BaseValetTest): + """Test plan operations for valet plans.""" @classmethod def setup_clients(cls): + """Setup Valet client for Plan Test.""" super(ValetPlanTest, cls).setup_clients() cls.client = cls.valet_client @@ -40,7 +43,7 @@ class ValetPlanTest(base.BaseValetTest): def _get_resource_property(self): properties = {} - # TODO(kr336r): Use tempest to get/create flavour, image, networks + # TODO(UNKNOWN): Use tempest to get/create flavour, image, networks # Is it required really ??? properties['flavor'] = "m1.small" properties['image'] = "ubuntu_1204" @@ -75,7 +78,9 @@ class ValetPlanTest(base.BaseValetTest): plan_name = resp['plan']['name'] for key, value in resp['plan']['placements'].iteritems(): stack_and_plan['resource_id'] = key - location = resp['plan']['placements'][stack_and_plan['resource_id']]['location'] + location = resp['plan']['placements'][ + stack_and_plan['resource_id'] + ]['location'] stack_and_plan['stack_id'] = stack_id stack_and_plan['plan_id'] = plan_id stack_and_plan['name'] = plan_name @@ -84,6 +89,7 @@ class ValetPlanTest(base.BaseValetTest): @test.idempotent_id('f25ea766-c91e-40ca-b96c-dff42129803d') def test_create_plan(self): + """Test plan was created by asserting stack_id and plan_name equal.""" stack_and_plan = self._get_stack_and_plan_id() stack_id = stack_and_plan['stack_id'] plan_id = stack_and_plan['plan_id'] @@ -93,6 +99,7 @@ class ValetPlanTest(base.BaseValetTest): @test.idempotent_id('973635f4-b5c9-4b78-81e7-d273e1782afc') def test_update_plan_action_migrate(self): + """Test plan updated successfully.""" stack_and_plan = self._get_stack_and_plan_id() stack_id = stack_and_plan['stack_id'] plan_id = stack_and_plan['plan_id'] diff --git a/valet/tests/tempest/api/test_groups.py b/valet/tests/tempest/api/test_groups.py index 568822d..d328721 100644 --- 
a/valet/tests/tempest/api/test_groups.py +++ b/valet/tests/tempest/api/test_groups.py @@ -1,19 +1,20 @@ -#!/usr/bin/env python # # Copyright 2014-2017 AT&T Intellectual Property -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +"""Test Groups.""" + from tempest.common.utils import data_utils from tempest import test @@ -21,15 +22,17 @@ from valet.tests.tempest.api import base class ValetGroupsTest(base.BaseValetTest): - """Here we test the basic group operations of Valet Groups""" + """Here we test the basic group operations of Valet Groups.""" @classmethod def setup_clients(cls): + """Setup Valet client for Groups Test.""" super(ValetGroupsTest, cls).setup_clients() cls.client = cls.valet_client @test.idempotent_id('b2655098-5a0d-11e6-9efd-525400af9658') def test_list_groups(self): + """List groups using client assert no groups missing to verify.""" group_ids = list() fetched_ids = list() @@ -53,6 +56,7 @@ class ValetGroupsTest(base.BaseValetTest): @test.idempotent_id('2ab0337e-6472-11e6-b6c6-080027824017') def test_create_group(self): + """Test created group by checking details equal to group details.""" group_name = data_utils.rand_name('group') description = data_utils.rand_name('Description') group = self.client.create_group( @@ -68,6 +72,7 @@ class ValetGroupsTest(base.BaseValetTest): @test.idempotent_id('35f0aa20-6472-11e6-b6c6-080027824017') def test_delete_group(self): + """Client Delete group with id, check group with id not in groups.""" # Create group 
group_name = data_utils.rand_name('group') description = data_utils.rand_name('Description') @@ -89,6 +94,7 @@ class ValetGroupsTest(base.BaseValetTest): @test.attr(type='smoke') @test.idempotent_id('460d86e4-6472-11e6-b6c6-080027824017') def test_update_group(self): + """Client Update group with id, using a new description.""" # Create group group_name = data_utils.rand_name('group') description = data_utils.rand_name('Description') @@ -108,6 +114,7 @@ class ValetGroupsTest(base.BaseValetTest): @test.idempotent_id('4f660e50-6472-11e6-b6c6-080027824017') def test_show_group(self): + """Test client show group by checking values against group_details.""" # Create group group_name = data_utils.rand_name('group') description = data_utils.rand_name('Description') diff --git a/valet/tests/tempest/api/test_members.py b/valet/tests/tempest/api/test_members.py index d95e21e..ca44e3e 100644 --- a/valet/tests/tempest/api/test_members.py +++ b/valet/tests/tempest/api/test_members.py @@ -1,31 +1,35 @@ -#!/usr/bin/env python # # Copyright 2014-2017 AT&T Intellectual Property -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+"""Test Members.""" + from tempest import test from tempest_lib.common.utils import data_utils from valet.tests.tempest.api import base class ValetGroupsMembersTest(base.BaseValetTest): + """Test for Valet Groups and Members.""" @classmethod def setup_clients(cls): + """Setup client for Valet client and tenants.""" super(ValetGroupsMembersTest, cls).setup_clients() cls.client = cls.valet_client - cls.TenantsClient = getattr(cls.os, "tenants_client", cls.os.identity_client) + cls.TenantsClient = getattr(cls.os, "tenants_client", + cls.os.identity_client) def _create_group(self): group_name = data_utils.rand_name('membergroup') @@ -43,13 +47,15 @@ class ValetGroupsMembersTest(base.BaseValetTest): def _create_tenant(self): tenant_name = data_utils.rand_name(name='tenant') tenant_desc = data_utils.rand_name(name='desc') - body = self.TenantsClient.create_tenant(name=tenant_name, description=tenant_desc) + body = self.TenantsClient.create_tenant(name=tenant_name, + description=tenant_desc) tenant_id = body['tenant']['id'] self.addCleanup(self.TenantsClient.delete_tenant, tenant_id) return tenant_id @test.idempotent_id('5aeec320-65d5-11e6-8b77-86f30ca893d3') def test_add_single_member_to_a_group(self): + """Add single member to group, do comparison to verify.""" # Create a tenant tenants = [] tenant_id = self._create_tenant() @@ -69,6 +75,7 @@ class ValetGroupsMembersTest(base.BaseValetTest): @test.idempotent_id('5aeec6f4-65d5-11e6-8b77-86f30ca893d3') def test_add_multiple_members_to_a_group(self): + """Add multiple members to group, check items equality to verify.""" # Create multiple tenants tenants = [] for count in range(0, 4): @@ -90,6 +97,7 @@ class ValetGroupsMembersTest(base.BaseValetTest): @test.idempotent_id('5aeec8b6-65d5-11e6-8b77-86f30ca893d3') def test_add_single_member_to_a_group_and_verify_membership(self): + """Add a memer to a group, verify membership by checking status.""" # Create a tenant tenants = [] tenant_id = self._create_tenant() @@ -105,6 
+113,7 @@ class ValetGroupsMembersTest(base.BaseValetTest): @test.idempotent_id('5aeec99c-65d5-11e6-8b77-86f30ca893d3') def test_delete_member_from_group(self): + """Test deleting a single member from group, check status to verify.""" # Create multiple tenants tenants = [] for count in range(0, 4): @@ -121,6 +130,7 @@ class ValetGroupsMembersTest(base.BaseValetTest): @test.idempotent_id('5aeecb68-65d5-11e6-8b77-86f30ca893d3') def test_delete_all_members_from_group(self): + """Test that all members deleted from group, check status to verify.""" # Create multiple tenants tenants = [] for count in range(0, 4): diff --git a/valet/tests/tempest/config.py b/valet/tests/tempest/config.py index 127a496..6bfeae4 100644 --- a/valet/tests/tempest/config.py +++ b/valet/tests/tempest/config.py @@ -1,18 +1,20 @@ # # Copyright 2014-2017 AT&T Intellectual Property -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +"""Config.""" + from oslo_config import cfg service_available_group = cfg.OptGroup(name="service_available", diff --git a/valet/tests/tempest/plugin.py b/valet/tests/tempest/plugin.py index 409ba3a..8d51422 100644 --- a/valet/tests/tempest/plugin.py +++ b/valet/tests/tempest/plugin.py @@ -1,19 +1,20 @@ -#!/usr/bin/env python # # Copyright 2014-2017 AT&T Intellectual Property -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +"""Plugin.""" + import os from tempest import config @@ -24,7 +25,10 @@ import valet class ValetTempestPlugin(plugins.TempestPlugin): + """Plugins for Valet Tempest Testing.""" + def load_tests(self): + """Load tempest tests, return full test dir and base path.""" base_path = os.path.split(os.path.dirname( os.path.abspath(valet.__file__)))[0] test_dir = "valet/tests/tempest" @@ -32,11 +36,16 @@ class ValetTempestPlugin(plugins.TempestPlugin): return full_test_dir, base_path def register_opts(self, conf): - config.register_opt_group(conf, project_config.service_available_group, project_config.ServiceAvailableGroup) + """Register opt groups in config.""" + config.register_opt_group(conf, project_config.service_available_group, + project_config.ServiceAvailableGroup) - config.register_opt_group(conf, project_config.placement_group, project_config.PlacementGroup) + config.register_opt_group(conf, project_config.placement_group, + project_config.PlacementGroup) - config.register_opt_group(conf, project_config.valet_group, project_config.opt_valet) + config.register_opt_group(conf, project_config.valet_group, + project_config.opt_valet) def get_opt_lists(self): + """Get Opt Lists.""" pass diff --git a/valet/tests/tempest/scenario/analyzer.py b/valet/tests/tempest/scenario/analyzer.py index dd12e51..78fe9b9 100644 --- a/valet/tests/tempest/scenario/analyzer.py +++ b/valet/tests/tempest/scenario/analyzer.py @@ -1,18 +1,20 @@ # # Copyright 2014-2017 AT&T Intellectual Property -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use 
this file except in compliance with the License. # You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +"""Analyzer.""" + from collections import defaultdict import os from tempest import config @@ -22,12 +24,14 @@ CONF = config.CONF class Analyzer(object): + """Class to analyze groups/racks/instances.""" def __init__(self, logger, stack_id, heat, nova): - ''' initializing the analyzer - connecting to nova ''' + """Initializing the analyzer - connecting to Nova.""" self.heat_client = heat self.nova_client = nova - self.possible_topdir = os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)) + self.possible_topdir = os.path.normpath(os.path.join( + os.path.abspath(__file__), os.pardir)) self.stack_identifier = stack_id self.log = logger self.resource_name = {} @@ -35,7 +39,7 @@ class Analyzer(object): self.group_instance_name = {} def check(self, resources): - ''' Checking if all instances are on the Appropriate hosts and racks ''' + """Checking if all instances are on the Appropriate hosts and racks.""" self.log.log_info("Starting to check instances location") result = True @@ -59,18 +63,23 @@ class Analyzer(object): result = result and fn(instances, level) except Exception as ex: - self.log.log_error("Exception at method check: %s" % ex, traceback.format_exc()) + self.log.log_error("Exception at method check: %s" % ex, + traceback.format_exc()) result = False return result def init_instances_for_group(self, resources): + """Init instances for a group with the given resources.""" ins_group = defaultdict(list) for grp in resources.groups.keys(): - self.group_instance_name[grp] = 
resources.groups[grp].group_resources - resources.groups[grp].group_resources.append(resources.groups[grp].level) - ins_group[resources.groups[grp].group_type].append(resources.groups[grp].group_resources) + self.group_instance_name[grp] = \ + resources.groups[grp].group_resources + resources.groups[grp].group_resources.append( + resources.groups[grp].level) + ins_group[resources.groups[grp].group_type].append( + resources.groups[grp].group_resources) # replacing group for it's instances ins_group = self.organize(ins_group) @@ -78,25 +87,32 @@ class Analyzer(object): return ins_group def init_resources(self, resources): + """Init resources.""" for ins in resources.instances: self.resource_name[ins.resource_name] = ins.name def init_servers_list(self): + """Init server list from nova client.""" servers_list = self.nova_client.list_servers() for i in range(len(servers_list["servers"])): - server = self.nova_client.show_server(servers_list["servers"][i]["id"]) - self.instance_on_server[servers_list["servers"][i]["name"]] = server["server"]["OS-EXT-SRV-ATTR:host"] + server = \ + self.nova_client.show_server(servers_list["servers"][i]["id"]) + self.instance_on_server[servers_list["servers"][i]["name"]] = \ + server["server"]["OS-EXT-SRV-ATTR:host"] def get_instance_name(self, res_name): + """Return instance name (resource name).""" return self.resource_name[res_name] def get_instance_host(self, res_name): + """Return host of instance with matching name.""" hosts = [] if len(self.instance_on_server) == 0: self.init_servers_list() - self.log.log_info("instance_on_server: %s" % self.instance_on_server) + self.log.log_info("instance_on_server: %s" % + self.instance_on_server) for res in res_name: name = self.get_instance_name(res) @@ -105,22 +121,26 @@ class Analyzer(object): return hosts def are_the_same(self, res_name, level): + """Return true if host aren't the same otherwise return False.""" self.log.log_info("are_the_same") hosts_list = self.get_instance_host(res_name) 
self.log.log_info(hosts_list) try: for h in hosts_list: - if self.compare_host(self.get_host_or_rack(level, h), self.get_host_or_rack(level, hosts_list[0])) is False: + if self.compare_host( + self.get_host_or_rack(level, h), + self.get_host_or_rack(level, hosts_list[0])) is False: return False return True except Exception as ex: - self.log.log_error("Exception at method are_the_same: %s" % ex, traceback.format_exc()) + self.log.log_error("Exception at method are_the_same: %s" % + ex, traceback.format_exc()) return False def are_different(self, res_name, level): - ''' Checking if all hosts (and racks) are different for all instances ''' + """Check if all hosts (and racks) are different for all instances.""" self.log.log_info("are_different") diction = {} hosts_list = self.get_instance_host(res_name) @@ -128,21 +148,25 @@ class Analyzer(object): try: for h in hosts_list: - if self.is_already_exists(diction, self.get_host_or_rack(level, h)): + if self.is_already_exists(diction, self.get_host_or_rack(level, + h)): return False return True except Exception as ex: - self.log.log_error("Exception at method are_all_hosts_different: %s" % ex, traceback.format_exc()) + self.log.log_error("Exception at method are_all_hosts_different: %s" + % ex, traceback.format_exc()) return False def are_we_alone(self, ins_for_group, level): + """Return True if no other instances in group on server.""" self.log.log_info("are_we_alone ") self.log.log_info(ins_for_group) instances = self.instance_on_server.keys() if level == "rack": - instances = self.get_rack_instances(set(self.instance_on_server.values())) + instances = self.get_rack_instances(set( + self.instance_on_server.values())) # instance_on_server should be all the instances on the rack if len(instances) < 1: @@ -155,6 +179,7 @@ class Analyzer(object): return not instances def organize(self, ins_group): + """Organize internal groups, return ins_group.""" internal_ins = [] for x in ins_group: for y in ins_group[x]: @@ -167,10 
+192,11 @@ class Analyzer(object): return ins_group def get_group_instances(self, resources, group_ins): - ''' gets the instance object according to the group_ins + """Get the instance object according to the group_ins. - group_ins - the group_resources name of the instances belong to this group (['my-instance-1', 'my-instance-2']) - ''' + group_ins - the group_resources name of the instances belong to this + group (['my-instance-1', 'my-instance-2']) + """ ins_for_group = [] try: for instance in resources.instances: @@ -183,6 +209,7 @@ class Analyzer(object): return None def get_rack_instances(self, hosts): + """Get instances on racks, return list of instances.""" racks = [] for host in hosts: racks.append(self.get_rack(host)) @@ -194,6 +221,7 @@ class Analyzer(object): return instances def is_already_exists(self, diction, item): + """Return true if item exists in diction.""" if item in diction: return True @@ -201,15 +229,19 @@ class Analyzer(object): return False def compare_rack(self, current_host, first_host): + """Compare racks for hosts, return true if racks equal.""" self.log.log_debug(current_host) return self.get_rack(current_host) == self.get_rack(first_host) def compare_host(self, current_host, first_host): + """Compare current to first host, return True if equal.""" self.log.log_debug(current_host) return current_host == first_host def get_rack(self, host): + """Get rack for current host.""" return (host.split("r")[1])[:2] def get_host_or_rack(self, level, host): + """Return host or rack based on level.""" return host if level == "host" else self.get_rack(host) diff --git a/valet/tests/tempest/scenario/general_logger.py b/valet/tests/tempest/scenario/general_logger.py index 72de046..e7466e1 100644 --- a/valet/tests/tempest/scenario/general_logger.py +++ b/valet/tests/tempest/scenario/general_logger.py @@ -1,18 +1,20 @@ # # Copyright 2014-2017 AT&T Intellectual Property -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may 
not use this file except in compliance with the License. # You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +"""General Logger.""" + from oslo_log import log as logging from tempest import config @@ -31,20 +33,32 @@ COLORS = \ class GeneralLogger(object): + """Class containing general log methods.""" def __init__(self, name): + """Init logger with test name.""" self.test_name = name def log_info(self, msg): - LOG.info("%s %s - %s %s %s" % (COLORS["L_PURPLE"], self.test_name, COLORS["L_GREEN"], msg, COLORS["WHITE"])) + """Info log.""" + LOG.info("%s %s - %s %s %s" % (COLORS["L_PURPLE"], self.test_name, + COLORS["L_GREEN"], msg, COLORS["WHITE"])) def log_error(self, msg, trc_back=None): - LOG.error("%s %s - %s %s %s" % (COLORS["L_PURPLE"], self.test_name, COLORS["L_RED"], msg, COLORS["WHITE"])) + """Log error and trace_back for error if there is one.""" + LOG.error("%s %s - %s %s %s" % (COLORS["L_PURPLE"], self.test_name, + COLORS["L_RED"], msg, COLORS["WHITE"])) if trc_back: - LOG.error("%s %s - %s %s %s" % (COLORS["L_PURPLE"], self.test_name, COLORS["L_RED"], trc_back, COLORS["WHITE"])) + LOG.error("%s %s - %s %s %s" % (COLORS["L_PURPLE"], self.test_name, + COLORS["L_RED"], trc_back, + COLORS["WHITE"])) def log_debug(self, msg): - LOG.debug("%s %s - %s %s %s" % (COLORS["L_PURPLE"], self.test_name, COLORS["L_BLUE"], msg, COLORS["WHITE"])) + """Log debug.""" + LOG.debug("%s %s - %s %s %s" % (COLORS["L_PURPLE"], self.test_name, + COLORS["L_BLUE"], msg, COLORS["WHITE"])) def log_group(self, msg): - LOG.info("%s %s - %s %s %s" % (COLORS["L_PURPLE"], self.test_name, COLORS["Yellow"], msg, COLORS["WHITE"])) + 
"""Log info.""" + LOG.info("%s %s - %s %s %s" % (COLORS["L_PURPLE"], self.test_name, + COLORS["Yellow"], msg, COLORS["WHITE"])) diff --git a/valet/tests/tempest/scenario/resources.py b/valet/tests/tempest/scenario/resources.py index bf29f77..749b25c 100644 --- a/valet/tests/tempest/scenario/resources.py +++ b/valet/tests/tempest/scenario/resources.py @@ -1,18 +1,20 @@ # # Copyright 2014-2017 AT&T Intellectual Property -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +"""Resources.""" + from oslo_log import log as logging import traceback import yaml @@ -23,8 +25,10 @@ TEMPLATE_RES = "resources" class TemplateResources(object): - ''' Heat template parser ''' + """Heat template parser.""" + def __init__(self, template): + """Init heat template parser.""" self.instances = [] self.groups = {} self.template_data = None @@ -47,7 +51,10 @@ class TemplateResources(object): class Instance(object): + """Nova Instance.""" + def __init__(self, doc, instance_name): + """Init instance with name, image, flavor, key and call fill.""" self.resource_name = instance_name self.name = None self.image = None @@ -57,6 +64,7 @@ class Instance(object): self.fill(doc, instance_name) def fill(self, doc, instance_name): + """Fill instance based on template.""" try: template_property = doc[TEMPLATE_RES][instance_name]["properties"] @@ -69,12 +77,17 @@ class Instance(object): LOG.error(traceback.format_exc()) def get_ins(self): + """Return instance and its data.""" return("type: %s, name: %s, image: 
%s, flavor: %s, resource_name: %s " - % (self.type, self.name, self.image, self.flavor, self.resource_name)) + % (self.type, self.name, self.image, self.flavor, + self.resource_name)) class Group(object): + """Class representing group object.""" + def __init__(self, doc, group_name): + """Init group with type, name, level, resources and call fill.""" self.group_type = None self.group_name = None self.level = None @@ -83,11 +96,13 @@ class Group(object): self.fill(doc, group_name) def fill(self, doc, group_name): + """Fill group details from template.""" try: template_property = doc[TEMPLATE_RES][group_name]["properties"] self.group_type = template_property["group_type"] - self.group_name = template_property["group_name"] if "group_name" in template_property else None + self.group_name = template_property["group_name"] if \ + "group_name" in template_property else None self.level = template_property["level"] for res in template_property[TEMPLATE_RES]: self.group_resources.append(res["get_resource"]) diff --git a/valet/tests/tempest/scenario/scenario_base.py b/valet/tests/tempest/scenario/scenario_base.py index 576a93f..a5a2c82 100644 --- a/valet/tests/tempest/scenario/scenario_base.py +++ b/valet/tests/tempest/scenario/scenario_base.py @@ -1,18 +1,20 @@ # # Copyright 2014-2017 AT&T Intellectual Property -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+"""Scenario Base.""" + import os from tempest import config from tempest import exceptions @@ -28,10 +30,13 @@ CONF = config.CONF class ScenarioTestCase(test.BaseTestCase): + """Base class for Scenario Test cases.""" + credentials = ['primary'] @classmethod def skip_checks(cls): + """Skip checks, if valet service not available, raise exception.""" super(ScenarioTestCase, cls).skip_checks() if not CONF.service_available.valet: skip_msg = ("%s skipped as valet is not available" % cls.__name__) @@ -39,34 +44,37 @@ class ScenarioTestCase(test.BaseTestCase): @classmethod def resource_setup(cls): + """Setup resource, set catalog_type.""" super(ScenarioTestCase, cls).resource_setup() cls.catalog_type = CONF.placement.catalog_type @classmethod def resource_cleanup(cls): + """Class method resource cleanup.""" super(ScenarioTestCase, cls).resource_cleanup() @classmethod def setup_clients(cls): + """Setup clients (valet).""" super(ScenarioTestCase, cls).setup_clients() cls.heat_client = cls.os.orchestration_client cls.nova_client = cls.os.servers_client cls.tenants_client = cls.os.identity_client - cls.valet_client = ValetClient(cls.os.auth_provider, - CONF.placement.catalog_type, - CONF.identity.region, - **cls.os.default_params_with_timeout_values) + cls.valet_client = ValetClient( + cls.os.auth_provider, CONF.placement.catalog_type, + CONF.identity.region, **cls.os.default_params_with_timeout_values) - cls.possible_topdir = os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)) + cls.possible_topdir = os.path.normpath( + os.path.join(os.path.abspath(__file__), os.pardir)) cls.stack_identifier = None cls.tries = CONF.valet.TRIES_TO_CREATE def run_test(self, logger, stack_name, template_path): - ''' scenario - + """Scenario. 
- create new stack - checks if host (or rack) is the same for all instances - ''' + create new stack + checks if host (or rack) is the same for all instances + """ self.log = logger self.log.log_info(" ******** Running Test ******** ") tmplt_url = self.possible_topdir + template_path @@ -79,12 +87,14 @@ class ScenarioTestCase(test.BaseTestCase): self.assertEqual(True, self.create_stack(name, env_data, template)) self.log.log_info(" ******** Analyzing Stack ******** ") - analyzer = Analyzer(self.log, self.stack_identifier, self.heat_client, self.nova_client) + analyzer = Analyzer(self.log, self.stack_identifier, self.heat_client, + self.nova_client) self.assertEqual(True, analyzer.check(template)) self.log.log_info(" ********** THE END ****************") def create_stack(self, stack_name, env_data, template_resources): + """Create stack with name/env/resource. Create all groups/instances.""" try: groups = template_resources.groups @@ -92,12 +102,16 @@ class ScenarioTestCase(test.BaseTestCase): if groups[key].group_type == "exclusivity": self.log.log_info(" creating group ") grp_name = data_utils.rand_name(name=groups[key].group_name) - template_resources.template_data = template_resources.template_data.replace(groups[key].group_name, grp_name) + template_resources.template_data = \ + template_resources.template_data.replace( + groups[key].group_name, grp_name) self.create_valet_group(grp_name) for instance in template_resources.instances: generated_name = data_utils.rand_name(instance.name) - template_resources.template_data = template_resources.template_data.replace(instance.name, generated_name) + template_resources.template_data = \ + template_resources.template_data.replace( + instance.name, generated_name) instance.name = generated_name self.wait_for_stack(stack_name, env_data, template_resources) @@ -109,8 +123,11 @@ class ScenarioTestCase(test.BaseTestCase): return True def create_valet_group(self, group_name): + """Create valet group with name using valet 
client. Add members.""" try: - v_group = self.valet_client.create_group(name=group_name, group_type='exclusivity', description="description") + v_group = self.valet_client.create_group(name=group_name, + group_type='exclusivity', + description="description") group_id = v_group['id'] tenant_id = self.tenants_client.tenant_id self.addCleanup(self._delete_group, group_id) @@ -118,9 +135,11 @@ class ScenarioTestCase(test.BaseTestCase): self.valet_client.add_members(group_id, [tenant_id]) except Exception: - self.log.log_error("Failed to create valet group", traceback.format_exc()) + self.log.log_error("Failed to create valet group", + traceback.format_exc()) def get_env_file(self, template): + """Return file.read for env file or return None.""" env_url = template.replace(".yml", ".env") if os.path.exists(env_url): @@ -134,30 +153,43 @@ class ScenarioTestCase(test.BaseTestCase): self.valet_client.delete_group(group_id) def delete_stack(self): + """Use heat client to delete stack.""" self.heat_client.delete_stack(self.stack_identifier) - self.heat_client.wait_for_stack_status(self.stack_identifier, "DELETE_COMPLETE", failure_pattern='^.*DELETE_FAILED$') + self.heat_client.wait_for_stack_status( + self.stack_identifier, "DELETE_COMPLETE", + failure_pattern='^.*DELETE_FAILED$') def show_stack(self, stack_id): + """Return show stack with given id from heat client.""" return self.heat_client.show_stack(stack_id) def wait_for_stack(self, stack_name, env_data, template_resources): + """Use heat client to create stack, then wait for status.""" try: self.log.log_info("Trying to create stack") - new_stack = self.heat_client.create_stack(stack_name, environment=env_data, template=template_resources.template_data) + new_stack = self.heat_client.create_stack( + stack_name, environment=env_data, + template=template_resources.template_data) stack_id = new_stack["stack"]["id"] self.stack_identifier = stack_name + "/" + stack_id - 
self.heat_client.wait_for_stack_status(self.stack_identifier, "CREATE_COMPLETE", failure_pattern='^.*CREATE_FAILED$') + self.heat_client.wait_for_stack_status( + self.stack_identifier, "CREATE_COMPLETE", + failure_pattern='^.*CREATE_FAILED$') except exceptions.StackBuildErrorException as ex: if "Ostro error" in str(ex): if self.tries > 0: - self.log.log_error("Ostro error - try number %d" % (CONF.valet.TRIES_TO_CREATE - self.tries + 2)) + self.log.log_error( + "Ostro error - try number %d" % + (CONF.valet.TRIES_TO_CREATE - self.tries + 2)) self.tries -= 1 self.delete_stack() time.sleep(CONF.valet.PAUSE) - self.wait_for_stack(stack_name, env_data, template_resources) + self.wait_for_stack(stack_name, env_data, + template_resources) else: raise else: - self.log.log_error("Failed to create stack", traceback.format_exc()) + self.log.log_error("Failed to create stack", + traceback.format_exc()) diff --git a/valet/tests/tempest/scenario/templates/diversity_between_2_affinity.yml b/valet/tests/tempest/scenario/templates/diversity_between_2_affinity.yml index 28d5994..d8cb27c 100644 --- a/valet/tests/tempest/scenario/templates/diversity_between_2_affinity.yml +++ b/valet/tests/tempest/scenario/templates/diversity_between_2_affinity.yml @@ -1,6 +1,6 @@ heat_template_version: 2015-04-30 -description: Nested affinity and diversity template - Host level diversity between 2 groups of affinity +description: Nested affinity and diversity template - Host level diversity parameters: instance_image: diff --git a/valet/tests/tempest/scenario/tests/test_affinity.py b/valet/tests/tempest/scenario/tests/test_affinity.py index e9f7adf..9926b51 100644 --- a/valet/tests/tempest/scenario/tests/test_affinity.py +++ b/valet/tests/tempest/scenario/tests/test_affinity.py @@ -1,24 +1,29 @@ # # Copyright 2014-2017 AT&T Intellectual Property -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +"""Test Affinity.""" + from valet.tests.tempest.scenario.general_logger import GeneralLogger from valet.tests.tempest.scenario.scenario_base import ScenarioTestCase class TestAffinity(ScenarioTestCase): + """Test Affinity Scenario.""" def test_affinity(self): + """Run affinity test.""" logger = GeneralLogger("test_affinity") - self.run_test(logger, "affinity", "/templates/affinity_basic_2_instances.yml") + self.run_test(logger, "affinity", + "/templates/affinity_basic_2_instances.yml") diff --git a/valet/tests/tempest/scenario/tests/test_diversity.py b/valet/tests/tempest/scenario/tests/test_diversity.py index 4e1d720..e2cd8e8 100644 --- a/valet/tests/tempest/scenario/tests/test_diversity.py +++ b/valet/tests/tempest/scenario/tests/test_diversity.py @@ -1,24 +1,29 @@ # # Copyright 2014-2017 AT&T Intellectual Property -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+"""Test Diversity.""" + from valet.tests.tempest.scenario.general_logger import GeneralLogger from valet.tests.tempest.scenario.scenario_base import ScenarioTestCase class TestDiversity(ScenarioTestCase): + """Test Diversity Scenario.""" def test_diversity(self): + """Run Test diversity.""" logger = GeneralLogger("test_diversity") - self.run_test(logger, "diversity", "/templates/diversity_basic_2_instances.yml") + self.run_test(logger, "diversity", + "/templates/diversity_basic_2_instances.yml") diff --git a/valet/tests/tempest/scenario/tests/test_exclusivity.py b/valet/tests/tempest/scenario/tests/test_exclusivity.py index 6b061bf..d7248e8 100644 --- a/valet/tests/tempest/scenario/tests/test_exclusivity.py +++ b/valet/tests/tempest/scenario/tests/test_exclusivity.py @@ -1,24 +1,29 @@ # # Copyright 2014-2017 AT&T Intellectual Property -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+"""Test Exclusivity.""" + from valet.tests.tempest.scenario.general_logger import GeneralLogger from valet.tests.tempest.scenario.scenario_base import ScenarioTestCase class TestExclusivity(ScenarioTestCase): + """Test Exclusivity Scenario.""" def test_exclusivity(self): + """Test Exclusivity.""" logger = GeneralLogger("test_exclusivity") - self.run_test(logger, "exclusivity", "/templates/exclusivity_basic_2_instances.yml") + self.run_test(logger, "exclusivity", + "/templates/exclusivity_basic_2_instances.yml") diff --git a/valet/tests/tempest/scenario/tests/test_nested.py b/valet/tests/tempest/scenario/tests/test_nested.py index 615f84b..0d76301 100644 --- a/valet/tests/tempest/scenario/tests/test_nested.py +++ b/valet/tests/tempest/scenario/tests/test_nested.py @@ -1,24 +1,29 @@ # # Copyright 2014-2017 AT&T Intellectual Property -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+"""Test Nested.""" + from valet.tests.tempest.scenario.general_logger import GeneralLogger from valet.tests.tempest.scenario.scenario_base import ScenarioTestCase class TestNested(ScenarioTestCase): + """Test Nested Scenario Test Case.""" def test_nested(self): + """Log test_nested and call run test from base.""" logger = GeneralLogger("test_nested") - self.run_test(logger, "affinity_diversity", "/templates/diversity_between_2_affinity.yml") + self.run_test(logger, "affinity_diversity", + "/templates/diversity_between_2_affinity.yml") diff --git a/valet/tests/tempest/scenario/valet_group.py b/valet/tests/tempest/scenario/valet_group.py index a3127e2..271e0d8 100644 --- a/valet/tests/tempest/scenario/valet_group.py +++ b/valet/tests/tempest/scenario/valet_group.py @@ -1,18 +1,20 @@ # # Copyright 2014-2017 AT&T Intellectual Property -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+"""Valet Group.""" + import json import requests import traceback @@ -22,61 +24,73 @@ from valet.tests.functional.valet_validator.common.init import CONF class ValetGroup(object): + """Class to represent valet group for tempest tests.""" def __init__(self): + """Init valet group with url and headers.""" self.groups_url = "%s/groups" % CONF.valet.HOST self.headers = {"X-Auth-Token": Auth.get_auth_token(), "Content-Type": "application/json"} def create_group(self, group_name, group_type): + """Create valet group with name and type.""" grp_data = {"name": group_name, "type": group_type} - return requests.post(self.groups_url, data=json.dumps(grp_data), headers=self.headers) + return requests.post(self.groups_url, data=json.dumps(grp_data), + headers=self.headers) def get_list_groups(self): + """Get list of groups.""" list_response = requests.get(self.groups_url, headers=self.headers) return list_response.json()["groups"] def get_group_details(self, group_id): + """Get url for group with id and return headers (details).""" url = self.groups_url + "/" + group_id return requests.get(url, headers=self.headers) def update_group_members(self, group_id, members=None): + """Update group members from group with id.""" add_member_url = self.groups_url + "/%s/members" % group_id data = json.dumps({"members": [members or Auth.get_project_id()]}) return requests.put(add_member_url, data=data, headers=self.headers) def update_group(self, group_id, new_description): + """Update group_id with new description.""" url = self.groups_url + "/" + group_id new_data = json.dumps({"description": new_description}) return requests.put(url, new_data, headers=self.headers) def delete_group_member(self, group_id, member_id): + """Delete member with id from group (group_id).""" url = self.groups_url + "/%s/members/%s" % (group_id, member_id) return requests.delete(url, headers=self.headers) def delete_all_group_member(self, group_id): + """Delete all group members belonging to group with 
group_id.""" url = self.groups_url + "/%s/members" % group_id return requests.delete(url, headers=self.headers) def delete_group(self, group_id): + """Delete group with matching id.""" url = self.groups_url + "/%s" % group_id return requests.delete(url, headers=self.headers) def get_group_id_and_members(self, group_name, group_type="exclusivity"): - ''' Checks if group name exists, if not - creates it + """Check if group name exists, if not - creates it. returns group's id and members list - ''' + """ group_details = self.check_group_exists(group_name) try: if group_details is None: GeneralLogger.log_info("Creating group") create_response = self.create_group(group_name, group_type) - return create_response.json()["id"], create_response.json()["members"] + return create_response.json()["id"], \ + create_response.json()["members"] else: GeneralLogger.log_info("Group exists") @@ -85,17 +99,18 @@ class ValetGroup(object): GeneralLogger.log_error(traceback.format_exc()) def add_group_member(self, group_details): - ''' Checks if member exists in group, if not - adds it ''' + """Check if member exists in group, if not - adds it.""" # group_details - group id, group members try: if Auth.get_project_id() not in group_details[1]: GeneralLogger.log_info("Adding member to group") self.update_group_members(group_details[0]) except Exception: - GeneralLogger.log_error("Failed to add group member", traceback.format_exc()) + GeneralLogger.log_error("Failed to add group member", + traceback.format_exc()) def check_group_exists(self, group_name): - ''' Checks if group exists in group list, if not returns None ''' + """Check if group exists in group list, if not returns None.""" for grp in self.get_list_groups(): if grp["name"] == group_name: return grp["id"], grp["members"] @@ -103,9 +118,13 @@ class ValetGroup(object): return None def delete_all_groups(self): + """Delete all groups, return 204 if successful.""" DELETED = 204 for group in self.get_list_groups(): - codes = 
[self.delete_all_group_member(group["id"]).status_code, self.delete_group(group["id"]).status_code] + codes = [ + self.delete_all_group_member(group["id"]).status_code, + self.delete_group(group["id"]).status_code + ] res = filter(lambda a: a != DELETED, codes) if res: diff --git a/valet/tests/tempest/services/client.py b/valet/tests/tempest/services/client.py index 63b610d..128e6af 100644 --- a/valet/tests/tempest/services/client.py +++ b/valet/tests/tempest/services/client.py @@ -1,26 +1,26 @@ -#!/usr/bin/env python # # Copyright 2014-2017 AT&T Intellectual Property -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +"""Client.""" + import json from tempest_lib.common import rest_client class ValetClient(rest_client.RestClient): - """Tempest REST client for Valet. 
Implements @@ -35,11 +35,13 @@ class ValetClient(rest_client.RestClient): return rest_client.ResponseBody(resp, body) def list_groups(self): + """List all groups.""" resp, body = self.get('/groups') self.expected_success(200, resp.status) return self._resp_helper(resp, body) def create_group(self, name, group_type, description): + """Create group with name, type and description.""" params = { "name": name, "type": group_type, @@ -51,11 +53,13 @@ class ValetClient(rest_client.RestClient): return self._resp_helper(resp, body) def delete_group(self, group_id): + """Delete group with id.""" resp, body = self.delete('/groups/%s' % str(group_id)) self.expected_success(204, resp.status) return self._resp_helper(resp, body) def update_group(self, group_id, description): + """Update group description param for group with matching id.""" params = { 'description': description } @@ -65,11 +69,13 @@ class ValetClient(rest_client.RestClient): return self._resp_helper(resp, body) def show_group(self, group_id): + """Show group corresponding to passed in id.""" resp, body = self.get('/groups/%s' % group_id) self.expected_success(200, resp.status) return self._resp_helper(resp, body) def add_members(self, group_id, members): + """Add members to corresponding group (group_id).""" params = { "members": members } @@ -79,23 +85,27 @@ class ValetClient(rest_client.RestClient): return self._resp_helper(resp, body) def verify_membership(self, group_id, member_id): + """Verify member (member_id) is part of group (group_id).""" resp, body = self.get('/groups/%s/members/%s' % (str(group_id), str(member_id))) self.expected_success(204, resp.status) return self._resp_helper(resp, body) def delete_member(self, group_id, member_id): + """Delete single member (member_id) of a group (group_id).""" resp, body = self.delete('/groups/%s/members/%s' % (str(group_id), str(member_id))) self.expected_success(204, resp.status) return self._resp_helper(resp, body) def delete_all_members(self, group_id): + 
"""Delete all members of group.""" resp, body = self.delete('/groups/%s/members' % (str(group_id))) self.expected_success(204, resp.status) return self._resp_helper(resp, body) def create_plan(self, plan_name, resources, stack_id): + """Create plan with name, resources and stack id.""" params = { "plan_name": plan_name, "stack_id": stack_id, @@ -107,6 +117,7 @@ class ValetClient(rest_client.RestClient): return self._resp_helper(resp, body) def update_plan(self, plan_id, action, excluded_hosts, resources): + """Update action, excluded hosts and resources of plan with id.""" params = { "action": action, "excluded_hosts": excluded_hosts, @@ -118,6 +129,7 @@ class ValetClient(rest_client.RestClient): return self._resp_helper(resp, body) def delete_plan(self, plan_id): + """Delete plan with matching id.""" resp, body = self.delete('/plans/%s' % (str(plan_id))) self.expected_success(204, resp.status) return self._resp_helper(resp, body) diff --git a/valet/tests/unit/api/common/test_hooks.py b/valet/tests/unit/api/common/test_hooks.py index 964b64c..6bb35bc 100644 --- a/valet/tests/unit/api/common/test_hooks.py +++ b/valet/tests/unit/api/common/test_hooks.py @@ -1,18 +1,20 @@ # # Copyright 2014-2017 AT&T Intellectual Property -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+"""Test Hooks.""" + import mock import valet.api.common.hooks as hooks from valet.api.common.hooks import MessageNotificationHook @@ -20,8 +22,10 @@ from valet.tests.unit.api.v1.api_base import ApiBase class TestHooks(ApiBase): + """Test Hooks Class to test api hooks.""" def setUp(self): + """Setup Test Hooks.""" super(TestHooks, self).setUp() self.message_notification_hook = MessageNotificationHook() @@ -30,27 +34,31 @@ class TestHooks(ApiBase): @mock.patch.object(hooks, 'conf') @mock.patch.object(hooks, 'webob') def test_after_ok(self, mock_bob, mock_conf, mock_threading): + """Test message notification hook after with an ok.""" mock_bob.exc.status_map = {"test_status_code": State} mock_bob.exc.HTTPOk = State mock_conf.messaging.notifier.return_value = "notifier" mock_conf.messaging.timeout = 1 self.message_notification_hook.after(State) - # print (dir(mock_conf)) - # self.validate_test(mock_conf.messaging.notifier.info.called) self.validate_test(mock_threading.Thread.called) - mock_threading.Thread.assert_called_once_with(target=mock_conf.messaging.notifier.info, args=( - {}, - 'api', {'response': {'body': State.response.body, 'status_code': State.response.status_code}, - 'context': State.request.context, - 'request': {'path': 'test_path', 'method': 'test_method', 'body': None}} - ), ) + mock_threading.Thread.assert_called_once_with( + target=mock_conf.messaging.notifier.info, args=( + {}, + 'api', {'response': {'body': State.response.body, + 'status_code': State.response.status_code}, + 'context': State.request.context, + 'request': {'path': 'test_path', + 'method': 'test_method', + 'body': None}} + ), ) @mock.patch.object(hooks, 'threading') @mock.patch.object(hooks, 'conf') @mock.patch.object(hooks, 'webob') def test_after_with_error(self, mock_bob, mock_conf, mock_threading): + """Test message notification hook after with an error.""" mock_bob.exc.status_map = {"test_status_code": State} mock_conf.messaging.notifier.return_value = "notifier" 
mock_conf.messaging.timeout = 1 @@ -58,23 +66,32 @@ class TestHooks(ApiBase): mock_bob.exc.HTTPOk = ApiBase self.message_notification_hook.after(State) - # self.validate_test(mock_conf.messaging.notifier.error.called) self.validate_test(mock_threading.Thread.called) - mock_threading.Thread.assert_called_once_with(target=mock_conf.messaging.notifier.error, args=( - {}, - 'api', {'response': {'body': State.response.body, 'status_code': State.response.status_code}, - 'context': State.request.context, - 'request': {'path': 'test_path', 'method': 'test_method', 'body': None}} - ), ) + mock_threading.Thread.assert_called_once_with( + target=mock_conf.messaging.notifier.error, args=( + {}, + 'api', {'response': {'body': State.response.body, + 'status_code': State.response.status_code}, + 'context': State.request.context, + 'request': {'path': 'test_path', + 'method': 'test_method', + 'body': None}} + ), ) class State(object): + """State Class.""" + class response(object): + """Response Class.""" + status_code = "test_status_code" body = "test_body" class request(object): + """Request Class.""" + path = "test_path" method = "test_method" body = "test_req_body" diff --git a/valet/tests/unit/api/common/test_identity.py b/valet/tests/unit/api/common/test_identity.py index 07982b2..0b618aa 100644 --- a/valet/tests/unit/api/common/test_identity.py +++ b/valet/tests/unit/api/common/test_identity.py @@ -1,62 +1,92 @@ # # Copyright 2014-2017 AT&T Intellectual Property -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. +"""Test Identity.""" + import mock from valet.api.common.identity import Identity from valet.tests.unit.api.v1.api_base import ApiBase class TestIdentity(ApiBase): + """Test Identity Class.""" def setUp(self): + """Setup Test Identity.""" super(TestIdentity, self).setUp() - kwargs = {'username': 'admin', 'tenant_name': 'demo', 'password': 'qwer4321', 'auth_url': 'http://controller:5000/v2.0'} + kwargs = {'username': 'admin', + 'tenant_name': 'demo', + 'password': 'qwer4321', + 'auth_url': 'http://controller:5000/v2.0'} self.identity = Identity(**kwargs) def test_is_token_admin(self): + """Test if passed in token is an Admin.""" self.validate_test(self.identity.is_token_admin(TokenT)) self.validate_test(not self.identity.is_token_admin(TokenF)) def test_tenant_from_token(self): - self.validate_test(self.identity.tenant_from_token(TokenT) == "cb9c9997fc6e41cc87186de92aa0a099") + """Test tenant from ID in Token.""" + self.validate_test( + self.identity.tenant_from_token(TokenT) == + "cb9c9997fc6e41cc87186de92aa0a099") def test_user_from_token(self): - self.validate_test(self.identity.user_from_token(TokenT) == "cb9c9997fc6e41cc87186de92aa0a099") + """Test identity from user ID from token.""" + self.validate_test( + self.identity.user_from_token(TokenT) == + "cb9c9997fc6e41cc87186de92aa0a099") def test_client(self): + """Test Client.""" with mock.patch('valet.api.common.identity.client'): self.identity.client() def test_validate_token(self): + """Validate identity auth token.""" self.validate_test(self.identity.validate_token("auth_token") is None) with mock.patch('valet.api.common.identity.client'): - self.validate_test(self.identity.validate_token("auth_token") is not None) + self.validate_test( + self.identity.validate_token("auth_token") is not None) def test_is_tenant_list_validself(self): + """Test to see if tenant list is valid.""" with 
mock.patch('valet.api.common.identity.client'): - self.validate_test(self.identity.is_tenant_list_valid(["a", "b"]) is False) + self.validate_test( + self.identity.is_tenant_list_valid(["a", "b"]) is False) class TokenT(object): - user = {'roles': [{'name': 'user'}, {'name': 'heat_stack_owner'}, {'name': 'admin'}], 'id': 'cb9c9997fc6e41cc87186de92aa0a099'} - tenant = {'description': 'Demo Project', 'enabled': True, 'id': 'cb9c9997fc6e41cc87186de92aa0a099'} + """Token for User and Tenant with user roles filled.""" + + user = {'roles': [{'name': 'user'}, + {'name': 'heat_stack_owner'}, + {'name': 'admin'}], + 'id': 'cb9c9997fc6e41cc87186de92aa0a099'} + tenant = {'description': 'Demo Project', + 'enabled': True, + 'id': 'cb9c9997fc6e41cc87186de92aa0a099'} class TokenF(object): + """Token for User and Tenant with empty user role list.""" + user = {'roles': []} - tenant = {'description': 'Demo Project', 'enabled': True, 'id': 'cb9c9997fc6e41cc87186de92aa0a099'} + tenant = {'description': 'Demo Project', + 'enabled': True, + 'id': 'cb9c9997fc6e41cc87186de92aa0a099'} diff --git a/valet/tests/unit/api/common/test_messaging.py b/valet/tests/unit/api/common/test_messaging.py index 675cdeb..aa9d99b 100644 --- a/valet/tests/unit/api/common/test_messaging.py +++ b/valet/tests/unit/api/common/test_messaging.py @@ -1,32 +1,37 @@ # # Copyright 2014-2017 AT&T Intellectual Property -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+"""Test Messaging.""" + import mock import valet.api.common.messaging as messaging from valet.tests.unit.api.v1.api_base import ApiBase class TestMessaging(ApiBase): + """Test Messaging Class for messaging api.""" def setUp(self): + """Setup test messaging.""" super(TestMessaging, self).setUp() @mock.patch.object(messaging, 'cfg') @mock.patch.object(messaging, 'conf') @mock.patch.object(messaging, 'messaging') def test_messaging(self, mock_msg, mock_conf, mock_cfg): + """Validate messaging in api.""" mock_conf.messaging.config = {"transport_url": "test_transport_url"} mock_msg.get_transport.return_value = "get_transport_method" mock_msg.Notifier.return_value = "Notifier" diff --git a/valet/tests/unit/api/common/test_ostro_helper.py b/valet/tests/unit/api/common/test_ostro_helper.py index 2ab6bc4..42ef79d 100644 --- a/valet/tests/unit/api/common/test_ostro_helper.py +++ b/valet/tests/unit/api/common/test_ostro_helper.py @@ -1,18 +1,20 @@ # # Copyright 2014-2017 AT&T Intellectual Property -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+"""Test Ostro Helper.""" + import mock import valet.api.common.ostro_helper as helper from valet.api.common.ostro_helper import Ostro @@ -21,14 +23,17 @@ from valet.tests.unit.api.v1.api_base import ApiBase class TestOstroHelper(ApiBase): + """Test Ostro (Engine) Helper Class.""" def setUp(self): + """Setup Test Ostro and call init Ostro.""" super(TestOstroHelper, self).setUp() self.ostro = self.init_Ostro() @mock.patch.object(helper, 'conf') def init_Ostro(self, mock_conf): + """Init Engine(Ostro) and return.""" mock_conf.ostro = {} mock_conf.ostro["tries"] = 10 mock_conf.ostro["interval"] = 1 @@ -36,48 +41,61 @@ class TestOstroHelper(ApiBase): return Ostro() def test_build_request(self): + """Test Build Request in Engine API using many different kwargs.""" kwargs = {'tenant_id': 'test_tenant_id', 'args': {'stack_id': 'test_stack_id', 'plan_name': 'test_plan_name', - 'resources': {'test_resource': {'Type': 'ATT::Valet::GroupAssignment', - 'Properties': {'resources': ['my-instance-1', 'my-instance-2'], - 'group_type': 'affinity', - 'level': 'host'}, - 'name': 'test-affinity-group3'}}}} + 'resources': {'test_resource': { + 'Type': 'ATT::Valet::GroupAssignment', + 'Properties': { + 'resources': ['my-instance-1', + 'my-instance-2'], + 'group_type': 'affinity', + 'level': 'host'}, + 'name': 'test-affinity-group3'}}}} self.validate_test(self.ostro.build_request(**kwargs)) kwargs = {'tenant_id': 'test_tenant_id', 'args': {'stack_id': 'test_stack_id', 'plan_name': 'test_plan_name', - 'resources': {'test_resource': {'Type': 'ATT::Valet::GroupAssignment', - 'Properties': {'resources': ['my-instance-1', 'my-instance-2'], - 'group_type': 'affinity', - 'group_name': "test_group_name", - 'level': 'host'}, - 'name': 'test-affinity-group3'}}}} + 'resources': {'test_resource': { + 'Type': 'ATT::Valet::GroupAssignment', + 'Properties': { + 'resources': ['my-instance-1', + 'my-instance-2'], + 'group_type': 'affinity', + 'group_name': "test_group_name", + 'level': 'host'}, + 'name': 
'test-affinity-group3'}}}} self.validate_test(not self.ostro.build_request(**kwargs)) self.validate_test("conflict" in self.ostro.error_uri) kwargs = {'tenant_id': 'test_tenant_id', 'args': {'stack_id': 'test_stack_id', 'plan_name': 'test_plan_name', - 'resources': {'test_resource': {'Type': 'ATT::Valet::GroupAssignment', - 'Properties': {'resources': ['my-instance-1', 'my-instance-2'], - 'group_type': 'exclusivity', - 'level': 'host'}, - 'name': 'test-affinity-group3'}}}} + 'resources': {'test_resource': { + 'Type': 'ATT::Valet::GroupAssignment', + 'Properties': { + 'resources': ['my-instance-1', + 'my-instance-2'], + 'group_type': 'exclusivity', + 'level': 'host'}, + 'name': 'test-affinity-group3'}}}} self.validate_test(not self.ostro.build_request(**kwargs)) self.validate_test("invalid" in self.ostro.error_uri) kwargs = {'tenant_id': 'test_tenant_id', 'args': {'stack_id': 'test_stack_id', 'plan_name': 'test_plan_name', - 'resources': {'test_resource': {'Type': 'ATT::Valet::GroupAssignment', - 'Properties': {'resources': ['my-instance-1', 'my-instance-2'], - 'group_type': 'exclusivity', - 'group_name': "test_group_name", - 'level': 'host'}, - 'name': 'test-affinity-group3'}}}} + 'resources': {'test_resource': { + 'Type': 'ATT::Valet::GroupAssignment', + 'Properties': { + 'resources': ['my-instance-1', + 'my-instance-2'], + 'group_type': 'exclusivity', + 'group_name': "test_group_name", + 'level': 'host'}, + 'name': 'test-affinity-group3'}}}} self.validate_test(not self.ostro.build_request(**kwargs)) self.validate_test("not_found" in self.ostro.error_uri) @@ -85,30 +103,38 @@ class TestOstroHelper(ApiBase): 'args': {'stack_id': 'test_stack_id', 'plan_name': 'test_plan_name', 'timeout': '60 sec', - 'resources': {'ca039d18-1976-4e13-b083-edb12b806e25': {'Type': 'ATT::Valet::GroupAssignment', - 'Properties': {'resources': ['my-instance-1', 'my-instance-2'], - 'group_type': 'non_type', - 'group_name': "test_group_name", - 'level': 'host'}, - 'name': 
'test-affinity-group3'}}}} + 'resources': { + 'ca039d18-1976-4e13-b083-edb12b806e25': { + 'Type': 'ATT::Valet::GroupAssignment', + 'Properties': { + 'resources': ['my-instance-1', + 'my-instance-2'], + 'group_type': 'non_type', + 'group_name': "test_group_name", + 'level': 'host'}, + 'name': 'test-affinity-group3'}}}} self.validate_test(not self.ostro.build_request(**kwargs)) self.validate_test("invalid" in self.ostro.error_uri) @mock.patch.object(helper, 'uuid') def test_ping(self, mock_uuid): + """Validate engine ping by checking engine request equality.""" mock_uuid.uuid4.return_value = "test_stack_id" self.ostro.ping() self.validate_test(self.ostro.request['stack_id'] == "test_stack_id") def test_is_request_serviceable(self): - self.ostro.request = {'resources': {"bla": {'type': "OS::Nova::Server"}}} + """Validate if engine request serviceable.""" + self.ostro.request = { + 'resources': {"bla": {'type': "OS::Nova::Server"}}} self.validate_test(self.ostro.is_request_serviceable()) self.ostro.request = {} self.validate_test(not self.ostro.is_request_serviceable()) def test_replan(self): + """Validate engine replan.""" kwargs = {'args': {'stack_id': 'test_stack_id', 'locations': 'test_locations', 'orchestration_id': 'test_orchestration_id', @@ -117,21 +143,31 @@ class TestOstroHelper(ApiBase): self.validate_test(self.ostro.request['stack_id'] == "test_stack_id") self.validate_test(self.ostro.request['locations'] == "test_locations") - self.validate_test(self.ostro.request['orchestration_id'] == "test_orchestration_id") - self.validate_test(self.ostro.request['exclusions'] == "test_exclusions") + + self.validate_test( + self.ostro.request['orchestration_id'] == "test_orchestration_id") + + self.validate_test( + self.ostro.request['exclusions'] == "test_exclusions") def test_migrate(self): + """Validate engine migrate.""" kwargs = {'args': {'stack_id': 'test_stack_id', 'excluded_hosts': 'test_excluded_hosts', 'orchestration_id': 'test_orchestration_id'}} 
self.ostro.migrate(**kwargs) self.validate_test(self.ostro.request['stack_id'] == "test_stack_id") - self.validate_test(self.ostro.request['excluded_hosts'] == "test_excluded_hosts") - self.validate_test(self.ostro.request['orchestration_id'] == "test_orchestration_id") + + self.validate_test( + self.ostro.request['excluded_hosts'] == "test_excluded_hosts") + + self.validate_test( + self.ostro.request['orchestration_id'] == "test_orchestration_id") @mock.patch.object(helper, 'uuid') def test_query(self, mock_uuid): + """Validate test query by validating several engine requests.""" mock_uuid.uuid4.return_value = "test_stack_id" kwargs = {'args': {'type': 'test_type', 'parameters': 'test_parameters'}} @@ -139,11 +175,14 @@ class TestOstroHelper(ApiBase): self.validate_test(self.ostro.request['stack_id'] == "test_stack_id") self.validate_test(self.ostro.request['type'] == "test_type") - self.validate_test(self.ostro.request['parameters'] == "test_parameters") + + self.validate_test( + self.ostro.request['parameters'] == "test_parameters") @mock.patch.object(models, 'PlacementRequest', mock.MagicMock) @mock.patch.object(models, 'Query', mock.MagicMock) def test_send(self): + """Validate test send by checking engine server error.""" self.ostro.args = {'stack_id': 'test_stack_id'} self.ostro.send() self.validate_test("server_error" in self.ostro.error_uri) diff --git a/valet/tests/unit/api/db/test_groups.py b/valet/tests/unit/api/db/test_groups.py index 4ed94ff..6a36142 100644 --- a/valet/tests/unit/api/db/test_groups.py +++ b/valet/tests/unit/api/db/test_groups.py @@ -1,18 +1,20 @@ # # Copyright 2014-2017 AT&T Intellectual Property -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +"""Test Groups.""" + import mock from valet.api.db.models.music import Base from valet.api.db.models.music.groups import Group @@ -20,23 +22,27 @@ from valet.tests.unit.api.v1.api_base import ApiBase class TestGroups(ApiBase): - '''Unit tests for valet.api.v1.controllers.placements ''' + """Unit tests for valet.api.v1.controllers.placements.""" def setUp(self): + """Setup Test Groups by calling super setup and init.""" super(TestGroups, self).setUp() self.group = self.init_group() @mock.patch.object(Base, 'insert') def init_group(self, mock_insert): + """Init a test group object and return.""" mock_insert.return_value = None members = ["me", "you"] return Group("test_name", "test_description", "test_type", members) def test__repr__(self): + """Validate test name in group repr.""" self.validate_test("test_name" in self.group.__repr__()) def test__json__(self): + """Test json of a group object.""" json = self.group.__json__() self.validate_test(json["name"] == "test_name") @@ -44,12 +50,15 @@ class TestGroups(ApiBase): self.validate_test(json["description"] == "test_description") def test_pk_name(self): + """Test pk name of a group object.""" self.validate_test(self.group.pk_name() == "id") def test_pk_value(self): + """Test pk value of a group object.""" self.validate_test(self.group.pk_value() is None) def test_values(self): + """Test values (name, type, description) of a group object.""" val = self.group.values() self.validate_test(val["name"] == "test_name") diff --git a/valet/tests/unit/api/db/test_ostro.py b/valet/tests/unit/api/db/test_ostro.py index 
3a55a27..43b321a 100644 --- a/valet/tests/unit/api/db/test_ostro.py +++ b/valet/tests/unit/api/db/test_ostro.py @@ -1,26 +1,29 @@ # # Copyright 2014-2017 AT&T Intellectual Property -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +"""Test Ostro(Engine).""" + from valet.api.db.models.music.ostro import PlacementRequest, PlacementResult, Event from valet.tests.unit.api.v1.api_base import ApiBase class TestOstro(ApiBase): - '''Unit tests for valet.api.v1.controllers.placements ''' + """Unit tests for valet.api.v1.controllers.placements.""" def setUp(self): + """Setup Test Ostro and call class inits (event and placements).""" super(TestOstro, self).setUp() self.placement_request = self.init_PlacementRequest() @@ -30,15 +33,19 @@ class TestOstro(ApiBase): self.event = self.init_Event() def init_PlacementRequest(self): + """Return init test placement request.""" return PlacementRequest("test_request", "test_stack_id", False) def init_PlacementResult(self): + """Return init test placement result.""" return PlacementResult("test_placement", "test_stack_id", False) def init_Event(self): + """Return init test event.""" return Event("test_event", "test_event_id", False) def test__repr__(self): + """Test test id in placement request/result and event.""" self.validate_test("test_stack_id" in self.placement_request.__repr__()) self.validate_test("test_stack_id" in self.placement_result.__repr__()) @@ -46,6 +53,7 @@ class TestOstro(ApiBase): self.validate_test("test_event_id" in 
self.event.__repr__()) def test__json__(self): + """Test json return value for placement request, result and event.""" request_json = self.placement_request.__json__() self.validate_test(request_json["request"] == "test_request") @@ -62,6 +70,7 @@ class TestOstro(ApiBase): self.validate_test(event_json["event"] == "test_event") def test_pk_name(self): + """Test placement request, result and event's pk name.""" self.validate_test(self.placement_request.pk_name() == "stack_id") self.validate_test(self.placement_result.pk_name() == "stack_id") @@ -69,6 +78,7 @@ class TestOstro(ApiBase): self.validate_test(self.event.pk_name() == "event_id") def test_pk_value(self): + """Test placement request, result and events' pk values.""" self.validate_test(self.placement_request.pk_value() == "test_stack_id") self.validate_test(self.placement_result.pk_value() == "test_stack_id") @@ -76,6 +86,7 @@ class TestOstro(ApiBase): self.validate_test(self.event.pk_value() == "test_event_id") def test_values(self): + """Test request, result and event values.""" request_val = self.placement_request.values() self.validate_test(request_val["request"] == "test_request") @@ -92,6 +103,7 @@ class TestOstro(ApiBase): self.validate_test(event_val["event_id"] == "test_event_id") def test_schema(self): + """Validate request, result and event schemas.""" request_schema = self.placement_request.schema() self.validate_test(request_schema["request"] == "text") diff --git a/valet/tests/unit/api/db/test_placements.py b/valet/tests/unit/api/db/test_placements.py index 15c6bad..d401774 100644 --- a/valet/tests/unit/api/db/test_placements.py +++ b/valet/tests/unit/api/db/test_placements.py @@ -1,18 +1,20 @@ # # Copyright 2014-2017 AT&T Intellectual Property -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +"""Test Placements.""" + import mock from valet.api.db.models.music import Base from valet.api.db.models import Placement, Plan @@ -20,22 +22,30 @@ from valet.tests.unit.api.v1.api_base import ApiBase class TestPlacement(ApiBase): - '''Unit tests for valet.api.v1.controllers.placements ''' + """Unit tests for valet.api.v1.controllers.placements.""" def setUp(self): + """Setup test placements and call init placement.""" super(TestPlacement, self).setUp() self.placement = self.init_Placement() @mock.patch.object(Base, 'insert') def init_Placement(self, mock_insert): + """Return init test placement object for class init.""" mock_insert.return_value = None - return Placement("test_name", "test_orchestration_id", plan=Plan("plan_name", "stack_id", _insert=False), location="test_location", _insert=False) + return Placement("test_name", + "test_orchestration_id", + plan=Plan("plan_name", "stack_id", _insert=False), + location="test_location", + _insert=False) def test__repr__(self): + """Test name from placement repr.""" self.validate_test("test_name" in self.placement.__repr__()) def test__json__(self): + """Test json return value of placement object.""" json = self.placement.__json__() self.validate_test(json["name"] == "test_name") @@ -43,12 +53,15 @@ class TestPlacement(ApiBase): self.validate_test(json["orchestration_id"] == "test_orchestration_id") def test_pk_name(self): + """Test placement pk name is id.""" self.validate_test(self.placement.pk_name() == "id") def test_pk_value(self): + """Test placement pk value is none.""" 
self.validate_test(self.placement.pk_value() is None) def test_values(self): + """Test placement values (name, location, orchestration id).""" val = self.placement.values() self.validate_test(val["name"] == "test_name") diff --git a/valet/tests/unit/api/db/test_plans.py b/valet/tests/unit/api/db/test_plans.py index 8443e03..6e5fd40 100644 --- a/valet/tests/unit/api/db/test_plans.py +++ b/valet/tests/unit/api/db/test_plans.py @@ -1,49 +1,58 @@ # # Copyright 2014-2017 AT&T Intellectual Property -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+"""Test Plans.""" + from valet.api.db.models import Plan from valet.tests.unit.api.v1.api_base import ApiBase class TestPlans(ApiBase): - '''Unit tests for valet.api.v1.controllers.placements ''' + """Unit tests for valet.api.v1.controllers.placements.""" def setUp(self): + """Setup Test Plans and call init plan.""" super(TestPlans, self).setUp() self.plan = self.init_Plan() def init_Plan(self): + """Return init plan test object.""" return Plan("test_name", "test_stack_id", _insert=False) def test__repr__(self): + """Validate test name in plan repr.""" self.validate_test("test_name" in self.plan.__repr__()) def test__json__(self): + """Validate jason return for plan object.""" json = self.plan.__json__() self.validate_test(json["name"] == "test_name") self.validate_test(json["stack_id"] == "test_stack_id") def test_pk_name(self): + """Validate plan pk name is id.""" self.validate_test(self.plan.pk_name() == "id") def test_pk_value(self): + """Validate plan pk value is none.""" self.validate_test(self.plan.pk_value() is None) def test_values(self): + """Validate plan values for name and stack_id.""" val = self.plan.values() self.validate_test(val["name"] == "test_name") diff --git a/valet/tests/unit/api/v1/api_base.py b/valet/tests/unit/api/v1/api_base.py index 13b88dc..3c4f7ff 100644 --- a/valet/tests/unit/api/v1/api_base.py +++ b/valet/tests/unit/api/v1/api_base.py @@ -1,26 +1,30 @@ # # Copyright 2014-2017 AT&T Intellectual Property -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. +"""Api Base.""" + import mock import pecan from valet.tests.base import Base class ApiBase(Base): + """Api Base Test Class, calls valet tests base.""" def setUp(self): + """Setup api base and mock pecan identity/music/state.""" super(ApiBase, self).setUp() pecan.conf.identity = mock.MagicMock() pecan.conf.music = mock.MagicMock() @@ -29,4 +33,5 @@ class ApiBase(Base): @classmethod def mock_error(cls, url, msg=None, **kwargs): + """Mock error and set response to msg.""" cls.response = msg diff --git a/valet/tests/unit/api/v1/test_groups.py b/valet/tests/unit/api/v1/test_groups.py index 3108572..dfa0dce 100644 --- a/valet/tests/unit/api/v1/test_groups.py +++ b/valet/tests/unit/api/v1/test_groups.py @@ -1,18 +1,20 @@ # # Copyright 2014-2017 AT&T Intellectual Property -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+"""Test Groups.""" + import mock import pecan from valet.api.db.models.music.groups import Group @@ -23,14 +25,13 @@ from valet.tests.unit.api.v1.api_base import ApiBase class TestGroups(ApiBase): - ''' Unit tests for valet.api.v1.controllers.groups ''' + """Unit tests for valet.api.v1.controllers.groups.""" def setUp(self): + """Setup TestGroups by initializing Groups and Members controllers.""" super(TestGroups, self).setUp() self.tenant_id = "testprojectid" - # core.state = mock.MagicMock() - # Testing class GroupsController self.groups_controller = GroupsController() @@ -46,8 +47,12 @@ class TestGroups(ApiBase): @mock.patch.object(groups, 'request') @mock.patch.object(Query, 'filter_by') def init_GroupsItemController(self, mock_filter, mock_request): + """Called by Setup, return GroupsItemController object with id.""" mock_request.context = {} - mock_filter.return_value = Results([Group("test_name", "test_description", "test_type", None)]) + mock_filter.return_value = Results([Group("test_name", + "test_description", + "test_type", + None)]) contrler = GroupsItemController("group_id") self.validate_test("test_name" == groups.request.context['group'].name) @@ -55,13 +60,18 @@ class TestGroups(ApiBase): @mock.patch.object(groups, 'error', ApiBase.mock_error) def test_init_GroupsItemController_unhappy(self): + """Test GroupsItemController when 'Group not found'.""" GroupsItemController("group_id") self.validate_test("Group not found" in TestGroups.response) @mock.patch.object(groups, 'error', ApiBase.mock_error) @mock.patch.object(groups, 'request') def init_MembersItemController(self, mock_request): - grp = Group("test_member_item_name", "test_description", "test_type", None) + """Called by Setup, return MembersItemController with demo members.""" + grp = Group("test_member_item_name", + "test_description", + "test_type", + None) grp.members = ["demo members"] mock_request.context = {'group': grp} @@ -69,42 +79,54 @@ class TestGroups(ApiBase): 
self.validate_test("Member not found in group" in TestGroups.response) contrler = MembersItemController("demo members") - self.validate_test("test_member_item_name" == groups.request.context['group'].name) + self.validate_test( + "test_member_item_name" == groups.request.context['group'].name) return contrler def test_allow(self): + """Test group and member controller allow method.""" self.validate_test(self.groups_controller.allow() == 'GET,POST') self.validate_test(self.members_controller.allow() == 'PUT,DELETE') - self.validate_test(self.groups_item_controller.allow() == "GET,PUT,DELETE") + self.validate_test( + self.groups_item_controller.allow() == "GET,PUT,DELETE") self.validate_test(self.members_item_controller.allow() == "GET,DELETE") @mock.patch.object(groups, 'error', ApiBase.mock_error) @mock.patch.object(groups, 'request') def test_index(self, mock_request): + """Test controller index method with requests HEAD,GET,POST,PUT.""" mock_request.method = "HEAD" self.groups_controller.index() - self.validate_test("The HEAD method is not allowed" in TestGroups.response) + self.validate_test("The HEAD method is not allowed" in + TestGroups.response) mock_request.method = "GET" self.members_controller.index() - self.validate_test("The GET method is not allowed" in TestGroups.response) + self.validate_test("The GET method is not allowed" in + TestGroups.response) mock_request.method = "POST" self.groups_item_controller.index() - self.validate_test("The POST method is not allowed" in TestGroups.response) + self.validate_test("The POST method is not allowed" in + TestGroups.response) mock_request.method = "PUT" self.members_item_controller.index() - self.validate_test("The PUT method is not allowed" in TestGroups.response) + self.validate_test("The PUT method is not allowed" in + TestGroups.response) @mock.patch.object(groups, 'request') def index_put(self, mock_request): + """Test members_controller index_put method, check status/tenant_id.""" 
pecan.conf.identity.engine.is_tenant_list_valid.return_value = True - mock_request.context = {'group': Group("test_name", "test_description", "test_type", None)} + mock_request.context = {'group': Group("test_name", + "test_description", + "test_type", + None)} r = self.members_controller.index_put(members=[self.tenant_id]) self.validate_test(groups.response.status == 201) @@ -115,16 +137,22 @@ class TestGroups(ApiBase): @mock.patch.object(groups, 'error', ApiBase.mock_error) @mock.patch.object(groups, 'request') def test_index_put_unhappy(self, mock_request): + """Test members_controller index_put method with invalid tenants.""" pecan.conf.identity.engine.is_tenant_list_valid.return_value = False - mock_request.context = {'group': Group("test_name", "test_description", "test_type", None)} + mock_request.context = {'group': Group("test_name", + "test_description", + "test_type", + None)} self.members_controller.index_put(members=[self.tenant_id]) - self.validate_test("Member list contains invalid tenant IDs" in TestGroups.response) + self.validate_test("Member list contains invalid tenant IDs" in + TestGroups.response) @mock.patch.object(groups, 'tenant_servers_in_group') @mock.patch.object(groups, 'request') def test_index_put_delete(self, mock_request, mock_func): + """Test members_controller index_delete method.""" grp_with_member = self.index_put() mock_request.context = {'group': grp_with_member} @@ -137,6 +165,7 @@ class TestGroups(ApiBase): @mock.patch.object(groups, 'tenant_servers_in_group') @mock.patch.object(groups, 'request') def test_index_delete_member_item_controller(self, mock_request, mock_func): + """Members_item_controller index_delete, check status and members.""" grp = Group("test_name", "test_description", "test_type", None) grp.members = ["demo members"] @@ -151,7 +180,10 @@ class TestGroups(ApiBase): @mock.patch.object(groups, 'error', ApiBase.mock_error) @mock.patch.object(groups, 'tenant_servers_in_group') @mock.patch.object(groups, 
'request') - def test_index_delete_member_item_controller_unhappy(self, mock_request, mock_func): + def test_index_delete_member_item_controller_unhappy(self, + mock_request, + mock_func): + """Members_item_controller index_delete, check member not found.""" grp = Group("test_name", "test_description", "test_type", None) grp.members = ["demo members"] @@ -166,6 +198,7 @@ class TestGroups(ApiBase): @mock.patch.object(groups, 'tenant_servers_in_group') @mock.patch.object(groups, 'request') def test_index_delete_unhappy(self, mock_request, mock_func): + """Members_controller index_delete, check TestGroups response.""" grp_with_member = self.index_put() mock_request.context = {'group': grp_with_member} @@ -176,13 +209,20 @@ class TestGroups(ApiBase): @mock.patch.object(groups, 'request') def test_index_put_groups_item_controller(self, mock_request): - mock_request.context = {'group': Group("test_name", "test_description", "test_type", None)} + """Test index_put for item_controller, check status and description.""" + mock_request.context = {'group': Group("test_name", + "test_description", + "test_type", + None)} r = self.groups_item_controller.index_put(description="new description") self.validate_test(groups.response.status == 201) self.validate_test(r.description == "new description") - mock_request.context = {'group': Group("test_name", "test_description", "test_type", None)} + mock_request.context = {'group': Group("test_name", + "test_description", + "test_type", + None)} r = self.groups_item_controller.index_put() self.validate_test(groups.response.status == 201) @@ -190,7 +230,11 @@ class TestGroups(ApiBase): @mock.patch.object(groups, 'request') def test_index_delete_groups_item_controller(self, mock_request): - mock_request.context = {'group': Group("test_name", "test_description", "test_type", None)} + """Test groups_item_controller index_delete works, check response.""" + mock_request.context = {'group': Group("test_name", + "test_description", + 
"test_type", + None)} self.groups_item_controller.index_delete() self.validate_test(groups.response.status == 204) @@ -198,22 +242,28 @@ class TestGroups(ApiBase): @mock.patch.object(groups, 'error', ApiBase.mock_error) @mock.patch.object(groups, 'request') def test_index_delete_groups_item_controller_unhappy(self, mock_request): + """Test to check that you can't delete a group with members.""" grp = Group("test_name", "test_description", "test_type", None) grp.members = ["demo members"] mock_request.context = {'group': grp} self.groups_item_controller.index_delete() self.validate_test(groups.response.status == 204) - self.validate_test("Unable to delete a Group with members." in TestGroups.response) + self.validate_test("Unable to delete a Group with members." in + TestGroups.response) @mock.patch.object(groups, 'request') @mock.patch.object(Query, 'all') def test_index_get(self, mock_all, mock_request): + """Groups_controller index_get method, check response to verify.""" all_groups = ["group1", "group2", "group3"] mock_all.return_value = all_groups response = self.groups_controller.index_get() - mock_request.context = {'group': Group("test_name", "test_description", "test_type", None)} + mock_request.context = {'group': Group("test_name", + "test_description", + "test_type", + None)} item_controller_response = self.groups_item_controller.index_get() self.members_item_controller.index_get() @@ -225,19 +275,26 @@ class TestGroups(ApiBase): self.validate_test(all_groups == response["groups"]) def test_index_post(self): - group = self.groups_controller.index_post(name="testgroup", description="test description", type="testtype") + """Test group_controller index_post, check status and name.""" + group = self.groups_controller.index_post(name="testgroup", + description="test description", + type="testtype") self.validate_test(groups.response.status == 201) self.validate_test(group.name == "testgroup") @mock.patch.object(groups, 'error', ApiBase.mock_error) def 
test_index_post_unhappy(self): + """Test groups_controller index_post with error.""" pecan.conf.music = None - self.groups_controller.index_post(name="testgroup", description="test description", type="testtype") + self.groups_controller.index_post(name="testgroup", + description="test description", + type="testtype") self.validate_test("Unable to create Group" in TestGroups.response) def test_index_options(self): + """Test controller index_options method, check response status.""" self.groups_item_controller.index_options() self.validate_test(groups.response.status == 204) diff --git a/valet/tests/unit/api/v1/test_placements.py b/valet/tests/unit/api/v1/test_placements.py index e6c7e85..22c4fb5 100644 --- a/valet/tests/unit/api/v1/test_placements.py +++ b/valet/tests/unit/api/v1/test_placements.py @@ -1,18 +1,20 @@ # # Copyright 2014-2017 AT&T Intellectual Property -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+"""Test Placements.""" + import mock import valet.api.v1.controllers.placements as placements from valet.api.v1.controllers.placements import PlacementsController, PlacementsItemController @@ -22,9 +24,10 @@ from valet.tests.unit.api.v1.api_base import ApiBase class TestPlacements(ApiBase): - '''Unit tests for valet.api.v1.controllers.placements ''' + """Unit tests for valet.api.v1.controllers.placements.""" def setUp(self): + """Setup Test Placements, call placements controller/ItemController.""" super(TestPlacements, self).setUp() self.placements_controller = PlacementsController() @@ -34,6 +37,7 @@ class TestPlacements(ApiBase): @mock.patch.object(Query, 'filter_by') @mock.patch.object(placements, 'request') def init_PlacementsItemController(self, mock_request, mock_filter): + """Called by Setup, return PlacementsItemController with uuid4.""" mock_request.context = {} mock_filter.return_value = Results(["", "second"]) try: @@ -43,18 +47,25 @@ class TestPlacements(ApiBase): self.validate_test("Placement not found" in ApiBase.response) mock_filter.return_value = Results([ - Placement("test_name", "test_orchestration_id", plan=Plan("plan_name", "stack_id", _insert=False), location="test_location", _insert=False)]) + Placement("test_name", + "test_orchestration_id", + plan=Plan("plan_name", "stack_id", _insert=False), + location="test_location", + _insert=False)]) return PlacementsItemController("uuid4") def test_allow(self): + """Test placements allow method with GET and GET,POST,DELETE.""" self.validate_test(self.placements_controller.allow() == 'GET') - self.validate_test(self.placements_item_controller.allow() == 'GET,POST,DELETE') + self.validate_test( + self.placements_item_controller.allow() == 'GET,POST,DELETE') @mock.patch.object(placements, 'error', ApiBase.mock_error) @mock.patch.object(placements, 'request') def test_index(self, mock_request): + """Test placements index method with POST and PUT (not allowed).""" mock_request.method = "POST" 
self.placements_controller.index() self.validate_test("The POST method is not allowed" in ApiBase.response) @@ -64,6 +75,7 @@ class TestPlacements(ApiBase): self.validate_test("The PUT method is not allowed" in ApiBase.response) def test_index_options(self): + """Test placements index_options method.""" self.placements_controller.index_options() self.validate_test(placements.response.status == 204) @@ -72,6 +84,7 @@ class TestPlacements(ApiBase): @mock.patch.object(Query, 'all') def test_index_get(self, mock_all): + """Test index_get method for placements, validate based on response.""" all_groups = ["group1", "group2", "group3"] mock_all.return_value = all_groups response = self.placements_controller.index_get() @@ -83,7 +96,8 @@ class TestPlacements(ApiBase): response = self.placements_item_controller.index_get() self.validate_test("test_name" in response['placement'].name) - self.validate_test("test_orchestration_id" in response['placement'].orchestration_id) + self.validate_test("test_orchestration_id" in + response['placement'].orchestration_id) self.validate_test("plan_name" in response['placement'].plan.name) self.validate_test("stack_id" in response['placement'].plan.stack_id) @@ -91,11 +105,13 @@ class TestPlacements(ApiBase): @mock.patch.object(Query, 'filter_by', mock.MagicMock) @mock.patch.object(placements, 'update_placements') def test_index_post(self, mock_plcment): + """Test index_post for placements, validate from response status.""" kwargs = {'resource_id': "resource_id", 'locations': ["test_location"]} self.placements_item_controller.index_post(**kwargs) self.validate_test(placements.response.status == 201) - with mock.patch('valet.api.v1.controllers.placements.Ostro') as mock_ostro: + with mock.patch('valet.api.v1.controllers.placements.Ostro') \ + as mock_ostro: kwargs = {'resource_id': "resource_id", 'locations': [""]} self.placements_item_controller.index_post(**kwargs) self.validate_test("Ostro error:" in ApiBase.response) @@ -103,12 +119,14 
@@ class TestPlacements(ApiBase): mock_plcment.return_value = None status_type = mock.MagicMock() - status_type.response = {"status": {"type": "ok"}, "resources": {"iterkeys": []}} + status_type.response = {"status": {"type": "ok"}, + "resources": {"iterkeys": []}} mock_ostro.return_value = status_type self.placements_item_controller.index_post(**kwargs) self.validate_test(placements.response.status == 201) def test_index_delete(self): + """Test placements_item_controller index_delete method.""" self.placements_item_controller.index_delete() self.validate_test(placements.response.status == 204) diff --git a/valet/tests/unit/api/v1/test_plans.py b/valet/tests/unit/api/v1/test_plans.py index 7bc4471..d03f720 100644 --- a/valet/tests/unit/api/v1/test_plans.py +++ b/valet/tests/unit/api/v1/test_plans.py @@ -1,18 +1,20 @@ # # Copyright 2014-2017 AT&T Intellectual Property -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+"""Test Plans.""" + import mock import valet.api.v1.controllers.plans as plans from valet.api.v1.controllers.plans import PlansController, PlansItemController @@ -22,9 +24,10 @@ from valet.tests.unit.api.v1.api_base import ApiBase class TestPlans(ApiBase): - '''Unit tests for valet.api.v1.controllers.placements ''' + """Unit tests for valet.api.v1.controllers.plans.""" def setUp(self): + """Setup TestPlans, set PlansController, init PlansItemController.""" super(TestPlans, self).setUp() self.plans_controller = PlansController() @@ -34,6 +37,7 @@ class TestPlans(ApiBase): @mock.patch.object(Query, 'filter_by') @mock.patch.object(plans, 'request') def init_PlansItemController(self, mock_request, mock_filter): + """Called by setup, return PlansItemController with uuid4.""" mock_request.context = {} mock_filter.return_value = Results(["", "second"]) try: @@ -42,18 +46,23 @@ class TestPlans(ApiBase): self.validate_test("'str' object has no attribute 'id'" in e) self.validate_test("Plan not found" in ApiBase.response) - mock_filter.return_value = Results([Plan("test_name", "stack_id", _insert=False)]) + mock_filter.return_value = Results([Plan("test_name", + "stack_id", + _insert=False)]) return PlansItemController("uuid4") def test_allow(self): + """Test plans_controller and plans_item_controller allow methods.""" self.validate_test(self.plans_controller.allow() == 'GET,POST') - self.validate_test(self.plans_item_controller.allow() == 'GET,PUT,DELETE') + self.validate_test( + self.plans_item_controller.allow() == 'GET,PUT,DELETE') @mock.patch.object(plans, 'error', ApiBase.mock_error) @mock.patch.object(plans, 'request') def test_index(self, mock_request): + """Test plans and plans_item_controller index method failure.""" mock_request.method = "PUT" self.plans_controller.index() self.validate_test("The PUT method is not allowed" in ApiBase.response) @@ -63,6 +72,7 @@ class TestPlans(ApiBase): self.validate_test("The POST method is not allowed" in ApiBase.response) def 
test_index_options(self): + """Test index_options method for plans and plans_item_controller.""" self.plans_controller.index_options() self.validate_test(plans.response.status == 204) @@ -71,6 +81,7 @@ class TestPlans(ApiBase): @mock.patch.object(Query, 'all') def test_index_get(self, mock_all): + """Test index_get method for plans and plans_item_controller.""" all_groups = ["group1", "group2", "group3"] mock_all.return_value = all_groups response = self.plans_controller.index_get() @@ -86,6 +97,7 @@ class TestPlans(ApiBase): @mock.patch.object(plans, 'error', ApiBase.mock_error) def test_index_post(self): + """Test plans_controller index_post.""" with mock.patch('valet.api.v1.controllers.plans.Ostro'): self.plans_controller.index_post() self.validate_test("Ostro error:" in ApiBase.response) @@ -93,10 +105,13 @@ class TestPlans(ApiBase): @mock.patch.object(plans, 'error', ApiBase.mock_error) @mock.patch.object(Query, 'filter_by', mock.MagicMock) def test_index_put(self): - kwargs = {'action': "migrate", 'excluded_hosts': [], "resources": ["ggg", "fff"]} + """Test plans_item_controller index_put method.""" + kwargs = {'action': "migrate", + 'excluded_hosts': [], + "resources": ["ggg", "fff"]} with mock.patch('valet.api.v1.controllers.plans.Ostro'): self.plans_item_controller.index_put(**kwargs) self.validate_test("Ostro error:" in ApiBase.response) -# TODO(YB): test_index_post, test_index_put needs to be written again +# TODO(UNKNOWN): test_index_post, test_index_put needs to be written again diff --git a/valet/tests/unit/api/v1/test_root.py b/valet/tests/unit/api/v1/test_root.py index 2739320..34572be 100644 --- a/valet/tests/unit/api/v1/test_root.py +++ b/valet/tests/unit/api/v1/test_root.py @@ -1,18 +1,20 @@ # # Copyright 2014-2017 AT&T Intellectual Property -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +"""Test Root.""" + import mock import valet.api.v1.controllers.root as root from valet.api.v1.controllers.root import RootController @@ -20,29 +22,34 @@ from valet.tests.unit.api.v1.api_base import ApiBase class TestRoot(ApiBase): - '''Unit tests for valet.api.v1.controllers.placements ''' + """Unit tests for valet.api.v1.controllers.root.""" def setUp(self): + """Setup Test Root Class and set RootController.""" super(TestRoot, self).setUp() self.root_controller = RootController() def test_allow(self): + """Test root_controller allow method with GET.""" self.validate_test(self.root_controller.allow() == 'GET') @mock.patch.object(root, 'error', ApiBase.mock_error) @mock.patch.object(root, 'request') def test_index(self, mock_request): + """Test root_controller index method with incorrect (PUT) method.""" mock_request.method = "PUT" self.root_controller.index() self.validate_test("The PUT method is not allowed" in ApiBase.response) def test_index_options(self): + """Test root_controller index_options method.""" self.root_controller.index_options() self.validate_test(root.response.status == 204) @mock.patch.object(root, 'request') def test_index_get(self, mock_request): + """Test root_controller index_get method.""" mock_request.application_url.return_value = "application_url" response = self.root_controller.index_get() diff --git a/valet/tests/unit/api/v1/test_status.py b/valet/tests/unit/api/v1/test_status.py index 939f9e6..70a4b7b 100644 --- a/valet/tests/unit/api/v1/test_status.py +++ b/valet/tests/unit/api/v1/test_status.py @@ -1,18 +1,20 @@ # # Copyright 
2014-2017 AT&T Intellectual Property -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +"""Test Status.""" + import mock import valet.api.v1.controllers.status as status from valet.api.v1.controllers.status import StatusController @@ -20,33 +22,39 @@ from valet.tests.unit.api.v1.api_base import ApiBase class TestStatus(ApiBase): - '''Unit tests for valet.api.v1.controllers.placements ''' + """Unit tests for valet.api.v1.controllers.status.""" def setUp(self): + """Setup Test Status controller.""" super(TestStatus, self).setUp() self.status_controller = StatusController() def test_allow(self): + """Test status_controller allow method.""" self.validate_test(self.status_controller.allow() == 'HEAD,GET') @mock.patch.object(status, 'error', ApiBase.mock_error) @mock.patch.object(status, 'request') def test_index(self, mock_request): + """Test status_controller index method with errored (PUT) request.""" mock_request.method = "PUT" self.status_controller.index() self.validate_test("The PUT method is not allowed" in ApiBase.response) def test_index_options(self): + """Test status_controller index_options method.""" self.status_controller.index_options() self.validate_test(status.response.status == 204) def test_index_head(self): + """Test status_controller index_head method.""" with mock.patch('valet.api.v1.controllers.status.Ostro'): self.status_controller.index_head() self.validate_test(status.response.status == 204) def test_index_get(self): + """Test status_controller 
index_get method.""" with mock.patch('valet.api.v1.controllers.status.Ostro'): self.status_controller.index_get() self.validate_test(status.response.status == 200) diff --git a/valet/tests/unit/api/v1/test_v1.py b/valet/tests/unit/api/v1/test_v1.py index 6437842..c9e5f08 100644 --- a/valet/tests/unit/api/v1/test_v1.py +++ b/valet/tests/unit/api/v1/test_v1.py @@ -1,18 +1,20 @@ # # Copyright 2014-2017 AT&T Intellectual Property -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +"""Test v1.""" + import mock import pecan import valet.api.v1.controllers.v1 as v1 @@ -21,9 +23,11 @@ from valet.tests.unit.api.v1.api_base import ApiBase class TestV1(ApiBase): + """Unit tests for valet.api.v1.controllers.""" @mock.patch.object(pecan, 'conf') def setUp(self, mock_conf): + """Setup v1 controller for testing.""" super(TestV1, self).setUp() mock_conf.identity.engine.validate_token.return_value = True @@ -35,6 +39,7 @@ class TestV1(ApiBase): @mock.patch.object(v1, 'request') def test_check_permissions(self, mock_request): + """Test v1_controller check permissions.""" mock_request.headers.get.return_value = "auth_token" mock_request.path.return_value = "bla bla bla" mock_request.json.return_value = {"action": "create"} @@ -45,6 +50,7 @@ class TestV1(ApiBase): @mock.patch.object(v1, 'error', ApiBase.mock_error) @mock.patch.object(v1, 'request') def test_check_permissions_auth_unhappy(self, mock_request): + """Test v1_controller check permissions when unauthorized(no token).""" 
mock_request.headers.get.return_value = None mock_request.path.return_value = "bla bla bla" mock_request.json.return_value = {"action": "create"} @@ -54,21 +60,25 @@ class TestV1(ApiBase): self.validate_test("Unauthorized - No auth token" in ApiBase.response) def test_allow(self): + """Test v1_controller allow method.""" self.validate_test(self.v1_controller.allow() == 'GET') @mock.patch.object(v1, 'error', ApiBase.mock_error) @mock.patch.object(v1, 'request') def test_index(self, mock_request): + """Test v1_controller index method with error.""" mock_request.method = "PUT" self.v1_controller.index() self.validate_test("The PUT method is not allowed" in ApiBase.response) def test_index_options(self): + """Test v1_controller index_options method (return status 204).""" self.v1_controller.index_options() self.validate_test(v1.response.status == 204) @mock.patch.object(v1, 'request') def test_index_get(self, mock_request): + """Test v1_controller index_get.""" mock_request.application_url.return_value = "application_url" response = self.v1_controller.index_get() diff --git a/valet/tests/unit/cli/test_groupcli.py b/valet/tests/unit/cli/test_groupcli.py index ebb6060..01b4742 100644 --- a/valet/tests/unit/cli/test_groupcli.py +++ b/valet/tests/unit/cli/test_groupcli.py @@ -1,18 +1,20 @@ # # Copyright 2014-2017 AT&T Intellectual Property -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+"""Test GroupCli.""" + import mock # from valet.cli.groupcli import cmd_details import valet.cli.groupcli as grpcli @@ -21,17 +23,16 @@ from valet.tests.base import Base class TestGroupcli(Base): - ''' Unit tests for valet.valetcli ''' + """Unit tests for valet.valetcli.""" def setUp(self): + """Setup Test Group cli.""" super(TestGroupcli, self).setUp() @mock.patch.object(grpcli, 'requests') def test_cmd_details(self, mock_requests): + """Test command details, mock the requests and ar.""" mock_requests.post = 'post' ar = mock.MagicMock() ar.subcmd = "create" - -# res = grpcli.cmd_details(ar) -# print(res) diff --git a/valet/tests/unit/cli/test_valetcli.py b/valet/tests/unit/cli/test_valetcli.py index 94788f4..c0b66bc 100644 --- a/valet/tests/unit/cli/test_valetcli.py +++ b/valet/tests/unit/cli/test_valetcli.py @@ -1,30 +1,34 @@ # # Copyright 2014-2017 AT&T Intellectual Property -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+"""Test Valetcli.""" + import mock from valet.cli.valetcli import Cli from valet.tests.base import Base class TestValetcli(Base): - ''' Unit tests for valet.valetcli ''' + """Unit tests for valet.valetcli.""" def setUp(self): + """Setup TestValetCli class.""" super(TestValetcli, self).setUp() def test_parse(self): + """Create cli parser and validate by parsing test args.""" cli = Cli() cli.create_parser() argv = ['/path/to/valetcli.py', 'group', 'list'] @@ -33,6 +37,7 @@ class TestValetcli(Base): self.validate_test(cli.args.service == 'group') def test_logic(self): + """Test cli logic methods getitem and getitem.run.""" cli = Cli() cli.submod = mock.MagicMock() cli.args = mock.MagicMock() @@ -40,5 +45,7 @@ class TestValetcli(Base): cli.logic() self.validate_test(len(cli.submod.mock_calls) == 2) - self.validate_test("call.__getitem__('group')" in str(cli.submod.mock_calls[0])) - self.validate_test("call.__getitem__().run" in str(cli.submod.mock_calls[1])) + self.validate_test("call.__getitem__('group')" in + str(cli.submod.mock_calls[0])) + self.validate_test("call.__getitem__().run" in + str(cli.submod.mock_calls[1])) diff --git a/valet/tests/unit/engine/test_config.py b/valet/tests/unit/engine/test_config.py index b61a332..53201bd 100644 --- a/valet/tests/unit/engine/test_config.py +++ b/valet/tests/unit/engine/test_config.py @@ -1,18 +1,20 @@ # # Copyright 2014-2017 AT&T Intellectual Property -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+"""Test Config.""" + import sys from valet.engine.optimizer.ostro_server.configuration import Config from valet.tests.base import Base @@ -21,19 +23,15 @@ from oslo_config import cfg class TestConfig(Base): + """Unit tests for Valet.engine.optimizer.ostro_server.configuration.""" def setUp(self): + """Setup Test Config Testing Class.""" super(TestConfig, self).setUp() sys.argv = [sys.argv[0]] -# def test_simple_config(self): -# cfg.CONF.clear() -# config = Config() -# config_status = config.configure() -# -# self.validate_test(config_status == "success") - def test_unhappy_config_io(self): + """Test unhappy.cfg I/O and validate I/O error in config status.""" cfg.CONF.clear() try: config = Config("unhappy.cfg") @@ -44,6 +42,7 @@ class TestConfig(Base): self.validate_test(isinstance(ex, cfg.ConfigFilesNotFoundError)) def test_config_io(self): + """Test config I/O and validate config status is success.""" cfg.CONF.clear() config = Config("etc/valet/valet.conf") config_status = config.configure() diff --git a/valet/tests/unit/engine/test_search.py b/valet/tests/unit/engine/test_search.py index 3a89822..198f95e 100644 --- a/valet/tests/unit/engine/test_search.py +++ b/valet/tests/unit/engine/test_search.py @@ -1,18 +1,20 @@ # # Copyright 2014-2017 AT&T Intellectual Property -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+"""Test Search.""" + import logging import mock from valet.engine.optimizer.ostro.search import Search @@ -22,13 +24,14 @@ LOG = logging.getLogger(__name__) class TestSearch(Base): + """Unit tests for valet.engine.optimizer.ostro.search.""" def setUp(self): + """Setup Test Search Class.""" super(TestSearch, self).setUp() self.search = Search(LOG) def test_copy_resource_status(self): + """Test Copy Resource Status.""" self.search.copy_resource_status(mock.MagicMock()) - -# def test_place_nodes(self): diff --git a/valet/tests/unit/engine/test_topology.py b/valet/tests/unit/engine/test_topology.py index 5931c10..c5a9b59 100644 --- a/valet/tests/unit/engine/test_topology.py +++ b/valet/tests/unit/engine/test_topology.py @@ -1,30 +1,36 @@ # # Copyright 2014-2017 AT&T Intellectual Property -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+"""Test Topology.""" + from valet.engine.resource_manager.topology import Topology from valet.tests.base import Base class TestTopology(Base): + """Unit Tests for valet.engine.resource_manager.topology.""" def setUp(self): + """Setup TestTopology Test Class.""" super(TestTopology, self).setUp() self.topo = Topology(Config(), None) def test_simple_topology(self): - (region, rack, node_type, status) = self.topo._set_layout_by_name("pdk15r05c001") + """Validate simple topology (region, rack, node_type and status).""" + (region, rack, node_type, status) = \ + self.topo._set_layout_by_name("pdk15r05c001") self.validate_test(region == "pdk15") self.validate_test(rack == "pdk15r05") @@ -32,7 +38,9 @@ class TestTopology(Base): self.validate_test(status == "success") def test_domain_topology(self): - (region, rack, node_type, status) = self.topo._set_layout_by_name("ihk01r01c001.emea.att.com") + """Test Domain Topology.""" + (region, rack, node_type, status) = \ + self.topo._set_layout_by_name("ihk01r01c001.emea.att.com") self.validate_test(region == "ihk01") self.validate_test(rack == "ihk01r01") @@ -40,30 +48,31 @@ class TestTopology(Base): self.validate_test(status == "success") def test_unhappy_topology_r(self): - (region, rack, node_type, status) = self.topo._set_layout_by_name("pdk1505c001") + """Test unhappy topology, region/rack/node none, invalid status 0.""" + (region, rack, node_type, status) = \ + self.topo._set_layout_by_name("pdk1505c001") self.validate_test(region == "none") self.validate_test(rack == "none") self.validate_test(node_type is None) - self.validate_test(status == "invalid number of identification fields = 0") + self.validate_test(status == "invalid number of " + "identification fields = 0") def test_unhappy_topology_c(self): - (region, rack, node_type, status) = self.topo._set_layout_by_name("pdk15r05001") + """Test unhappy topology with values none and 1 invalid status.""" + (region, rack, node_type, status) = \ + 
self.topo._set_layout_by_name("pdk15r05001") self.validate_test(region == "none") self.validate_test(rack == "none") self.validate_test(node_type is None) - self.validate_test(status == "invalid number of identification fields = 1") + self.validate_test(status == "invalid number of " + "identification fields = 1") -# def test_unhappy_topology_c_domain(self): -# (region, rack, node_type, status) = self.topo._set_layout_by_name("pdk15r05001.emea.att.com") -# self.validate_test(region == "none") -# self.validate_test(rack == "none") -# self.validate_test(node_type is None) -# self.validate_test(status == "invalid number of identification fields = 1") - -# TODO(GY): add validation to topology for region +# TODO(UNKNOWN): add validation to topology for region class Config(object): + """Config for topology.""" + num_of_region_chars = 3 rack_code_list = "r" node_code_list = "a,c,u,f,o,p,s" diff --git a/valet/tests/unit/test_general.py b/valet/tests/unit/test_general.py index cea98e0..a2fd25f 100644 --- a/valet/tests/unit/test_general.py +++ b/valet/tests/unit/test_general.py @@ -1,25 +1,30 @@ # # Copyright 2014-2017 AT&T Intellectual Property -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+"""Test General.""" + from valet.tests.base import Base class TestGeneral(Base): + """Test General Class.""" def setUp(self): + """Setup Test General.""" super(TestGeneral, self).setUp() def test_general(self): + """Test General performs validate test with True.""" self.validate_test(True) diff --git a/valet_plugins/README b/valet_plugins/README index 838f0a6..ae6d3a6 100644 --- a/valet_plugins/README +++ b/valet_plugins/README @@ -1,9 +1,5 @@ Ostro version 2.0.2 Installation and Usage Guide -Author: Gueyoung Jung -Contact: gjung@research.att.com - - INSTALLATION You can download the latest Ostro python code from repository (GitHub). @@ -53,5 +49,3 @@ Ostro will run as a daemon process. Go to “ostro_server” directory, then sta To stop this daemon process: python ostro_daemon.py stop - - diff --git a/valet_plugins/README.md b/valet_plugins/README.md index fb1520d..27b8e06 100644 --- a/valet_plugins/README.md +++ b/valet_plugins/README.md @@ -23,12 +23,3 @@ Additional documents: * [OpenStack Heat Resource Plugins](https://github.com/att-comdev/valet/blob/master/valet_plugins/valet_plugins/heat/README.md): Heat resources * [Placement API](https://github.com/att-comdev/valet/blob/master/doc/valet_api.md): API requests/responses * [Using Postman with valet-api](https://github.com/att-comdev/valet/blob/master/valet/tests/api/README.md): Postman support - -## Thank You - -Alicia Abella, Saar Alaluf, Bharath Balasubramanian, Roy Ben Hai, Shimon Benattar, Yael Ben Shalom, Benny Bustan, Rachel Cohen, Joe D'Andrea, Harel Dorfman, Boaz Elitzur, P.K. Esrawan, Inbal Harduf, Matti Hiltunen, Doron Honigsberg, Kaustubh Joshi, Gueyoung Jung, Gerald Karam, David Khanin, Israel Kliger, Erez Korn, Max Osipov, Chris Rice, Amnon Sagiv, Gideon Shafran, Galit Shemesh, Anna Yefimov; AT&T Advanced Technology and Architecture, AT&T Technology Development - AIC, Additional partners in AT&T Domain 2.0. Apologies if we missed anyone (please advise via email!). 
- -## Contact - -Joe D'Andrea - diff --git a/valet_plugins/RELEASE b/valet_plugins/RELEASE index f8ce116..3f87543 100644 --- a/valet_plugins/RELEASE +++ b/valet_plugins/RELEASE @@ -20,7 +20,3 @@ Valet1.0/Ostro features - Migration tip Working on this. - - - - diff --git a/valet_plugins/requirements.txt b/valet_plugins/requirements.txt index fb4b42f..37f7ec4 100644 --- a/valet_plugins/requirements.txt +++ b/valet_plugins/requirements.txt @@ -3,4 +3,4 @@ # process, which may cause wedges in the gate later. pip -simplejson \ No newline at end of file +simplejson diff --git a/valet_plugins/setup.cfg b/valet_plugins/setup.cfg index 64667d7..fc1eb8a 100644 --- a/valet_plugins/setup.cfg +++ b/valet_plugins/setup.cfg @@ -31,4 +31,3 @@ data_files = # ValetFilter = valet_os.cinder.valet_filter:ValetFilter heat.stack_lifecycle_plugins = valet.lifecycle_plugin = valet_plugins.plugins.heat.plugins:ValetLifecyclePlugin - diff --git a/valet_plugins/setup.py b/valet_plugins/setup.py index 88d71d0..8646d86 100644 --- a/valet_plugins/setup.py +++ b/valet_plugins/setup.py @@ -1,19 +1,19 @@ # # Copyright 2014-2017 AT&T Intellectual Property -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-'''Setup''' +"""Setup.""" import setuptools diff --git a/valet_plugins/tox.ini b/valet_plugins/tox.ini index 48c46db..0a94318 100644 --- a/valet_plugins/tox.ini +++ b/valet_plugins/tox.ini @@ -53,4 +53,3 @@ show-source = True ignore = E123,E125,E501,H401,H501,H301 builtins = _ exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg,build,*egg-info - diff --git a/valet_plugins/valet_plugins/common/valet_api.py b/valet_plugins/valet_plugins/common/valet_api.py index d7e944b..431d5cb 100644 --- a/valet_plugins/valet_plugins/common/valet_api.py +++ b/valet_plugins/valet_plugins/common/valet_api.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -'''Valet API Wrapper''' +"""Valet API Wrapper.""" from heat.common.i18n import _ import json @@ -29,7 +29,7 @@ LOG = logging.getLogger(__name__) def _exception(exc, exc_info, req): - '''Handle an exception''' + """Handle an exception.""" response = None try: response = json.loads(req.text) @@ -40,30 +40,34 @@ def _exception(exc, exc_info, req): if 'error' in response: error = response.get('error') msg = "%(explanation)s (valet-api: %(message)s)" % { - 'explanation': response.get('explanation', _('No remediation available')), + 'explanation': response.get('explanation', + _('No remediation available')), 'message': error.get('message', _('Unknown error')) } raise ValetAPIError(msg) else: # TODO(JD): Re-evaluate if this clause is necessary. 
exc_class, exc, traceback = exc_info # pylint: disable=W0612 - msg = _("%(exc)s for %(method)s %(url)s with body %(body)s") % {'exc': exc, 'method': exc.request.method, 'url': exc.request.url, 'body': exc.request.body} + msg = _("%(exc)s for %(method)s %(url)s with body %(body)s") %\ + {'exc': exc, 'method': exc.request.method, 'url': exc.request.url, + 'body': exc.request.body} my_exc = ValetAPIError(msg) # traceback can be added to the end of the raise raise my_exc.__class__, my_exc -# TODO(JD): Improve exception reporting back up to heat +# TODO(UNKNOWN): Improve exception reporting back up to heat class ValetAPIError(Exception): - '''Valet API Error''' + """Valet API Error.""" + pass class ValetAPIWrapper(object): - '''Valet API Wrapper''' + """Valet API Wrapper.""" def __init__(self): - '''Initializer''' + """Initializer.""" self.headers = {'Content-Type': 'application/json'} self.opt_group_str = 'valet' self.opt_name_str = 'url' @@ -72,7 +76,7 @@ class ValetAPIWrapper(object): self._register_opts() def _api_endpoint(self): - '''Returns API endpoint''' + """Return API endpoint.""" try: opt = getattr(cfg.CONF, self.opt_group_str) endpoint = opt[self.opt_name_str] @@ -85,7 +89,11 @@ class ValetAPIWrapper(object): raise # exception.Error(_('API Endpoint not defined.')) def _get_timeout(self): - '''Returns Valet plugin API request timeout tuple (conn_timeout, read_timeout)''' + """Get timeout. + + Return Valet plugin API request timeout + tuple (conn_timeout, read_timeout). 
+ """ conn_timeout = 3 read_timeout = 5 try: @@ -97,47 +105,55 @@ class ValetAPIWrapper(object): return conn_timeout, read_timeout def _register_opts(self): - '''Register options''' + """Register options.""" opts = [] - option = cfg.StrOpt(self.opt_name_str, default=None, help=_('Valet API endpoint')) + option = cfg.StrOpt(self.opt_name_str, default=None, + help=_('Valet API endpoint')) opts.append(option) - option = cfg.IntOpt(self.opt_conn_timeout, default=3, help=_('Valet Plugin Connect Timeout')) + option = cfg.IntOpt(self.opt_conn_timeout, default=3, + help=_('Valet Plugin Connect Timeout')) opts.append(option) - option = cfg.IntOpt(self.opt_read_timeout, default=5, help=_('Valet Plugin Read Timeout')) + option = cfg.IntOpt(self.opt_read_timeout, default=5, + help=_('Valet Plugin Read Timeout')) opts.append(option) opt_group = cfg.OptGroup(self.opt_group_str) cfg.CONF.register_group(opt_group) cfg.CONF.register_opts(opts, group=opt_group) - # TODO(JD): Keep stack param for now. We may need it again. + # TODO(UNKOWN): Keep stack param for now. We may need it again. def plans_create(self, stack, plan, auth_token=None): # pylint: disable=W0613 - '''Create a plan''' + """Create a plan.""" response = None try: timeout = self._get_timeout() url = self._api_endpoint() + '/plans/' payload = json.dumps(plan) self.headers['X-Auth-Token'] = auth_token - req = requests.post(url, data=payload, headers=self.headers, timeout=timeout) + req = requests.post(url, data=payload, headers=self.headers, + timeout=timeout) req.raise_for_status() response = json.loads(req.text) - except (requests.exceptions.HTTPError, requests.exceptions.ConnectTimeout, requests.exceptions.ConnectionError)\ + except (requests.exceptions.HTTPError, + requests.exceptions.ConnectTimeout, + requests.exceptions.ConnectionError) \ as exc: _exception(exc, sys.exc_info(), req) except Exception as e: LOG.error("Exception (at plans_create) is: %s" % e) return response - # TODO(JD): Keep stack param for now. 
We may need it again. + # TODO(UNKNOWN): Keep stack param for now. We may need it again. def plans_delete(self, stack, auth_token=None): # pylint: disable=W0613 - '''Delete a plan''' + """Delete a plan.""" try: timeout = self._get_timeout() url = self._api_endpoint() + '/plans/' + stack.id self.headers['X-Auth-Token'] = auth_token req = requests.delete(url, headers=self.headers, timeout=timeout) - except (requests.exceptions.HTTPError, requests.exceptions.ConnectTimeout, requests.exceptions.ConnectionError)\ + except (requests.exceptions.HTTPError, + requests.exceptions.ConnectTimeout, + requests.exceptions.ConnectionError)\ as exc: _exception(exc, sys.exc_info(), req) except Exception as e: @@ -145,7 +161,7 @@ class ValetAPIWrapper(object): # Delete does not return a response body. def placement(self, orch_id, res_id, hosts=None, auth_token=None): - '''Reserve previously made placement.''' + """Reserve previously made placement.""" try: timeout = self._get_timeout() url = self._api_endpoint() + '/placements/' + orch_id @@ -156,11 +172,12 @@ class ValetAPIWrapper(object): "resource_id": res_id } payload = json.dumps(kwargs) - req = requests.post(url, data=payload, headers=self.headers, timeout=timeout) + req = requests.post(url, data=payload, headers=self.headers, + timeout=timeout) else: req = requests.get(url, headers=self.headers, timeout=timeout) - # TODO(JD): Raise an exception IFF the scheduler can handle it + # TODO(UNKNOWN): Raise an exception IFF the scheduler can handle it response = json.loads(req.text) except Exception: # pylint: disable=W0702 diff --git a/valet_plugins/valet_plugins/heat/GroupAssignment.py b/valet_plugins/valet_plugins/heat/GroupAssignment.py index f72cf66..c1b9f8c 100644 --- a/valet_plugins/valet_plugins/heat/GroupAssignment.py +++ b/valet_plugins/valet_plugins/heat/GroupAssignment.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-'''GroupAssignment Heat Resource Plugin''' +"""GroupAssignment Heat Resource Plugin.""" from heat.common.i18n import _ from heat.engine import constraints @@ -26,15 +26,20 @@ LOG = logging.getLogger(__name__) class GroupAssignment(resource.Resource): - ''' A Group Assignment describes one or more resources assigned to a particular type of group. + """Group Assignment. - Assignments can reference other assignments, so long as there are no circular references. - There are three types of groups: affinity, diversity, and exclusivity. - Exclusivity groups have a unique name, assigned through Valet. + Group Assignment describes one or more resources assigned to a + particular type of group. - This resource is purely informational in nature and makes no changes to heat, nova, or cinder. - The Valet Heat Lifecycle Plugin passes this information to the optimizer. - ''' + Assignments can reference other assignments, so long as there are no + circular references. There are three types of groups: affinity, diversity, + and exclusivity. Exclusivity groups have a unique name, assigned through + Valet. + + This resource is purely informational in nature and makes no changes to + heat, nova, or cinder. The Valet Heat Lifecycle Plugin passes this + information to the optimizer. + """ _RELATIONSHIP_TYPES = ( AFFINITY, DIVERSITY, EXCLUSIVITY, @@ -52,7 +57,7 @@ class GroupAssignment(resource.Resource): GROUP_NAME: properties.Schema( properties.Schema.STRING, _('Group name. Required for exclusivity groups.'), - # TODO(JD): Add a custom constraint + # TODO(UNKNOWN): Add a custom constraint # Constraint must ensure a valid and allowed name # when an exclusivity group is in use. 
# This is presently enforced by valet-api and can also @@ -87,18 +92,19 @@ class GroupAssignment(resource.Resource): } def handle_create(self): - '''Create resource''' + """Create resource.""" self.resource_id_set(self.physical_resource_name()) - def handle_update(self, json_snippet, templ_diff, prop_diff): # pylint: disable=W0613 - '''Update resource''' + def handle_update(self, json_snippet, templ_diff, # pylint: disable=W0613 + prop_diff): + """Update resource.""" self.resource_id_set(self.physical_resource_name()) def handle_delete(self): - '''Delete resource''' + """Delete resource.""" self.resource_id_set(None) def resource_mapping(): - '''Map names to resources.''' + """Map names to resources.""" return {'ATT::Valet::GroupAssignment': GroupAssignment, } diff --git a/valet_plugins/valet_plugins/plugins/heat/plugins.py b/valet_plugins/valet_plugins/plugins/heat/plugins.py index f1580e5..b86994a 100644 --- a/valet_plugins/valet_plugins/plugins/heat/plugins.py +++ b/valet_plugins/valet_plugins/plugins/heat/plugins.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -'''Valet Plugins for Heat''' +"""Valet Plugins for Heat.""" from heat.engine import lifecycle_plugin @@ -30,13 +30,13 @@ LOG = logging.getLogger(__name__) def validate_uuid4(uuid_string): - ''' Validate that a UUID string is in fact a valid uuid4. + """Validate that a UUID string is in fact a valid uuid4. Happily, the uuid module does the actual checking for us. It is vital that the 'version' kwarg be passed to the UUID() call, otherwise any 32-character hex string is considered valid. - ''' + """ try: val = uuid.UUID(uuid_string, version=4) except ValueError: @@ -54,11 +54,13 @@ def validate_uuid4(uuid_string): class ValetLifecyclePlugin(lifecycle_plugin.LifecyclePlugin): - ''' Base class for pre-op and post-op work on a stack. + """Base class for pre-op and post-op work on a stack. 
Implementations should extend this class and override the methods. - ''' + """ + def __init__(self): + """Initialize.""" self.api = valet_api.ValetAPIWrapper() self.hints_enabled = False @@ -67,10 +69,11 @@ class ValetLifecyclePlugin(lifecycle_plugin.LifecyclePlugin): self.hints_enabled = cfg.CONF.stack_scheduler_hints def _parse_stack_preview(self, dest, preview): - ''' Walk the preview list (possibly nested) + """Walk the preview list (possibly nested). - extracting parsed template dicts and storing modified versions in a flat dict. - ''' + extracting parsed template dicts and storing modified versions in a flat + dict. + """ # The preview is either a list or not. if not isinstance(preview, list): # Heat does not assign orchestration UUIDs to @@ -85,21 +88,21 @@ class ValetLifecyclePlugin(lifecycle_plugin.LifecyclePlugin): preview.uuid and validate_uuid4(preview.uuid): key = preview.uuid else: - # TODO(JD): Heat should be authoritative for UUID assignments. + # TODO(UNKNOWN): Heat should be authoritative for UUID assignments. # This will require a change to heat-engine. # Looks like it may be: heat/db/sqlalchemy/models.py#L279 # It could be that nested stacks aren't added to the DB yet. key = str(uuid.uuid4()) parsed = preview.parsed_template() parsed['name'] = preview.name - # TODO(JD): Replace resource referenced names with their UUIDs. + # TODO(UNKNOWN): Replace resource referenced names with their UUIDs. dest[key] = parsed else: for item in preview: self._parse_stack_preview(dest, item) def do_pre_op(self, cnxt, stack, current_stack=None, action=None): - ''' Method to be run by heat before stack operations.
''' + """Method to be run by heat before stack operations.""" if not self.hints_enabled or stack.status != 'IN_PROGRESS': return @@ -130,19 +133,19 @@ class ValetLifecyclePlugin(lifecycle_plugin.LifecyclePlugin): self.api.plans_create(stack, plan, auth_token=cnxt.auth_token) - def do_post_op(self, cnxt, stack, current_stack=None, action=None, # pylint: disable=R0913 - is_stack_failure=False): - ''' Method to be run by heat after stack operations, including failures. + def do_post_op(self, cnxt, stack, # pylint: disable=R0913 + current_stack=None, action=None, is_stack_failure=False): + """Method to be run by heat after stack operations, including failures. On failure to execute all the registered pre_ops, this method will be called if and only if the corresponding pre_op was successfully called. On failures of the actual stack operation, this method will be called if all the pre operations were successfully called. - ''' + """ pass def get_ordinal(self): - ''' An ordinal used to order class instances for pre and post operation execution. + """Ordinal to order class instances for pre /post operation execution. The values returned by get_ordinal are used to create a partial order for pre and post operation method invocations. The default ordinal @@ -153,5 +156,5 @@ class ValetLifecyclePlugin(lifecycle_plugin.LifecyclePlugin): class1inst will be executed after the method on class2inst. If class1inst.ordinal() == class2inst.ordinal(), then the order of method invocation is indeterminate. - ''' + """ return 100 diff --git a/valet_plugins/valet_plugins/plugins/nova/valet_filter.py b/valet_plugins/valet_plugins/plugins/nova/valet_filter.py index d2f99c1..499d005 100644 --- a/valet_plugins/valet_plugins/plugins/nova/valet_filter.py +++ b/valet_plugins/valet_plugins/plugins/nova/valet_filter.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-'''Valet Nova Scheduler Filter''' +"""Valet Nova Scheduler Filter.""" from keystoneclient.v2_0 import client @@ -31,7 +31,7 @@ LOG = logging.getLogger(__name__) class ValetFilter(filters.BaseHostFilter): - '''Filter on Valet assignment.''' + """Filter on Valet assignment.""" # Host state does not change within a request run_filter_once_per_request = True @@ -40,7 +40,7 @@ class ValetFilter(filters.BaseHostFilter): _auth_token = None def __init__(self): - '''Initializer''' + """Initializer.""" self.api = valet_api.ValetAPIWrapper() self.opt_group_str = 'valet' self.opt_failure_mode_str = 'failure_mode' @@ -51,7 +51,7 @@ class ValetFilter(filters.BaseHostFilter): self._register_opts() def _authorize(self): - '''Keystone AuthN''' + """Keystone AuthN.""" opt = getattr(cfg.CONF, self.opt_group_str) project_name = opt[self.opt_project_name_str] username = opt[self.opt_username_str] @@ -68,32 +68,38 @@ class ValetFilter(filters.BaseHostFilter): self._auth_token = keystone_client.auth_token def _is_same_host(self, host, location): # pylint: disable=R0201 - '''Returns true if host matches location''' + """Return true if host matches location.""" return host == location def _register_opts(self): - '''Register Options''' + """Register Options.""" opts = [] - option = cfg.StrOpt(self.opt_failure_mode_str, choices=['reject', 'yield'], default='reject', - help=_('Mode to operate in if Valet planning fails for any reason.')) + option = cfg.StrOpt( + self.opt_failure_mode_str, + choices=['reject', 'yield'], + default='reject', + help=_('Mode to operate in if Valet planning fails for any reason.')) opts.append(option) - option = cfg.StrOpt(self.opt_project_name_str, default=None, help=_('Valet Project Name')) + option = cfg.StrOpt(self.opt_project_name_str, default=None, + help=_('Valet Project Name')) opts.append(option) - option = cfg.StrOpt(self.opt_username_str, default=None, help=_('Valet Username')) + option = cfg.StrOpt(self.opt_username_str, default=None, + help=_('Valet 
Username')) opts.append(option) - option = cfg.StrOpt(self.opt_password_str, default=None, help=_('Valet Password')) + option = cfg.StrOpt(self.opt_password_str, default=None, + help=_('Valet Password')) opts.append(option) - option = cfg.StrOpt(self.opt_auth_uri_str, default=None, help=_('Keystone Authorization API Endpoint')) + option = cfg.StrOpt(self.opt_auth_uri_str, default=None, + help=_('Keystone Authorization API Endpoint')) opts.append(option) opt_group = cfg.OptGroup(self.opt_group_str) cfg.CONF.register_group(opt_group) cfg.CONF.register_opts(opts, group=opt_group) - # TODO(JD): Factor out common code between this and the cinder filter + # TODO(UNKNOWN): Factor out common code between this and the cinder filter def filter_all(self, filter_obj_list, filter_properties): - '''Filter all hosts in one swell foop''' - + """Filter all hosts in one swell foop.""" hints_key = 'scheduler_hints' orch_id_key = 'heat_resource_uuid' @@ -115,14 +121,15 @@ class ValetFilter(filters.BaseHostFilter): if orch_id_key not in filter_properties.get(hints_key, {}): self._authorize() - LOG.warn(_LW("Valet: Heat Stack Lifecycle Scheduler Hints not found. Performing ad-hoc placement.")) + LOG.warn(_LW("Valet: Heat Stack Lifecycle Scheduler Hints not " + "found. Performing ad-hoc placement.")) ad_hoc = True # We'll need the flavor. instance_type = filter_properties.get('instance_type') flavor = instance_type.get('name') - # Beacuse this wasn't orchestrated, there's no stack. + # Because this wasn't orchestrated, there's no stack. # We're going to compose a resource as if there as one. # In this particular case we use the physical # resource id as both the orchestration and stack id. 
@@ -150,10 +157,12 @@ class ValetFilter(filters.BaseHostFilter): 'resources': resources } try: - response = self.api.plans_create(None, plan, auth_token=self._auth_token) + response = self.api.plans_create(None, plan, + auth_token=self._auth_token) except Exception: - # TODO(JD): Get context from exception - LOG.error(_LE("Valet did not respond to ad hoc placement request.")) + # TODO(UNKNOWN): Get context from exception + LOG.error(_LE("Valet did not respond to ad hoc placement " + "request.")) response = None if response and response.get('plan'): @@ -165,9 +174,11 @@ class ValetFilter(filters.BaseHostFilter): location = placement['location'] if not location: - LOG.error(_LE("Valet ad-hoc placement unknown for resource id %s.") % res_id) + LOG.error(_LE("Valet ad-hoc placement unknown for resource id " + "%s.") % res_id) if failure_mode == 'yield': - LOG.warn(_LW("Valet will yield to Nova for placement decisions.")) + LOG.warn(_LW("Valet will yield to Nova for placement " + "decisions.")) yield_all = True else: yield_all = False @@ -177,7 +188,8 @@ class ValetFilter(filters.BaseHostFilter): hosts = [obj.host for obj in filter_obj_list] try: - response = self.api.placement(orch_id, res_id, hosts=hosts, auth_token=self._auth_token) + response = self.api.placement(orch_id, res_id, hosts=hosts, + auth_token=self._auth_token) except Exception: print("Exception in creating placement") LOG.error(_LW("Valet did not respond to placement request.")) @@ -190,10 +202,12 @@ class ValetFilter(filters.BaseHostFilter): location = placement['location'] if not location: - # TODO(JD): Get context from exception - LOG.error(_LE("Valet placement unknown for resource id {0}, orchestration id {1}.").format(res_id, orch_id)) + # TODO(UNKNOWN): Get context from exception + LOG.error(_LE("Valet placement unknown for resource id {0}," + "orchestration id {1}.").format(res_id, orch_id)) if failure_mode == 'yield': - LOG.warn(_LW("Valet will yield to Nova for placement decisions.")) + 
LOG.warn(_LW("Valet will yield to Nova for placement" + "decisions.")) yield_all = True else: yield_all = False @@ -206,15 +220,19 @@ class ValetFilter(filters.BaseHostFilter): match = self._is_same_host(obj.host, location) if match: if ad_hoc: - LOG.info(_LI("Valet ad-hoc placement for resource id {0}: {1}.").format(res_id, obj.host)) + LOG.info(_LI("Valet ad-hoc placement for resource " + "id {0}: {1}.").format(res_id, obj.host)) else: - LOG.info(_LI("Valet placement for resource id %s, orchestration id {0}: {1}.").format(res_id, orch_id, obj.host)) + LOG.info(_LI("Valet placement for resource id %s, " + "orchestration id {0}: {1}.").format( + res_id, orch_id, obj.host)) else: match = None if yield_all or match: yield obj - def host_passes(self, host_state, filter_properties): # pylint: disable=W0613,R0201 - '''Individual host pass check''' + def host_passes(self, host_state, # pylint: disable=W0613,R0201 + filter_properties): + """Individual host pass check.""" # Intentionally let filter_all() handle in one swell foop. return False diff --git a/valet_plugins/valet_plugins/tests/base.py b/valet_plugins/valet_plugins/tests/base.py index 614d628..5008115 100644 --- a/valet_plugins/valet_plugins/tests/base.py +++ b/valet_plugins/valet_plugins/tests/base.py @@ -1,18 +1,20 @@ # # Copyright 2014-2017 AT&T Intellectual Property -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+"""Tests Base.""" + from oslo_config import fixture as fixture_config from oslo_log import log as logging from oslotest.base import BaseTestCase @@ -25,23 +27,27 @@ class Base(BaseTestCase): """Test case base class for all unit tests.""" def __init__(self, *args, **kwds): - ''' ''' + """Initialize.""" super(Base, self).__init__(*args, **kwds) self.CONF = self.useFixture(fixture_config.Config()).conf def setUp(self): + """Setup.""" super(Base, self).setUp() def run_test(self, stack_name, template_path): - ''' main function ''' + """Main function.""" pass def validate(self, result): + """Validate.""" self.assertEqual(True, result.ok, result.message) def validate_test(self, result): + """Validate Test.""" self.assertTrue(result) def get_name(self): + """Get Name.""" pass diff --git a/valet_plugins/valet_plugins/tests/unit/mocks/heat/common/i18n.py b/valet_plugins/valet_plugins/tests/unit/mocks/heat/common/i18n.py index 37d7a56..c6830dc 100644 --- a/valet_plugins/valet_plugins/tests/unit/mocks/heat/common/i18n.py +++ b/valet_plugins/valet_plugins/tests/unit/mocks/heat/common/i18n.py @@ -1,16 +1,19 @@ # # Copyright 2014-2017 AT&T Intellectual Property -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+"""i18n.""" + + _ = None diff --git a/valet_plugins/valet_plugins/tests/unit/mocks/heat/engine/lifecycle_plugin.py b/valet_plugins/valet_plugins/tests/unit/mocks/heat/engine/lifecycle_plugin.py index 380468c..60187c3 100644 --- a/valet_plugins/valet_plugins/tests/unit/mocks/heat/engine/lifecycle_plugin.py +++ b/valet_plugins/valet_plugins/tests/unit/mocks/heat/engine/lifecycle_plugin.py @@ -1,20 +1,23 @@ # # Copyright 2014-2017 AT&T Intellectual Property -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +"""Lifecycle Plugin.""" + + class LifecyclePlugin(object): - ''' classdocs ''' + """Classdoc.""" def __init__(self, params): - ''' Constructor ''' + """Constructor.""" diff --git a/valet_plugins/valet_plugins/tests/unit/mocks/nova/i18n.py b/valet_plugins/valet_plugins/tests/unit/mocks/nova/i18n.py index 0cac3a2..440899e 100644 --- a/valet_plugins/valet_plugins/tests/unit/mocks/nova/i18n.py +++ b/valet_plugins/valet_plugins/tests/unit/mocks/nova/i18n.py @@ -1,18 +1,21 @@ # # Copyright 2014-2017 AT&T Intellectual Property -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. +"""i18n.""" + + def _(string): pass diff --git a/valet_plugins/valet_plugins/tests/unit/mocks/nova/scheduler/filters.py b/valet_plugins/valet_plugins/tests/unit/mocks/nova/scheduler/filters.py index 6d651cf..d0b1a82 100644 --- a/valet_plugins/valet_plugins/tests/unit/mocks/nova/scheduler/filters.py +++ b/valet_plugins/valet_plugins/tests/unit/mocks/nova/scheduler/filters.py @@ -1,20 +1,23 @@ # # Copyright 2014-2017 AT&T Intellectual Property -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +"""Filters.""" + + class BaseHostFilter(object): - ''' classdocs ''' + """Classdocs.""" def __init__(self, params): - ''' Constructor ''' + """Constructor.""" diff --git a/valet_plugins/valet_plugins/tests/unit/test_plugins.py b/valet_plugins/valet_plugins/tests/unit/test_plugins.py index c0c600a..e2de414 100644 --- a/valet_plugins/valet_plugins/tests/unit/test_plugins.py +++ b/valet_plugins/valet_plugins/tests/unit/test_plugins.py @@ -1,36 +1,42 @@ # # Copyright 2014-2017 AT&T Intellectual Property -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +"""Test Plugins.""" + import mock from valet_plugins.plugins.heat.plugins import ValetLifecyclePlugin from valet_plugins.tests.base import Base class TestPlugins(Base): + """Test valet_plugins.plugins.heat.plugins ValetLifecyclePlugin.""" def setUp(self): + """Setup Test Plugins and call ValetLifecyclePlugin Init.""" super(TestPlugins, self).setUp() self.valet_life_cycle_plugin = self.init_ValetLifecyclePlugin() @mock.patch('valet_plugins.common.valet_api.ValetAPIWrapper') def init_ValetLifecyclePlugin(self, mock_class): + """Called by setup to init, return ValetLifecyclePlugin().""" with mock.patch('oslo_config.cfg.CONF'): return ValetLifecyclePlugin() def test_do_pre_op(self): + """Validate life cycle pre_ops by checking api method calls.""" stack = mock.MagicMock() stack.status = "IN_PROGRESS" @@ -52,8 +58,10 @@ class TestPlugins(Base): self.valet_life_cycle_plugin.hints_enabled = True stack.status = "IN_PROGRESS" self.valet_life_cycle_plugin.do_pre_op(cnxt, stack, action="DELETE") - self.validate_test("plans_delete" in self.valet_life_cycle_plugin.api.method_calls[0]) + self.validate_test("plans_delete" in + self.valet_life_cycle_plugin.api.method_calls[0]) # action create self.valet_life_cycle_plugin.do_pre_op(cnxt, stack, action="CREATE") - self.validate_test("plans_create" in self.valet_life_cycle_plugin.api.method_calls[1]) + self.validate_test("plans_create" in + self.valet_life_cycle_plugin.api.method_calls[1]) diff --git a/valet_plugins/valet_plugins/tests/unit/test_valet_api.py b/valet_plugins/valet_plugins/tests/unit/test_valet_api.py index 
7dfe408..b24fb7c 100644 --- a/valet_plugins/valet_plugins/tests/unit/test_valet_api.py +++ b/valet_plugins/valet_plugins/tests/unit/test_valet_api.py @@ -1,34 +1,40 @@ # # Copyright 2014-2017 AT&T Intellectual Property -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +"""Test Valet API.""" + import mock from valet_plugins.tests.base import Base from valet_plugins.common.valet_api import ValetAPIWrapper, requests class TestValetApi(Base): + """Test Valet Plugins API.""" def setUp(self): + """Setup Test Valet Api and call ValetAPIWrapper init.""" super(TestValetApi, self).setUp() self.valet_api_wrapper = self.init_ValetAPIWrapper() @mock.patch.object(ValetAPIWrapper, "_register_opts") def init_ValetAPIWrapper(self, mock_api): + """Called by setup, mock api return value to none.""" mock_api.return_value = None return ValetAPIWrapper() @mock.patch.object(requests, 'request') def test_plans_create(self, mock_request): + """Test Plans create, mock request return value to none.""" mock_request.post.return_value = None diff --git a/valet_plugins/valet_plugins/tests/unit/test_valet_filter.py b/valet_plugins/valet_plugins/tests/unit/test_valet_filter.py index 407cf1c..1f18441 100644 --- a/valet_plugins/valet_plugins/tests/unit/test_valet_filter.py +++ b/valet_plugins/valet_plugins/tests/unit/test_valet_filter.py @@ -1,18 +1,20 @@ # # Copyright 2014-2017 AT&T Intellectual Property -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file 
except in compliance with the License. # You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +"""Test Valet Filter.""" + from keystoneclient.v2_0 import client import mock from valet_plugins.common import valet_api @@ -21,13 +23,18 @@ from valet_plugins.tests.base import Base class TestResources(object): + """Test Resources.""" + def __init__(self, host_name): + """Initialize.""" self.host = host_name class TestValetFilter(Base): + """Test Valet Filter Base.""" def setUp(self): + """Setup by mocking client and init valet filter.""" super(TestValetFilter, self).setUp() client.Client = mock.MagicMock() @@ -36,6 +43,7 @@ class TestValetFilter(Base): @mock.patch.object(valet_api.ValetAPIWrapper, '_register_opts') @mock.patch.object(ValetFilter, '_register_opts') def init_ValetFilter(self, mock_opt, mock_init): + """Called by setup, mock init and opt and return ValetFilter().""" mock_init.return_value = None mock_opt.return_value = None return ValetFilter() @@ -43,31 +51,41 @@ class TestValetFilter(Base): @mock.patch.object(valet_api.ValetAPIWrapper, 'plans_create') @mock.patch.object(valet_api.ValetAPIWrapper, 'placement') def test_filter_all(self, mock_placement, mock_create): + """Test Filter All by validating resource host values.""" mock_placement.return_value = None mock_create.return_value = None with mock.patch('oslo_config.cfg.CONF') as config: - setattr(config, "valet", {self.valet_filter.opt_failure_mode_str: "yield", - self.valet_filter.opt_project_name_str: "test_admin_tenant_name", - self.valet_filter.opt_username_str: "test_admin_username", - self.valet_filter.opt_password_str: "test_admin_password", 
- self.valet_filter.opt_auth_uri_str: "test_admin_auth_url"}) + setattr(config, "valet", + {self.valet_filter.opt_failure_mode_str: "yield", + self.valet_filter.opt_project_name_str: + "test_admin_tenant_name", + self.valet_filter.opt_username_str: "test_admin_username", + self.valet_filter.opt_password_str: "test_admin_password", + self.valet_filter.opt_auth_uri_str: "test_admin_auth_url"}) - filter_properties = {'request_spec': {'instance_properties': {'uuid': ""}}, - 'scheduler_hints': {'heat_resource_uuid': "123456"}, + filter_properties = {'request_spec': {'instance_properties': + {'uuid': ""}}, + 'scheduler_hints': + {'heat_resource_uuid': "123456"}, 'instance_type': {'name': "instance_name"}} - resources = self.valet_filter.filter_all([TestResources("first_host"), TestResources("second_host")], filter_properties) + resources = self.valet_filter.filter_all( + [TestResources("first_host"), TestResources("second_host")], + filter_properties) for resource in resources: self.validate_test(resource.host in "first_host, second_host") self.validate_test(mock_placement.called) - filter_properties = {'request_spec': {'instance_properties': {'uuid': ""}}, + filter_properties = {'request_spec': {'instance_properties': + {'uuid': ""}}, 'scheduler_hints': "scheduler_hints", 'instance_type': {'name': "instance_name"}} - resources = self.valet_filter.filter_all([TestResources("first_host"), TestResources("second_host")], filter_properties) + resources = self.valet_filter.filter_all( + [TestResources("first_host"), TestResources("second_host")], + filter_properties) for _ in resources: self.validate_test(mock_create.called)