Fix pep8 and docstring violations

Fix issues in py files in the directories
valet/valet/api/common
valet/api
valet/cli
valet/engine/optimizer/
valet/tests/
valet/tests/tempest
valet_plugins
This commit is contained in:
Tanvir Talukder 2017-01-13 15:13:13 -06:00 committed by Omar Rivera
parent 5b9c5cf5b8
commit ad2b7fab31
158 changed files with 4931 additions and 2628 deletions

View File

@ -18,9 +18,7 @@ Valet responds to the challenges outlined above by enhancing OpenStack Nova sche
* [valet-openstack](https://github.com/att-comdev/valet/blob/master/doc/valet_os.md): a set of OpenStack plugins used to interact with Valet * [valet-openstack](https://github.com/att-comdev/valet/blob/master/doc/valet_os.md): a set of OpenStack plugins used to interact with Valet
* [valet-api](https://github.com/att-comdev/valet/blob/master/doc/valet_api.md): an API engine used to interact with Valet * [valet-api](https://github.com/att-comdev/valet/blob/master/doc/valet_api.md): an API engine used to interact with Valet
* [Ostro](https://github.com/att-comdev/valet/blob/master/doc/ostro.md): a placement optimization engine * [Ostro](https://github.com/att-comdev/valet/blob/master/doc/ostro.md): a placement optimization engine
* Music: a data storage and persistence service
* [ostro-listener](https://github.com/att-comdev/valet/blob/master/doc/ostro_listener.md): a message bus listener used in conjunction with Ostro and Music * [ostro-listener](https://github.com/att-comdev/valet/blob/master/doc/ostro_listener.md): a message bus listener used in conjunction with Ostro and Music
* [havalet](https://github.com/att-comdev/valet/blob/master/doc/ha.md): a service that assists in providing high availability for Valet
## Additional documents: ## Additional documents:

11
doc/.idea/doc.iml Normal file
View File

@ -0,0 +1,11 @@
<?xml version="1.0" encoding="UTF-8"?>
<module type="PYTHON_MODULE" version="4">
<component name="NewModuleRootManager">
<content url="file://$MODULE_DIR$" />
<orderEntry type="inheritedJdk" />
<orderEntry type="sourceFolder" forTests="false" />
</component>
<component name="TestRunnerService">
<option name="PROJECT_TEST_RUNNER" value="Unittests" />
</component>
</module>

4
doc/.idea/misc.xml Normal file
View File

@ -0,0 +1,4 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="ProjectRootManager" version="2" project-jdk-name="Python 3.6.0 (C:\Users\nv757p\vEnv-36\Scripts\python.exe)" project-jdk-type="Python SDK" />
</project>

8
doc/.idea/modules.xml Normal file
View File

@ -0,0 +1,8 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="ProjectModuleManager">
<modules>
<module fileurl="file://$PROJECT_DIR$/.idea/doc.iml" filepath="$PROJECT_DIR$/.idea/doc.iml" />
</modules>
</component>
</project>

250
doc/.idea/workspace.xml Normal file
View File

@ -0,0 +1,250 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="ChangeListManager">
<list default="true" id="c284aaf5-9cd1-41bb-acfb-a73c8e5a3cec" name="Default" comment="" />
<option name="EXCLUDED_CONVERTED_TO_IGNORED" value="true" />
<option name="TRACKING_ENABLED" value="true" />
<option name="SHOW_DIALOG" value="false" />
<option name="HIGHLIGHT_CONFLICTS" value="true" />
<option name="HIGHLIGHT_NON_ACTIVE_CHANGELIST" value="false" />
<option name="LAST_RESOLUTION" value="IGNORE" />
</component>
<component name="CreatePatchCommitExecutor">
<option name="PATCH_PATH" value="" />
</component>
<component name="ExecutionTargetManager" SELECTED_TARGET="default_target" />
<component name="FileEditorManager">
<leaf />
</component>
<component name="ProjectFrameBounds">
<option name="x" value="722" />
<option name="y" value="6" />
<option name="width" value="1191" />
<option name="height" value="1030" />
</component>
<component name="ProjectView">
<navigator currentView="ProjectPane" proportions="" version="1">
<flattenPackages />
<showMembers />
<showModules />
<showLibraryContents />
<hideEmptyPackages />
<abbreviatePackageNames />
<autoscrollToSource />
<autoscrollFromSource />
<sortByType />
<manualOrder />
<foldersAlwaysOnTop value="true" />
</navigator>
<panes>
<pane id="ProjectPane">
<subPane>
<PATH>
<PATH_ELEMENT>
<option name="myItemId" value="doc" />
<option name="myItemType" value="com.intellij.ide.projectView.impl.nodes.ProjectViewProjectNode" />
</PATH_ELEMENT>
<PATH_ELEMENT>
<option name="myItemId" value="doc" />
<option name="myItemType" value="com.intellij.ide.projectView.impl.nodes.PsiDirectoryNode" />
</PATH_ELEMENT>
</PATH>
</subPane>
</pane>
<pane id="Scope" />
<pane id="Scratches" />
</panes>
</component>
<component name="PropertiesComponent">
<property name="last_opened_file_path" value="$PROJECT_DIR$/../../hello_devstack" />
</component>
<component name="RunManager">
<configuration default="true" type="PythonConfigurationType" factoryName="Python">
<option name="INTERPRETER_OPTIONS" value="" />
<option name="PARENT_ENVS" value="true" />
<envs>
<env name="PYTHONUNBUFFERED" value="1" />
</envs>
<option name="SDK_HOME" value="" />
<option name="WORKING_DIRECTORY" value="" />
<option name="IS_MODULE_SDK" value="false" />
<option name="ADD_CONTENT_ROOTS" value="true" />
<option name="ADD_SOURCE_ROOTS" value="true" />
<module name="doc" />
<option name="SCRIPT_NAME" value="" />
<option name="PARAMETERS" value="" />
<option name="SHOW_COMMAND_LINE" value="false" />
<method />
</configuration>
<configuration default="true" type="Tox" factoryName="Tox">
<option name="INTERPRETER_OPTIONS" value="" />
<option name="PARENT_ENVS" value="true" />
<envs />
<option name="SDK_HOME" value="" />
<option name="WORKING_DIRECTORY" value="" />
<option name="IS_MODULE_SDK" value="false" />
<option name="ADD_CONTENT_ROOTS" value="true" />
<option name="ADD_SOURCE_ROOTS" value="true" />
<module name="doc" />
<method />
</configuration>
<configuration default="true" type="tests" factoryName="Attests">
<option name="INTERPRETER_OPTIONS" value="" />
<option name="PARENT_ENVS" value="true" />
<envs />
<option name="SDK_HOME" value="" />
<option name="WORKING_DIRECTORY" value="" />
<option name="IS_MODULE_SDK" value="false" />
<option name="ADD_CONTENT_ROOTS" value="true" />
<option name="ADD_SOURCE_ROOTS" value="true" />
<module name="doc" />
<option name="SCRIPT_NAME" value="" />
<option name="CLASS_NAME" value="" />
<option name="METHOD_NAME" value="" />
<option name="FOLDER_NAME" value="" />
<option name="TEST_TYPE" value="TEST_SCRIPT" />
<option name="PATTERN" value="" />
<option name="USE_PATTERN" value="false" />
<method />
</configuration>
<configuration default="true" type="tests" factoryName="Doctests">
<option name="INTERPRETER_OPTIONS" value="" />
<option name="PARENT_ENVS" value="true" />
<envs />
<option name="SDK_HOME" value="" />
<option name="WORKING_DIRECTORY" value="" />
<option name="IS_MODULE_SDK" value="false" />
<option name="ADD_CONTENT_ROOTS" value="true" />
<option name="ADD_SOURCE_ROOTS" value="true" />
<module name="doc" />
<option name="SCRIPT_NAME" value="" />
<option name="CLASS_NAME" value="" />
<option name="METHOD_NAME" value="" />
<option name="FOLDER_NAME" value="" />
<option name="TEST_TYPE" value="TEST_SCRIPT" />
<option name="PATTERN" value="" />
<option name="USE_PATTERN" value="false" />
<method />
</configuration>
<configuration default="true" type="tests" factoryName="Nosetests">
<option name="INTERPRETER_OPTIONS" value="" />
<option name="PARENT_ENVS" value="true" />
<envs />
<option name="SDK_HOME" value="" />
<option name="WORKING_DIRECTORY" value="" />
<option name="IS_MODULE_SDK" value="false" />
<option name="ADD_CONTENT_ROOTS" value="true" />
<option name="ADD_SOURCE_ROOTS" value="true" />
<module name="doc" />
<option name="SCRIPT_NAME" value="" />
<option name="CLASS_NAME" value="" />
<option name="METHOD_NAME" value="" />
<option name="FOLDER_NAME" value="" />
<option name="TEST_TYPE" value="TEST_SCRIPT" />
<option name="PATTERN" value="" />
<option name="USE_PATTERN" value="false" />
<option name="PARAMS" value="" />
<option name="USE_PARAM" value="false" />
<method />
</configuration>
<configuration default="true" type="tests" factoryName="Unittests">
<option name="INTERPRETER_OPTIONS" value="" />
<option name="PARENT_ENVS" value="true" />
<envs />
<option name="SDK_HOME" value="" />
<option name="WORKING_DIRECTORY" value="" />
<option name="IS_MODULE_SDK" value="false" />
<option name="ADD_CONTENT_ROOTS" value="true" />
<option name="ADD_SOURCE_ROOTS" value="true" />
<module name="doc" />
<option name="SCRIPT_NAME" value="" />
<option name="CLASS_NAME" value="" />
<option name="METHOD_NAME" value="" />
<option name="FOLDER_NAME" value="" />
<option name="TEST_TYPE" value="TEST_SCRIPT" />
<option name="PATTERN" value="" />
<option name="USE_PATTERN" value="false" />
<option name="PUREUNITTEST" value="true" />
<option name="PARAMS" value="" />
<option name="USE_PARAM" value="false" />
<method />
</configuration>
<configuration default="true" type="tests" factoryName="py.test">
<option name="INTERPRETER_OPTIONS" value="" />
<option name="PARENT_ENVS" value="true" />
<envs />
<option name="SDK_HOME" value="" />
<option name="WORKING_DIRECTORY" value="" />
<option name="IS_MODULE_SDK" value="false" />
<option name="ADD_CONTENT_ROOTS" value="true" />
<option name="ADD_SOURCE_ROOTS" value="true" />
<module name="doc" />
<option name="SCRIPT_NAME" value="" />
<option name="CLASS_NAME" value="" />
<option name="METHOD_NAME" value="" />
<option name="FOLDER_NAME" value="" />
<option name="TEST_TYPE" value="TEST_SCRIPT" />
<option name="PATTERN" value="" />
<option name="USE_PATTERN" value="false" />
<option name="testToRun" value="" />
<option name="keywords" value="" />
<option name="params" value="" />
<option name="USE_PARAM" value="false" />
<option name="USE_KEYWORD" value="false" />
<method />
</configuration>
</component>
<component name="ShelveChangesManager" show_recycled="false">
<option name="remove_strategy" value="false" />
</component>
<component name="TaskManager">
<task active="true" id="Default" summary="Default task">
<changelist id="c284aaf5-9cd1-41bb-acfb-a73c8e5a3cec" name="Default" comment="" />
<created>1485886196137</created>
<option name="number" value="Default" />
<option name="presentableId" value="Default" />
<updated>1485886196137</updated>
</task>
<servers />
</component>
<component name="ToolWindowManager">
<frame x="722" y="6" width="1191" height="1030" extended-state="0" />
<editor active="false" />
<layout>
<window_info id="Project" active="true" anchor="left" auto_hide="false" internal_type="DOCKED" type="DOCKED" visible="true" show_stripe_button="true" weight="0.24936171" sideWeight="0.5" order="0" side_tool="false" content_ui="combo" />
<window_info id="TODO" active="false" anchor="bottom" auto_hide="false" internal_type="DOCKED" type="DOCKED" visible="false" show_stripe_button="true" weight="0.33" sideWeight="0.5" order="6" side_tool="false" content_ui="tabs" />
<window_info id="Event Log" active="false" anchor="bottom" auto_hide="false" internal_type="DOCKED" type="DOCKED" visible="false" show_stripe_button="true" weight="0.33" sideWeight="0.5" order="-1" side_tool="true" content_ui="tabs" />
<window_info id="Version Control" active="false" anchor="bottom" auto_hide="false" internal_type="DOCKED" type="DOCKED" visible="false" show_stripe_button="false" weight="0.33" sideWeight="0.5" order="-1" side_tool="false" content_ui="tabs" />
<window_info id="Python Console" active="false" anchor="bottom" auto_hide="false" internal_type="DOCKED" type="DOCKED" visible="false" show_stripe_button="true" weight="0.33" sideWeight="0.5" order="-1" side_tool="false" content_ui="tabs" />
<window_info id="Structure" active="false" anchor="left" auto_hide="false" internal_type="DOCKED" type="DOCKED" visible="false" show_stripe_button="true" weight="0.25" sideWeight="0.5" order="1" side_tool="false" content_ui="tabs" />
<window_info id="Terminal" active="false" anchor="bottom" auto_hide="false" internal_type="DOCKED" type="DOCKED" visible="false" show_stripe_button="true" weight="0.33" sideWeight="0.5" order="-1" side_tool="false" content_ui="tabs" />
<window_info id="Favorites" active="false" anchor="left" auto_hide="false" internal_type="DOCKED" type="DOCKED" visible="false" show_stripe_button="true" weight="0.33" sideWeight="0.5" order="-1" side_tool="true" content_ui="tabs" />
<window_info id="Cvs" active="false" anchor="bottom" auto_hide="false" internal_type="DOCKED" type="DOCKED" visible="false" show_stripe_button="true" weight="0.25" sideWeight="0.5" order="4" side_tool="false" content_ui="tabs" />
<window_info id="Hierarchy" active="false" anchor="right" auto_hide="false" internal_type="DOCKED" type="DOCKED" visible="false" show_stripe_button="true" weight="0.25" sideWeight="0.5" order="2" side_tool="false" content_ui="combo" />
<window_info id="Message" active="false" anchor="bottom" auto_hide="false" internal_type="DOCKED" type="DOCKED" visible="false" show_stripe_button="true" weight="0.33" sideWeight="0.5" order="0" side_tool="false" content_ui="tabs" />
<window_info id="Commander" active="false" anchor="right" auto_hide="false" internal_type="DOCKED" type="DOCKED" visible="false" show_stripe_button="true" weight="0.4" sideWeight="0.5" order="0" side_tool="false" content_ui="tabs" />
<window_info id="Find" active="false" anchor="bottom" auto_hide="false" internal_type="DOCKED" type="DOCKED" visible="false" show_stripe_button="true" weight="0.33" sideWeight="0.5" order="1" side_tool="false" content_ui="tabs" />
<window_info id="Inspection" active="false" anchor="bottom" auto_hide="false" internal_type="DOCKED" type="DOCKED" visible="false" show_stripe_button="true" weight="0.4" sideWeight="0.5" order="5" side_tool="false" content_ui="tabs" />
<window_info id="Run" active="false" anchor="bottom" auto_hide="false" internal_type="DOCKED" type="DOCKED" visible="false" show_stripe_button="true" weight="0.33" sideWeight="0.5" order="2" side_tool="false" content_ui="tabs" />
<window_info id="Ant Build" active="false" anchor="right" auto_hide="false" internal_type="DOCKED" type="DOCKED" visible="false" show_stripe_button="true" weight="0.25" sideWeight="0.5" order="1" side_tool="false" content_ui="tabs" />
<window_info id="Debug" active="false" anchor="bottom" auto_hide="false" internal_type="DOCKED" type="DOCKED" visible="false" show_stripe_button="true" weight="0.4" sideWeight="0.5" order="3" side_tool="false" content_ui="tabs" />
</layout>
</component>
<component name="VcsContentAnnotationSettings">
<option name="myLimit" value="2678400000" />
</component>
<component name="XDebuggerManager">
<breakpoint-manager />
<watches-manager />
</component>
<component name="editorHistoryManager">
<entry file="file://$PROJECT_DIR$/ostro_listener1.rst">
<provider selected="true" editor-type-id="text-editor">
<state relative-caret-position="0">
<caret line="0" column="0" lean-forward="false" selection-start-line="0" selection-start-column="0" selection-end-line="0" selection-end-column="0" />
<folding />
</state>
</provider>
</entry>
</component>
</project>

View File

@ -1,176 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

111
doc/ha.md
View File

@ -1,111 +0,0 @@
High Availability Valet Tools
=============================
This tool monitors one or more configured processes to maintain high
availability.
~~~~ {.bash}
$ python ./ha_valet.py [-p name]
~~~~
ha\_valet.cfg
-------------
The ha\_valet configuration file contains a list of dictionaries. List
keys are logical process names. List values are dictionaries
representing a monitored Valet-related process.
Each dictionary **must** contain the following properties:
host
user
port
protocol
start_command
stop_command
test_command
Optional properties include:
order
priority
stand_by_list
### Notes
- The return value of `test_command` **must not** be 0 and should
reflect the monitored process priority (see next section).
- `stand_by_list` is an optional comma-delimited list of hosts used in
conjunction with active/stand-by scenarios. ha\_valet will attempt
to restart the instance with the lower priority. If that instance
fails to start, ha\_valet will try restarting the process of the
next host in the list.
- `priority` is used to establish the primary/secondary hierarchy. It
**must** be greater than 0. The lower the number, the higher the
priority.
### Monitored Process Priority
Monitored process priority is used in conjunction with active/stand-by
scenarios. Unless a process is down, its priority **must** be greater
than 0. The lower the number, the higher the priority.
For example, an instance returning `1` (in response to `test_command`)
will take precedence over an instance returning `2`. A priority of 0
means the process is down.
Examples
--------
### Host A
:Ostro
host = Host_A
stand_by_list = Host_A,Host_B
user = stack
port = 8091
protocol = http
priority = 1
start_command="ssh %s@%s 'cd @OSTRO_SERVER_DIR@ ; sudo python ./ostro_daemon.py start'" % (user, host)
stop_command="ssh %s@%s 'cd @OSTRO_SERVER_DIR@ ; sudo python ./ostro_daemon.py stop'" % (user, host)
test_command="ssh %s@%s 'exit $(@OSTRO_SERVER_DIR@ ; sudo python ./ostro_daemon.py status ; echo $?)'" % (user, host)
:Allegro
host = Host_A
user = stack
port = 8090
protocol = http
priority = 1
start_command="sudo python @ALLEGRO_WSGI_DIR@/wsgi.py &"
stop_command="sudo pkill -f wsgi"
test_command="netstat -nap | grep %s | grep LISTEN | wc -l | exit $(awk \'{print $1}\')" % (port)
### Host B (172.20.90.130)
:Ostro
host = Host_B
stand_by_list = Host_A,Host_B
user = stack
port = 8091
protocol = http
priority = 2
start_command="ssh %s@%s 'cd @OSTRO_SERVER_DIR@ ; sudo python ./ostro_daemon.py start'" % (user, host)
stop_command="ssh %s@%s 'cd @OSTRO_SERVER_DIR@ ; sudo python ./ostro_daemon.py stop'" % (user, host)
test_command="ssh %s@%s 'exit $(@OSTRO_SERVER_DIR@ ; sudo python ./ostro_daemon.py status ; echo $?)'" % (user, host)
:Allegro
host = Host_B
user = stack
port = 8090
protocol = http
priority = 1
start_command="sudo python @ALLEGRO_WSGI_DIR@/wsgi.py &"
stop_command="sudo pkill -f wsgi"
test_command="netstat -nap | grep %s | grep LISTEN | wc -l | exit $(awk \'{print $1}\')" % (port)
Contact
-------
Joe D'Andrea <jdandrea@research.att.com>

View File

@ -23,18 +23,19 @@ Throughout this document, the following installation-specific items are
required. Have values for these prepared and ready before continuing. required. Have values for these prepared and ready before continuing.
Suggestions for values are provided in this document where applicable. Suggestions for values are provided in this document where applicable.
Name Description Example | Name | Description | Example |
----------------------------- --------------------------------------------------- ------------------------------------------- |------|-------------|---------|
`$USER` User id `user1234` | `$USER` | User id | `user1234` |
`$VENV` Python virtual environment path (if any) `/etc/ostro-listener/venv` | `$VENV` | Python virtual environment path (if any) | `/etc/ostro-listener/venv` |
`$OSTRO_LISTENER_PATH` Local git repository's `ostro_listener` directory `/home/user1234/git/allegro/ostro_listener` | `$OSTRO_LISTENER_PATH` | Local git repository's `ostro_listener` directory | `/home/user1234/git/allegro/ostro_listener` |
`$CONFIG_FILE` Event Listener configuration file `/etc/ostro-listener/ostro-listener.conf` | `$CONFIG_FILE` | Event Listener configuration file | `/etc/ostro-listener/ostro-listener.conf` |
`$RABBITMQ_HOST` RabbitMQ hostname or IP address `localhost` | `$RABBITMQ_HOST` | RabbitMQ hostname or IP address | `localhost` |
`$RABBITMQ_USERNAME` RabbitMQ username `guest` | `$RABBITMQ_USERNAME` | RabbitMQ username | `guest` |
`$RABBITMQ_PASSWORD_FILE` Full path to RabbitMQ password file `/etc/ostro-listener/passwd` | `$RABBITMQ_PASSWORD_FILE` | Full path to RabbitMQ password file | `/etc/ostro-listener/passwd` |
`$MUSIC_URL` Music API endpoints and port in URL format `http://127.0.0.1:8080/` | `$MUSIC_URL` | Music API endpoints and port in URL format | `http://127.0.0.1:8080/` |
`$MUSIC_KEYSPACE` Music keyspace `valet` | `$MUSIC_KEYSPACE` | Music keyspace | `valet` |
`$MUSIC_REPLICATION_FACTOR` Music replication factor `1` | `$MUSIC_REPLICATION_FACTOR` | Music replication factor | `1` |
Root or sufficient sudo privileges are required for some steps. Root or sufficient sudo privileges are required for some steps.
@ -250,8 +251,3 @@ $ sudo pip uninstall ostro-listener
Remove previously made configuration file changes, files, and other Remove previously made configuration file changes, files, and other
settings as needed. settings as needed.
Contact
-------
Joe D'Andrea <jdandrea@research.att.com>

View File

@ -28,15 +28,6 @@ Valet1.0/Ostro features
load spikes of tenant applications. Later, we will deploy more load spikes of tenant applications. Later, we will deploy more
dynamic mechanism in the future version of Ostro. dynamic mechanism in the future version of Ostro.
- High availability Ostro replicas run in an active-passive way. When
active Ostro fails, automatically the passive one is activated via
HAValet. All data is updated in MUSIC database at runtime whenever
it is changed. When the passive Ostro is activated, it gets data
from MUSIC to initialize its status rather than from OpenStack.
Ostro also takes ping messages to show if it is alive or not.
- Runtime update via the Oslo message bus or RO Working on this. - Runtime update via the Oslo message bus or RO Working on this.
- Migration tip Working on this. - Migration tip Working on this.

View File

@ -1,34 +0,0 @@
# Valet
Valet gives OpenStack the ability to optimize cloud resources while simultaneously meeting a cloud application's QoS requirements. Valet provides an api service, a placement optimizer (Ostro), a high availability data storage and persistence layer (Music), and a set of OpenStack plugins.
**IMPORTANT**: [Overall Installation of Valet is covered in a separate document](https://github.com/att-comdev/valet/blob/master/doc/valet_os.md).
Learn more about Valet:
* [OpenStack Newton Summit Presentation](https://www.openstack.org/videos/video/valet-holistic-data-center-optimization-for-openstack) (Austin, TX, 27 April 2016)
* [Presentation Slides](http://www.research.att.com/export/sites/att_labs/techdocs/TD_101806.pdf) (PDF)
Valet consists of the following components:
* [valet-openstack](https://github.com/att-comdev/valet/blob/master/doc/valet_os.md): a set of OpenStack plugins used to interact with Valet
* [valet-api](https://github.com/att-comdev/valet/blob/master/doc/valet_api.md): an API engine used to interact with Valet
* [Ostro](https://github.com/att-comdev/valet/blob/master/doc/ostro.md): a placement optimization engine
* Music: a data storage and persistence service
* [ostro-listener](https://github.com/att-comdev/valet/blob/master/doc/ostro_listener.md): a message bus listener used in conjunction with Ostro and Music
* [havalet](https://github.com/att-comdev/valet/blob/master/doc/ha.md): a service that assists in providing high availability for Valet
Additional documents:
* [OpenStack Heat Resource Plugins](https://github.com/att-comdev/valet/blob/master/valet_plugins/valet_plugins/heat/README.md): Heat resources
* [Placement API](https://github.com/att-comdev/valet/blob/master/doc/valet_api.md): API requests/responses
* [Using Postman with valet-api](https://github.com/att-comdev/valet/blob/master/valet/tests/api/README.md): Postman support
## Thank You
Alicia Abella, Saar Alaluf, Bharath Balasubramanian, Roy Ben Hai, Shimon Benattar, Yael Ben Shalom, Benny Bustan, Rachel Cohen, Joe D'Andrea, Harel Dorfman, Boaz Elitzur, P.K. Esrawan, Inbal Harduf, Matti Hiltunen, Doron Honigsberg, Kaustubh Joshi, Gueyoung Jung, Gerald Karam, David Khanin, Israel Kliger, Erez Korn, Max Osipov, Chris Rice, Amnon Sagiv, Gideon Shafran, Galit Shemesh, Anna Yefimov; AT&T Advanced Technology and Architecture, AT&T Technology Development - AIC, Additional partners in AT&T Domain 2.0. Apologies if we missed anyone (please advise via email!).
## Contact
Joe D'Andrea <jdandrea@research.att.com>

View File

@ -4,7 +4,7 @@ Valet gives OpenStack the ability to optimize cloud resources while simultaneous
This document covers installation of valet-api, the API engine used to interact with Valet. This document covers installation of valet-api, the API engine used to interact with Valet.
**IMPORTANT**: [Overall Installation of Valet is covered in a separate document](https://github.com/att-comdev/valet/blob/master/doc/valet_os.md). These instructions are to be used by the Bedminster and Tel Aviv development teams. **IMPORTANT**: [Overall Installation of Valet is covered in a separate document](https://github.com/att-comdev/valet/blob/master/doc/valet_os.md).
## Prerequisites ## Prerequisites
@ -13,7 +13,7 @@ Prior to installation:
* Ubuntu 14.04 LTS * Ubuntu 14.04 LTS
* Python 2.7.6 with pip * Python 2.7.6 with pip
* An OpenStack Kilo cloud * An OpenStack Kilo cloud
* Music 6.0 * [Music](https://github.com/att-comdev/valet) 6.0
* [Ostro](https://github.com/att-comdev/valet/blob/master/doc/ostro.md) 2.0 * [Ostro](https://github.com/att-comdev/valet/blob/master/doc/ostro.md) 2.0
Throughout this document, the following installation-specific items are required. Have values for these prepared and ready before continuing. Suggestions for values are provided in this document where applicable. Throughout this document, the following installation-specific items are required. Have values for these prepared and ready before continuing. Suggestions for values are provided in this document where applicable.
@ -249,7 +249,3 @@ $ sudo pip uninstall valet-api
``` ```
Remove previously made configuration file changes, OpenStack user accounts, and other settings as needed. Remove previously made configuration file changes, OpenStack user accounts, and other settings as needed.
## Contact
Joe D'Andrea <jdandrea@research.att.com>

View File

@ -173,7 +173,3 @@ $ sudo pip uninstall valet-openstack
``` ```
Remove previously made configuration file changes, OpenStack user accounts, and other settings as needed. Remove previously made configuration file changes, OpenStack user accounts, and other settings as needed.
## Contact
Joe D'Andrea <jdandrea@research.att.com>

View File

@ -94,8 +94,9 @@ ostro = {
messaging = { messaging = {
'config': { 'config': {
'transport_url': 'rabbit://' + CONF.messaging.username + ':' + CONF.messaging.password + 'transport_url': 'rabbit://' + CONF.messaging.username + ':' +
'@' + CONF.messaging.host + ':' + str(CONF.messaging.port) + '/' CONF.messaging.password + '@' + CONF.messaging.host + ':' +
str(CONF.messaging.port) + '/'
} }
} }

View File

@ -13,21 +13,27 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
"""Notification Listener."""
import json import json
from oslo_config import cfg from oslo_config import cfg
import oslo_messaging import oslo_messaging
class NotificationEndpoint(object): class NotificationEndpoint(object):
"""Endponit for a notifcation (info, warn, error)."""
def info(self, ctxt, publisher_id, event_type, payload, metadata): def info(self, ctxt, publisher_id, event_type, payload, metadata):
"""Print notifaction was received and dumb json data to print."""
print('recv notification:') print('recv notification:')
print(json.dumps(payload, indent=4)) print(json.dumps(payload, indent=4))
def warn(self, ctxt, publisher_id, event_type, payload, metadata): def warn(self, ctxt, publisher_id, event_type, payload, metadata):
"""Warn."""
None None
def error(self, ctxt, publisher_id, event_type, payload, metadata): def error(self, ctxt, publisher_id, event_type, payload, metadata):
"""Error."""
None None
transport = oslo_messaging.get_transport(cfg.CONF) transport = oslo_messaging.get_transport(cfg.CONF)

View File

@ -13,7 +13,7 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
'''Setup''' """ Setup """
import setuptools import setuptools

View File

@ -63,4 +63,3 @@ show-source = True
ignore = E123,E125,E501,H401,H105,H301 ignore = E123,E125,E501,H401,H105,H301
builtins = _ builtins = _
exclude=.venv,.git,.tox,dist,doc,*openstack/common*,*lib/python*,*egg,build exclude=.venv,.git,.tox,dist,doc,*openstack/common*,*lib/python*,*egg,build

View File

@ -13,7 +13,7 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
'''Application''' """Application."""
from pecan.deploy import deploy from pecan.deploy import deploy
from pecan import make_app from pecan import make_app
@ -23,7 +23,7 @@ from valet.api.db import models
def setup_app(config): def setup_app(config):
""" App Setup """ """App Setup."""
identity.init_identity() identity.init_identity()
messaging.init_messaging() messaging.init_messaging()
models.init_model() models.init_model()
@ -36,6 +36,7 @@ def setup_app(config):
# entry point for apache2 # entry point for apache2
def load_app(config_file): def load_app(config_file):
"""App Load."""
register_conf() register_conf()
set_domain(project='valet') set_domain(project='valet')
return deploy(config_file) return deploy(config_file)

View File

@ -1,19 +1,25 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain # Copyright 2014-2017 AT&T Intellectual Property
# a copy of the License at #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# #
# http://www.apache.org/licenses/LICENSE-2.0 # http://www.apache.org/licenses/LICENSE-2.0
# #
# Unless required by applicable law or agreed to in writing, software # Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # distributed under the License is distributed on an "AS IS" BASIS,
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# License for the specific language governing permissions and limitations # See the License for the specific language governing permissions and
# under the License. # limitations under the License.
"""Terminate thread."""
import ctypes import ctypes
def terminate_thread(thread): def terminate_thread(thread):
"""Terminates a python thread from another thread. """Terminate a python thread from another thread.
:param thread: a threading.Thread instance :param thread: a threading.Thread instance
""" """
@ -26,8 +32,8 @@ def terminate_thread(thread):
if res == 0: if res == 0:
raise ValueError("nonexistent thread id") raise ValueError("nonexistent thread id")
elif res > 1: elif res > 1:
# """if it returns a number greater than one, you're in trouble, # If it returns a number greater than one, you're in trouble,
# and you should call it again with exc=NULL to revert the effect""" # and you should call it again with exc=NULL to revert the effect
ctypes.pythonapi.PyThreadState_SetAsyncExc(thread.ident, None) ctypes.pythonapi.PyThreadState_SetAsyncExc(thread.ident, None)
raise SystemError("PyThreadState_SetAsyncExc failed") raise SystemError("PyThreadState_SetAsyncExc failed")
print('valet watcher thread exits') print('valet watcher thread exits')

View File

@ -13,7 +13,7 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
'''Compute helper library''' """Compute helper library."""
from novaclient import client from novaclient import client
from pecan import conf from pecan import conf
@ -23,7 +23,7 @@ VERSION = 2
def nova_client(): def nova_client():
'''Returns a nova client''' """Return a nova client."""
sess = conf.identity.engine.session sess = conf.identity.engine.session
nova = client.Client(VERSION, session=sess) nova = client.Client(VERSION, session=sess)
return nova return nova

View File

@ -13,7 +13,7 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
'''Hooks''' """Hooks."""
import json import json
import logging import logging
@ -31,8 +31,10 @@ LOG = logging.getLogger(__name__)
class MessageNotificationHook(PecanHook): class MessageNotificationHook(PecanHook):
'''Send API request/responses out as Oslo msg notifications.''' """Send API request/responses out as Oslo msg notifications."""
def after(self, state): def after(self, state):
"""Function sends valet notification."""
self.dummy = True self.dummy = True
LOG.info('sending notification') LOG.info('sending notification')
notifier = conf.messaging.notifier notifier = conf.messaging.notifier
@ -44,7 +46,8 @@ class MessageNotificationHook(PecanHook):
else: else:
notifier_fn = notifier.error notifier_fn = notifier.error
ctxt = {} # Not using this just yet. # Not using this just yet.
ctxt = {}
request_path = state.request.path request_path = state.request.path
@ -86,7 +89,8 @@ class MessageNotificationHook(PecanHook):
} }
} }
# notifier_fn blocks in case rabbit mq is down - it prevents the Valet API from returning its response :( # notifier_fn blocks in case rabbit mq is down
# it prevents the Valet API from returning its response
# send the notification in a different thread # send the notification in a different thread
notifier_thread = threading.Thread(target=notifier_fn, args=(ctxt, event_type, payload)) notifier_thread = threading.Thread(target=notifier_fn, args=(ctxt, event_type, payload))
notifier_thread.start() notifier_thread.start()
@ -99,10 +103,11 @@ class MessageNotificationHook(PecanHook):
class NotFoundHook(PecanHook): class NotFoundHook(PecanHook):
'''Catchall 'not found' hook for API''' """Catchall 'not found' hook for API."""
def on_error(self, state, exc): def on_error(self, state, exc):
"""Redirect to app-specific not_found endpoint if 404 only."""
self.dummy = True self.dummy = True
'''Redirects to app-specific not_found endpoint if 404 only'''
if isinstance(exc, webob.exc.WSGIHTTPException) and exc.code == 404: if isinstance(exc, webob.exc.WSGIHTTPException) and exc.code == 404:
message = _('The resource could not be found.') message = _('The resource could not be found.')
error('/errors/not_found', message) error('/errors/not_found', message)

View File

@ -13,7 +13,7 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
"""i18n library""" """i18n library."""
import gettext import gettext

View File

@ -13,7 +13,7 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
'''Identity helper library''' """Identity helper library."""
from datetime import datetime from datetime import datetime
@ -30,12 +30,13 @@ LOG = logging.getLogger(__name__)
def utcnow(): def utcnow():
'''Returns the time (UTC)''' """Return the time (UTC)."""
return datetime.now(tz=pytz.utc) return datetime.now(tz=pytz.utc)
class Identity(object): class Identity(object):
'''Convenience library for all identity service-related queries.''' """Convenience library for all identity service-related queries."""
_args = None _args = None
_client = None _client = None
_interface = None _interface = None
@ -43,7 +44,7 @@ class Identity(object):
@classmethod @classmethod
def is_token_admin(cls, token): def is_token_admin(cls, token):
'''Returns true if decoded token has an admin role''' """Return true if decoded token has an admin role."""
for role in token.user.get('roles', []): for role in token.user.get('roles', []):
if role.get('name') == 'admin': if role.get('name') == 'admin':
return True return True
@ -51,16 +52,16 @@ class Identity(object):
@classmethod @classmethod
def tenant_from_token(cls, token): def tenant_from_token(cls, token):
'''Returns tenant id from decoded token''' """Return tenant id from decoded token."""
return token.tenant.get('id', None) return token.tenant.get('id', None)
@classmethod @classmethod
def user_from_token(cls, token): def user_from_token(cls, token):
'''Returns user id from decoded token''' """Return user id from decoded token."""
return token.user.get('id', None) return token.user.get('id', None)
def __init__(self, interface='admin', **kwargs): def __init__(self, interface='admin', **kwargs):
'''Initializer.''' """Initializer."""
self._interface = interface self._interface = interface
self._args = kwargs self._args = kwargs
self._client = None self._client = None
@ -68,7 +69,7 @@ class Identity(object):
@property @property
def _client_expired(self): def _client_expired(self):
'''Returns True if cached client's token is expired.''' """Return True if cached client's token is expired."""
# NOTE: Keystone may auto-regen the client now (v2? v3?) # NOTE: Keystone may auto-regen the client now (v2? v3?)
# If so, this trip may no longer be necessary. Doesn't # If so, this trip may no longer be necessary. Doesn't
# hurt to keep it around for the time being. # hurt to keep it around for the time being.
@ -84,7 +85,7 @@ class Identity(object):
@property @property
def client(self): def client(self):
'''Returns an identity client.''' """Return an identity client."""
if not self._client or self._client_expired: if not self._client or self._client_expired:
auth = v2.Password(**self._args) auth = v2.Password(**self._args)
self._session = session.Session(auth=auth) self._session = session.Session(auth=auth)
@ -94,11 +95,11 @@ class Identity(object):
@property @property
def session(self): def session(self):
'''Read-only access to the session.''' """Read-only access to the session."""
return self._session return self._session
def validate_token(self, auth_token): def validate_token(self, auth_token):
'''Returns validated token or None if invalid''' """Return validated token or None if invalid."""
kwargs = { kwargs = {
'token': auth_token, 'token': auth_token,
} }
@ -110,7 +111,7 @@ class Identity(object):
return None return None
def is_tenant_list_valid(self, tenant_list): def is_tenant_list_valid(self, tenant_list):
'''Returns true if tenant list contains valid tenant IDs''' """Return true if tenant list contains valid tenant IDs."""
tenants = self.client.tenants.list() tenants = self.client.tenants.list()
if isinstance(tenant_list, list): if isinstance(tenant_list, list):
found = False found = False
@ -123,6 +124,7 @@ class Identity(object):
def is_tenant_in_tenants(tenant_id, tenants): def is_tenant_in_tenants(tenant_id, tenants):
"""Return true if tenant exists."""
for tenant in tenants: for tenant in tenants:
if tenant_id == tenant.id: if tenant_id == tenant.id:
return True return True
@ -130,7 +132,7 @@ def is_tenant_in_tenants(tenant_id, tenants):
def _identity_engine_from_config(config): def _identity_engine_from_config(config):
'''Initialize the identity engine based on supplied config.''' """Initialize the identity engine based on supplied config."""
# Using tenant_name instead of project name due to keystone v2 # Using tenant_name instead of project name due to keystone v2
kwargs = { kwargs = {
'username': config.get('username'), 'username': config.get('username'),
@ -144,7 +146,7 @@ def _identity_engine_from_config(config):
def init_identity(): def init_identity():
'''Initialize the identity engine and place in the config.''' """Initialize the identity engine and place in the config."""
config = conf.identity.config config = conf.identity.config
engine = _identity_engine_from_config(config) engine = _identity_engine_from_config(config)
conf.identity.engine = engine conf.identity.engine = engine

View File

@ -13,7 +13,7 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
'''Messaging helper library''' """Messaging helper library."""
from oslo_config import cfg from oslo_config import cfg
import oslo_messaging as messaging import oslo_messaging as messaging
@ -22,7 +22,7 @@ from valet.api.conf import set_domain, DOMAIN
def _messaging_notifier_from_config(config): def _messaging_notifier_from_config(config):
'''Initialize the messaging engine based on supplied config.''' """Initialize the messaging engine based on supplied config."""
transport_url = config.get('transport_url') transport_url = config.get('transport_url')
transport = messaging.get_transport(cfg.CONF, transport_url) transport = messaging.get_transport(cfg.CONF, transport_url)
notifier = messaging.Notifier(transport, driver='messaging', notifier = messaging.Notifier(transport, driver='messaging',
@ -32,7 +32,7 @@ def _messaging_notifier_from_config(config):
def init_messaging(): def init_messaging():
'''Initialize the messaging engine and place in the config.''' """Initialize the messaging engine and place in the config."""
set_domain(DOMAIN) set_domain(DOMAIN)
config = conf.messaging.config config = conf.messaging.config
notifier = _messaging_notifier_from_config(config) notifier = _messaging_notifier_from_config(config)

View File

@ -13,7 +13,7 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
'''Ostro helper library''' """Ostro helper library."""
import json import json
import logging import logging
@ -42,13 +42,13 @@ EXCLUSIVITY = 'exclusivity'
def _log(text, title="Ostro"): def _log(text, title="Ostro"):
'''Log helper''' """Log helper."""
log_text = "%s: %s" % (title, text) log_text = "%s: %s" % (title, text)
LOG.debug(log_text) LOG.debug(log_text)
class Ostro(object): class Ostro(object):
'''Ostro optimization engine helper class.''' """Ostro optimization engine helper class."""
args = None args = None
request = None request = None
@ -56,12 +56,15 @@ class Ostro(object):
error_uri = None error_uri = None
tenant_id = None tenant_id = None
tries = None # Number of times to poll for placement. # Number of times to poll for placement.
interval = None # Interval in seconds to poll for placement. tries = None
# Interval in seconds to poll for placement.
interval = None
@classmethod @classmethod
def _build_error(cls, message): def _build_error(cls, message):
'''Build an Ostro-style error message''' """Build an Ostro-style error message."""
if not message: if not message:
message = _("Unknown error") message = _("Unknown error")
error = { error = {
@ -74,7 +77,7 @@ class Ostro(object):
@classmethod @classmethod
def _build_uuid_map(cls, resources): def _build_uuid_map(cls, resources):
'''Build a dict mapping names to UUIDs.''' """Build a dict mapping names to UUIDs."""
mapping = {} mapping = {}
for key in resources.iterkeys(): for key in resources.iterkeys():
if 'name' in resources[key]: if 'name' in resources[key]:
@ -84,7 +87,7 @@ class Ostro(object):
@classmethod @classmethod
def _sanitize_resources(cls, resources): def _sanitize_resources(cls, resources):
'''Ensure lowercase keys at the top level of each resource.''' """Ensure lowercase keys at the top level of each resource."""
for res in resources.itervalues(): for res in resources.itervalues():
for key in list(res.keys()): for key in list(res.keys()):
if not key.islower(): if not key.islower():
@ -92,12 +95,12 @@ class Ostro(object):
return resources return resources
def __init__(self): def __init__(self):
'''Initializer''' """Initializer."""
self.tries = conf.music.get('tries', 10) self.tries = conf.music.get('tries', 10)
self.interval = conf.music.get('interval', 1) self.interval = conf.music.get('interval', 1)
def _map_names_to_uuids(self, mapping, data): def _map_names_to_uuids(self, mapping, data):
'''Map resource names to their UUID equivalents.''' """Map resource names to their UUID equivalents."""
if isinstance(data, dict): if isinstance(data, dict):
for key in data.iterkeys(): for key in data.iterkeys():
if key != 'name': if key != 'name':
@ -110,11 +113,11 @@ class Ostro(object):
return data return data
def _prepare_resources(self, resources): def _prepare_resources(self, resources):
''' Pre-digests resource data for use by Ostro. """Pre-digest resource data for use by Ostro.
Maps Heat resource names to Orchestration UUIDs. Maps Heat resource names to Orchestration UUIDs.
Ensures exclusivity groups exist and have tenant_id as a member. Ensures exclusivity groups exist and have tenant_id as a member.
''' """
mapping = self._build_uuid_map(resources) mapping = self._build_uuid_map(resources)
ostro_resources = self._map_names_to_uuids(mapping, resources) ostro_resources = self._map_names_to_uuids(mapping, resources)
self._sanitize_resources(ostro_resources) self._sanitize_resources(ostro_resources)
@ -126,8 +129,7 @@ class Ostro(object):
# TODO(JD): This really belongs in valet-engine once it exists. # TODO(JD): This really belongs in valet-engine once it exists.
def _send(self, stack_id, request): def _send(self, stack_id, request):
'''Send request.''' """Send request."""
# Creating the placement request effectively enqueues it. # Creating the placement request effectively enqueues it.
PlacementRequest(stack_id=stack_id, request=request) # pylint: disable=W0612 PlacementRequest(stack_id=stack_id, request=request) # pylint: disable=W0612
@ -149,13 +151,13 @@ class Ostro(object):
return json.dumps(response) return json.dumps(response)
def _verify_groups(self, resources, tenant_id): def _verify_groups(self, resources, tenant_id):
''' Verifies group settings. Returns an error status dict if the """Verify group settings.
group type is invalid, if a group name is used when the type Returns an error status dict if the group type is invalid, if a
is affinity or diversity, if a nonexistent exclusivity group group name is used when the type is affinity or diversity, if a
is found, or if the tenant is not a group member. nonexistent exclusivity group is found, or if the tenant
Returns None if ok. is not a group member. Returns None if ok.
''' """
message = None message = None
for res in resources.itervalues(): for res in resources.itervalues():
res_type = res.get('type') res_type = res.get('type')
@ -167,13 +169,17 @@ class Ostro(object):
group_type == DIVERSITY: group_type == DIVERSITY:
if group_name: if group_name:
self.error_uri = '/errors/conflict' self.error_uri = '/errors/conflict'
message = _("%s must not be used when {0} is '{1}'. ").format(GROUP_NAME, GROUP_TYPE, group_type) message = _("%s must not be used when"
" {0} is '{1}'.").format(GROUP_NAME,
GROUP_TYPE,
group_type)
break break
elif group_type == EXCLUSIVITY: elif group_type == EXCLUSIVITY:
message = self._verify_exclusivity(group_name, tenant_id) message = self._verify_exclusivity(group_name, tenant_id)
else: else:
self.error_uri = '/errors/invalid' self.error_uri = '/errors/invalid'
message = _("{0} '{1}' is invalid.").format(GROUP_TYPE, group_type) message = _("{0} '{1}' is invalid.").format(GROUP_TYPE,
group_type)
break break
if message: if message:
return self._build_error(message) return self._build_error(message)
@ -182,7 +188,9 @@ class Ostro(object):
return_message = None return_message = None
if not group_name: if not group_name:
self.error_uri = '/errors/invalid' self.error_uri = '/errors/invalid'
return _("%s must be used when {0} is '{1}'.").format(GROUP_NAME, GROUP_TYPE, EXCLUSIVITY) return _("%s must be used when {0} is '{1}'.").format(GROUP_NAME,
GROUP_TYPE,
EXCLUSIVITY)
group = Group.query.filter_by( # pylint: disable=E1101 group = Group.query.filter_by( # pylint: disable=E1101
name=group_name).first() name=group_name).first()
@ -191,15 +199,19 @@ class Ostro(object):
return_message = "%s '%s' not found" % (GROUP_NAME, group_name) return_message = "%s '%s' not found" % (GROUP_NAME, group_name)
elif group and tenant_id not in group.members: elif group and tenant_id not in group.members:
self.error_uri = '/errors/conflict' self.error_uri = '/errors/conflict'
return_message = _("Tenant ID %s not a member of {0} '{1}' ({2})").format(self.tenant_id, GROUP_NAME, group.name, group.id) return_message = _("Tenant ID %s not a member of "
"{0} '{1}' ({2})").format(self.tenant_id,
GROUP_NAME,
group.name,
group.id)
return return_message return return_message
def build_request(self, **kwargs): def build_request(self, **kwargs):
''' Build an Ostro request. If False is returned, """Build an Ostro request.
the response attribute contains status as to the error.
'''
If False is returned then the response attribute contains
status as to the error.
"""
# TODO(JD): Refactor this into create and update methods? # TODO(JD): Refactor this into create and update methods?
self.args = kwargs.get('args') self.args = kwargs.get('args')
self.tenant_id = kwargs.get('tenant_id') self.tenant_id = kwargs.get('tenant_id')
@ -235,7 +247,7 @@ class Ostro(object):
return True return True
def is_request_serviceable(self): def is_request_serviceable(self):
''' Returns true if the request has at least one serviceable resource. ''' """Return true if request has at least one serviceable resource."""
# TODO(JD): Ostro should return no placements vs throw an error. # TODO(JD): Ostro should return no placements vs throw an error.
resources = self.request.get('resources', {}) resources = self.request.get('resources', {})
for res in resources.itervalues(): for res in resources.itervalues():
@ -245,7 +257,7 @@ class Ostro(object):
return False return False
def ping(self): def ping(self):
'''Send a ping request and obtain a response.''' """Send a ping request and obtain a response."""
stack_id = str(uuid.uuid4()) stack_id = str(uuid.uuid4())
self.args = {'stack_id': stack_id} self.args = {'stack_id': stack_id}
self.response = None self.response = None
@ -256,7 +268,7 @@ class Ostro(object):
} }
def replan(self, **kwargs): def replan(self, **kwargs):
'''Replan a placement.''' """Replan a placement."""
self.args = kwargs.get('args') self.args = kwargs.get('args')
self.response = None self.response = None
self.error_uri = None self.error_uri = None
@ -269,7 +281,7 @@ class Ostro(object):
} }
def migrate(self, **kwargs): def migrate(self, **kwargs):
'''Replan the placement for an existing resource.''' """Replan the placement for an existing resource."""
self.args = kwargs.get('args') self.args = kwargs.get('args')
self.response = None self.response = None
self.error_uri = None self.error_uri = None
@ -281,7 +293,7 @@ class Ostro(object):
} }
def query(self, **kwargs): def query(self, **kwargs):
'''Send a query.''' """Send a query."""
stack_id = str(uuid.uuid4()) stack_id = str(uuid.uuid4())
self.args = kwargs.get('args') self.args = kwargs.get('args')
self.args['stack_id'] = stack_id self.args['stack_id'] = stack_id
@ -295,7 +307,7 @@ class Ostro(object):
} }
def send(self): def send(self):
'''Send the request and obtain a response.''' """Send the request and obtain a response."""
request_json = json.dumps([self.request]) request_json = json.dumps([self.request])
# TODO(JD): Pass timeout value? # TODO(JD): Pass timeout value?

View File

@ -13,6 +13,8 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
"""Conf."""
from oslo_config import cfg from oslo_config import cfg
@ -70,10 +72,12 @@ music_opts = [
def set_domain(project=DOMAIN): def set_domain(project=DOMAIN):
"""Set Domain."""
CONF([], project) CONF([], project)
def register_conf(): def register_conf():
"""Register confs."""
CONF.register_group(server_group) CONF.register_group(server_group)
CONF.register_opts(server_opts, server_group) CONF.register_opts(server_opts, server_group)
CONF.register_group(music_group) CONF.register_group(music_group)

View File

@ -1,6 +1,5 @@
# -*- encoding: utf-8 -*-
# #
# Copyright (c) 2014-2016 AT&T # Copyright 2014-2017 AT&T Intellectual Property
# #
# Licensed under the Apache License, Version 2.0 (the "License"); # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. # you may not use this file except in compliance with the License.
@ -10,9 +9,7 @@
# #
# Unless required by applicable law or agreed to in writing, software # Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, # distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# implied.
#
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.

View File

@ -1,6 +1,5 @@
# -*- encoding: utf-8 -*-
# #
# Copyright (c) 2014-2016 AT&T # Copyright 2014-2017 AT&T Intellectual Property
# #
# Licensed under the Apache License, Version 2.0 (the "License"); # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. # you may not use this file except in compliance with the License.
@ -10,13 +9,11 @@
# #
# Unless required by applicable law or agreed to in writing, software # Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, # distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# implied.
#
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
'''Music ORM - Common Methods''' """Music ORM - Common Methods"""
from abc import ABCMeta, abstractmethod from abc import ABCMeta, abstractmethod
import inspect import inspect
@ -28,7 +25,7 @@ from valet.api.db.models.music.music import Music
def get_class(kls): def get_class(kls):
'''Returns a class given a fully qualified class name''' """Returns a class given a fully qualified class name"""
parts = kls.split('.') parts = kls.split('.')
module = ".".join(parts[:-1]) module = ".".join(parts[:-1])
mod = __import__(module) mod = __import__(module)
@ -38,7 +35,7 @@ def get_class(kls):
class abstractclassmethod(classmethod): # pylint: disable=C0103,R0903 class abstractclassmethod(classmethod): # pylint: disable=C0103,R0903
'''Abstract Class Method from Python 3.3's abc module''' """Abstract Class Method from Python 3.3's abc module"""
__isabstractmethod__ = True __isabstractmethod__ = True
@ -48,28 +45,28 @@ class abstractclassmethod(classmethod): # pylint: disable=C0103,R0903
class ClassPropertyDescriptor(object): # pylint: disable=R0903 class ClassPropertyDescriptor(object): # pylint: disable=R0903
'''Supports the notion of a class property''' """Supports the notion of a class property"""
def __init__(self, fget, fset=None): def __init__(self, fget, fset=None):
'''Initializer''' """Initializer"""
self.fget = fget self.fget = fget
self.fset = fset self.fset = fset
def __get__(self, obj, klass=None): def __get__(self, obj, klass=None):
'''Get attribute''' """Get attribute"""
if klass is None: if klass is None:
klass = type(obj) klass = type(obj)
return self.fget.__get__(obj, klass)() return self.fget.__get__(obj, klass)()
def __set__(self, obj, value): def __set__(self, obj, value):
'''Set attribute''' """Set attribute"""
if not self.fset: if not self.fset:
raise AttributeError(_("Can't set attribute")) raise AttributeError(_("Can't set attribute"))
type_ = type(obj) type_ = type(obj)
return self.fset.__get__(obj, type_)(value) return self.fset.__get__(obj, type_)(value)
def setter(self, func): def setter(self, func):
'''Setter''' """Setter"""
if not isinstance(func, (classmethod, staticmethod)): if not isinstance(func, (classmethod, staticmethod)):
func = classmethod(func) func = classmethod(func)
self.fset = func self.fset = func
@ -77,7 +74,7 @@ class ClassPropertyDescriptor(object): # pylint: disable=R0903
def classproperty(func): def classproperty(func):
'''Class Property decorator''' """Class Property decorator"""
if not isinstance(func, (classmethod, staticmethod)): if not isinstance(func, (classmethod, staticmethod)):
func = classmethod(func) func = classmethod(func)
@ -85,36 +82,36 @@ def classproperty(func):
class Results(list): class Results(list):
'''Query results''' """Query results"""
def __init__(self, *args, **kwargs): # pylint: disable=W0613 def __init__(self, *args, **kwargs): # pylint: disable=W0613
'''Initializer''' """Initializer"""
super(Results, self).__init__(args[0]) super(Results, self).__init__(args[0])
def all(self): def all(self):
'''Return all''' """Return all"""
return self return self
def first(self): def first(self):
'''Return first''' """Return first"""
if len(self) > 0: if len(self) > 0:
return self[0] return self[0]
@six.add_metaclass(ABCMeta) @six.add_metaclass(ABCMeta)
class Base(object): class Base(object):
''' A custom declarative base that provides some Elixir-inspired shortcuts. ''' """ A custom declarative base that provides some Elixir-inspired shortcuts. """
__tablename__ = None __tablename__ = None
@classproperty @classproperty
def query(cls): # pylint: disable=E0213 def query(cls): # pylint: disable=E0213
'''Return a query object a la sqlalchemy''' """Return a query object a la sqlalchemy"""
return Query(cls) return Query(cls)
@classmethod @classmethod
def __kwargs(cls): def __kwargs(cls):
'''Return common keyword args''' """Return common keyword args"""
keyspace = conf.music.get('keyspace') keyspace = conf.music.get('keyspace')
kwargs = { kwargs = {
'keyspace': keyspace, 'keyspace': keyspace,
@ -124,33 +121,33 @@ class Base(object):
@classmethod @classmethod
def create_table(cls): def create_table(cls):
'''Create table''' """Create table"""
kwargs = cls.__kwargs() kwargs = cls.__kwargs()
kwargs['schema'] = cls.schema() kwargs['schema'] = cls.schema()
conf.music.engine.create_table(**kwargs) conf.music.engine.create_table(**kwargs)
@abstractclassmethod @abstractclassmethod
def schema(cls): def schema(cls):
'''Return schema''' """Return schema"""
return cls() return cls()
@abstractclassmethod @abstractclassmethod
def pk_name(cls): def pk_name(cls):
'''Primary key name''' """Primary key name"""
return cls() return cls()
@abstractmethod @abstractmethod
def pk_value(self): def pk_value(self):
'''Primary key value''' """Primary key value"""
pass pass
@abstractmethod @abstractmethod
def values(self): def values(self):
'''Values''' """Values"""
pass pass
def insert(self): def insert(self):
'''Insert row''' """Insert row"""
kwargs = self.__kwargs() kwargs = self.__kwargs()
kwargs['values'] = self.values() kwargs['values'] = self.values()
pk_name = self.pk_name() pk_name = self.pk_name()
@ -161,7 +158,7 @@ class Base(object):
conf.music.engine.create_row(**kwargs) conf.music.engine.create_row(**kwargs)
def update(self): def update(self):
'''Update row''' """Update row"""
kwargs = self.__kwargs() kwargs = self.__kwargs()
kwargs['pk_name'] = self.pk_name() kwargs['pk_name'] = self.pk_name()
kwargs['pk_value'] = self.pk_value() kwargs['pk_value'] = self.pk_value()
@ -169,7 +166,7 @@ class Base(object):
conf.music.engine.update_row_eventually(**kwargs) conf.music.engine.update_row_eventually(**kwargs)
def delete(self): def delete(self):
'''Delete row''' """Delete row"""
kwargs = self.__kwargs() kwargs = self.__kwargs()
kwargs['pk_name'] = self.pk_name() kwargs['pk_name'] = self.pk_name()
kwargs['pk_value'] = self.pk_value() kwargs['pk_value'] = self.pk_value()
@ -177,26 +174,26 @@ class Base(object):
@classmethod @classmethod
def filter_by(cls, **kwargs): def filter_by(cls, **kwargs):
'''Filter objects''' """Filter objects"""
return cls.query.filter_by(**kwargs) # pylint: disable=E1101 return cls.query.filter_by(**kwargs) # pylint: disable=E1101
def flush(self, *args, **kwargs): def flush(self, *args, **kwargs):
'''Flush changes to storage''' """Flush changes to storage"""
# TODO(JD): Implement in music? May be a no-op # TODO(JD): Implement in music? May be a no-op
pass pass
def as_dict(self): def as_dict(self):
'''Return object representation as a dictionary''' """Return object representation as a dictionary"""
return dict((k, v) for k, v in self.__dict__.items() return dict((k, v) for k, v in self.__dict__.items()
if not k.startswith('_')) if not k.startswith('_'))
class Query(object): class Query(object):
'''Data Query''' """Data Query"""
model = None model = None
def __init__(self, model): def __init__(self, model):
'''Initializer''' """Initializer"""
if inspect.isclass(model): if inspect.isclass(model):
self.model = model self.model = model
elif isinstance(model, basestring): elif isinstance(model, basestring):
@ -204,7 +201,7 @@ class Query(object):
assert inspect.isclass(self.model) assert inspect.isclass(self.model)
def __kwargs(self): def __kwargs(self):
'''Return common keyword args''' """Return common keyword args"""
keyspace = conf.music.get('keyspace') keyspace = conf.music.get('keyspace')
kwargs = { kwargs = {
'keyspace': keyspace, 'keyspace': keyspace,
@ -213,7 +210,7 @@ class Query(object):
return kwargs return kwargs
def __rows_to_objects(self, rows): def __rows_to_objects(self, rows):
'''Convert query response rows to objects''' """Convert query response rows to objects"""
results = [] results = []
pk_name = self.model.pk_name() # pylint: disable=E1101 pk_name = self.model.pk_name() # pylint: disable=E1101
for __, row in rows.iteritems(): # pylint: disable=W0612 for __, row in rows.iteritems(): # pylint: disable=W0612
@ -224,13 +221,13 @@ class Query(object):
return Results(results) return Results(results)
def all(self): def all(self):
'''Return all objects''' """Return all objects"""
kwargs = self.__kwargs() kwargs = self.__kwargs()
rows = conf.music.engine.read_all_rows(**kwargs) rows = conf.music.engine.read_all_rows(**kwargs)
return self.__rows_to_objects(rows) return self.__rows_to_objects(rows)
def filter_by(self, **kwargs): def filter_by(self, **kwargs):
'''Filter objects''' """Filter objects"""
# Music doesn't allow filtering on anything but the primary key. # Music doesn't allow filtering on anything but the primary key.
# We need to get all items and then go looking for what we want. # We need to get all items and then go looking for what we want.
all_items = self.all() all_items = self.all()
@ -250,14 +247,14 @@ class Query(object):
def init_model(): def init_model():
'''Data Store Initialization''' """Data Store Initialization"""
conf.music.engine = _engine_from_config(conf.music) conf.music.engine = _engine_from_config(conf.music)
keyspace = conf.music.get('keyspace') keyspace = conf.music.get('keyspace')
conf.music.engine.create_keyspace(keyspace) conf.music.engine.create_keyspace(keyspace)
def _engine_from_config(configuration): def _engine_from_config(configuration):
'''Create database engine object based on configuration''' """Create database engine object based on configuration"""
configuration = dict(configuration) configuration = dict(configuration)
kwargs = { kwargs = {
'host': configuration.get('host'), 'host': configuration.get('host'),
@ -268,36 +265,30 @@ def _engine_from_config(configuration):
def start(): def start():
'''Start transaction''' """Start transaction"""
pass pass
def start_read_only(): def start_read_only():
'''Start read-only transaction''' """Start read-only transaction"""
start() start()
def commit(): def commit():
'''Commit transaction''' """Commit transaction"""
pass pass
def rollback(): def rollback():
'''Rollback transaction''' """Rollback transaction"""
pass pass
def clear(): def clear():
'''Clear transaction''' """Clear transaction"""
pass pass
def flush(): def flush():
'''Flush to disk''' """Flush to disk"""
pass pass
from valet.api.db.models.music.groups import Group
from valet.api.db.models.music.ostro import PlacementRequest, PlacementResult, Event
from valet.api.db.models.music.placements import Placement
from valet.api.db.models.music.plans import Plan

View File

@ -13,14 +13,15 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
'''Group Model''' """Group Model."""
from . import Base from . import Base
import simplejson import simplejson
class Group(Base): class Group(Base):
'''Group model''' """Group model."""
__tablename__ = 'groups' __tablename__ = 'groups'
id = None # pylint: disable=C0103 id = None # pylint: disable=C0103
@ -31,7 +32,7 @@ class Group(Base):
@classmethod @classmethod
def schema(cls): def schema(cls):
'''Return schema.''' """Return schema."""
schema = { schema = {
'id': 'text', 'id': 'text',
'name': 'text', 'name': 'text',
@ -44,16 +45,16 @@ class Group(Base):
@classmethod @classmethod
def pk_name(cls): def pk_name(cls):
'''Primary key name''' """Primary key name."""
return 'id' return 'id'
def pk_value(self): def pk_value(self):
'''Primary key value''' """Primary key value."""
return self.id return self.id
def values(self): def values(self):
'''Values''' """Values."""
# TODO(JD): Support lists in Music # TODO(UNKNOWN): Support lists in Music
# Lists aren't directly supported in Music, so we have to # Lists aren't directly supported in Music, so we have to
# convert to/from json on the way out/in. # convert to/from json on the way out/in.
return { return {
@ -64,7 +65,7 @@ class Group(Base):
} }
def __init__(self, name, description, type, members, _insert=True): def __init__(self, name, description, type, members, _insert=True):
'''Initializer''' """Initializer."""
super(Group, self).__init__() super(Group, self).__init__()
self.name = name self.name = name
self.description = description or "" self.description = description or ""
@ -73,15 +74,15 @@ class Group(Base):
self.members = [] # members ignored at init time self.members = [] # members ignored at init time
self.insert() self.insert()
else: else:
# TODO(JD): Support lists in Music # TODO(UNKNOWN): Support lists in Music
self.members = simplejson.loads(members) self.members = simplejson.loads(members)
def __repr__(self): def __repr__(self):
'''Object representation''' """Object representation."""
return '<Group %r>' % self.name return '<Group %r>' % self.name
def __json__(self): def __json__(self):
'''JSON representation''' """JSON representation."""
json_ = {} json_ = {}
json_['id'] = self.id json_['id'] = self.id
json_['name'] = self.name json_['name'] = self.name

View File

@ -13,7 +13,7 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
'''Music Data Store API''' """Music Data Store API."""
import json import json
import logging import logging
@ -27,7 +27,7 @@ LOG = logging.getLogger(__name__)
class REST(object): class REST(object):
'''Helper class for REST operations.''' """Helper class for REST operations."""
hosts = None hosts = None
port = None port = None
@ -37,8 +37,7 @@ class REST(object):
_urls = None _urls = None
def __init__(self, hosts, port, path='/', timeout='10'): def __init__(self, hosts, port, path='/', timeout='10'):
'''Initializer. Accepts target host list, port, and path.''' """Initializer. Accepts target host list, port, and path."""
self.hosts = hosts # List of IP or FQDNs self.hosts = hosts # List of IP or FQDNs
self.port = port # Port Number self.port = port # Port Number
self.path = path # Path starting with / self.path = path # Path starting with /
@ -46,8 +45,7 @@ class REST(object):
@property @property
def urls(self): def urls(self):
'''Returns list of URLs using each host, plus the port/path.''' """Return list of URLs using each host, plus the port/path."""
if not self._urls: if not self._urls:
urls = [] urls = []
for host in self.hosts: for host in self.hosts:
@ -62,17 +60,19 @@ class REST(object):
@staticmethod @staticmethod
def __headers(content_type='application/json'): def __headers(content_type='application/json'):
'''Returns HTTP request headers.''' """Return HTTP request headers."""
headers = { headers = {
'accept': content_type, 'accept': content_type,
'content-type': content_type, 'content-type': content_type,
} }
return headers return headers
def request(self, method='get', content_type='application/json', path='/', data=None): def request(self, method='get', content_type='application/json', path='/',
''' Performs HTTP request ''' data=None):
"""Perform HTTP request."""
if method not in ('post', 'get', 'put', 'delete'): if method not in ('post', 'get', 'put', 'delete'):
raise KeyError(_("Method must be one of post, get, put, or delete.")) raise KeyError(_("Method must be one of post, get, put, "
"or delete."))
method_fn = getattr(requests, method) method_fn = getattr(requests, method)
response = None response = None
@ -107,7 +107,8 @@ class REST(object):
class Music(object): class Music(object):
'''Wrapper for Music API''' """Wrapper for Music API."""
lock_names = None # Cache of lock names created during session lock_names = None # Cache of lock names created during session
lock_timeout = None # Maximum time in seconds to acquire a lock lock_timeout = None # Maximum time in seconds to acquire a lock
@ -116,8 +117,7 @@ class Music(object):
def __init__(self, host=None, hosts=None, # pylint: disable=R0913 def __init__(self, host=None, hosts=None, # pylint: disable=R0913
port='8080', lock_timeout=10, replication_factor=3): port='8080', lock_timeout=10, replication_factor=3):
'''Initializer. Accepts a lock_timeout for atomic operations.''' """Initializer. Accept a lock_timeout for atomic operations."""
# If one host is provided, that overrides the list # If one host is provided, that overrides the list
if not hosts: if not hosts:
hosts = ['localhost'] hosts = ['localhost']
@ -137,7 +137,7 @@ class Music(object):
self.replication_factor = replication_factor self.replication_factor = replication_factor
def create_keyspace(self, keyspace): def create_keyspace(self, keyspace):
'''Creates a keyspace.''' """Create a keyspace."""
data = { data = {
'replicationInfo': { 'replicationInfo': {
'class': 'SimpleStrategy', 'class': 'SimpleStrategy',
@ -154,7 +154,7 @@ class Music(object):
return response.ok return response.ok
def create_table(self, keyspace, table, schema): def create_table(self, keyspace, table, schema):
'''Creates a table.''' """Create a table."""
data = { data = {
'fields': schema, 'fields': schema,
'consistencyInfo': { 'consistencyInfo': {
@ -171,14 +171,14 @@ class Music(object):
return response.ok return response.ok
def version(self): def version(self):
'''Returns version string.''' """Return version string."""
path = '/version' path = '/version'
response = self.rest.request(method='get', response = self.rest.request(method='get',
content_type='text/plain', path=path) content_type='text/plain', path=path)
return response.text return response.text
def create_row(self, keyspace, table, values): def create_row(self, keyspace, table, values):
'''Create a row.''' """Create a row."""
data = { data = {
'values': values, 'values': values,
'consistencyInfo': { 'consistencyInfo': {
@ -194,14 +194,14 @@ class Music(object):
return response.ok return response.ok
def create_lock(self, lock_name): def create_lock(self, lock_name):
'''Returns the lock id. Use for acquiring and releasing.''' """Return the lock id. Use for acquiring and releasing."""
path = '/locks/create/%s' % lock_name path = '/locks/create/%s' % lock_name
response = self.rest.request(method='post', response = self.rest.request(method='post',
content_type='text/plain', path=path) content_type='text/plain', path=path)
return response.text return response.text
def acquire_lock(self, lock_id): def acquire_lock(self, lock_id):
'''Acquire a lock.''' """Acquire a lock."""
path = '/locks/acquire/%s' % lock_id path = '/locks/acquire/%s' % lock_id
response = self.rest.request(method='get', response = self.rest.request(method='get',
content_type='text/plain', path=path) content_type='text/plain', path=path)
@ -209,7 +209,7 @@ class Music(object):
return response.text.lower() == 'true' return response.text.lower() == 'true'
def release_lock(self, lock_id): def release_lock(self, lock_id):
'''Release a lock.''' """Release a lock."""
path = '/locks/release/%s' % lock_id path = '/locks/release/%s' % lock_id
response = self.rest.request(method='delete', response = self.rest.request(method='delete',
content_type='text/plain', path=path) content_type='text/plain', path=path)
@ -217,7 +217,7 @@ class Music(object):
@staticmethod @staticmethod
def __row_url_path(keyspace, table, pk_name, pk_value): def __row_url_path(keyspace, table, pk_name, pk_value):
'''Returns a Music-compliant row URL path.''' """Return a Music-compliant row URL path."""
path = '/keyspaces/%(keyspace)s/tables/%(table)s/rows' % { path = '/keyspaces/%(keyspace)s/tables/%(table)s/rows' % {
'keyspace': keyspace, 'keyspace': keyspace,
'table': table, 'table': table,
@ -229,7 +229,7 @@ class Music(object):
def update_row_eventually(self, keyspace, table, # pylint: disable=R0913 def update_row_eventually(self, keyspace, table, # pylint: disable=R0913
pk_name, pk_value, values): pk_name, pk_value, values):
'''Update a row. Not atomic.''' """Update a row. Not atomic."""
data = { data = {
'values': values, 'values': values,
'consistencyInfo': { 'consistencyInfo': {
@ -243,8 +243,7 @@ class Music(object):
def update_row_atomically(self, keyspace, table, # pylint: disable=R0913 def update_row_atomically(self, keyspace, table, # pylint: disable=R0913
pk_name, pk_value, values): pk_name, pk_value, values):
'''Update a row atomically.''' """Update a row atomically."""
# Create lock for the candidate. The Music API dictates that the # Create lock for the candidate. The Music API dictates that the
# lock name must be of the form keyspace.table.primary_key # lock name must be of the form keyspace.table.primary_key
lock_name = '%(keyspace)s.%(table)s.%(primary_key)s' % { lock_name = '%(keyspace)s.%(table)s.%(primary_key)s' % {
@ -279,7 +278,7 @@ class Music(object):
return response.ok return response.ok
def delete_row_eventually(self, keyspace, table, pk_name, pk_value): def delete_row_eventually(self, keyspace, table, pk_name, pk_value):
'''Delete a row. Not atomic.''' """Delete a row. Not atomic."""
data = { data = {
'consistencyInfo': { 'consistencyInfo': {
'type': 'eventual', 'type': 'eventual',
@ -291,7 +290,7 @@ class Music(object):
return response.ok return response.ok
def read_row(self, keyspace, table, pk_name, pk_value, log=None): def read_row(self, keyspace, table, pk_name, pk_value, log=None):
'''Read one row based on a primary key name/value.''' """Read one row based on a primary key name/value."""
path = self.__row_url_path(keyspace, table, pk_name, pk_value) path = self.__row_url_path(keyspace, table, pk_name, pk_value)
response = self.rest.request(path=path) response = self.rest.request(path=path)
if log: if log:
@ -299,11 +298,11 @@ class Music(object):
return response.json() return response.json()
def read_all_rows(self, keyspace, table): def read_all_rows(self, keyspace, table):
'''Read all rows.''' """Read all rows."""
return self.read_row(keyspace, table, pk_name=None, pk_value=None) return self.read_row(keyspace, table, pk_name=None, pk_value=None)
def drop_keyspace(self, keyspace): def drop_keyspace(self, keyspace):
'''Drops a keyspace.''' """Drop a keyspace."""
data = { data = {
'consistencyInfo': { 'consistencyInfo': {
'type': 'eventual', 'type': 'eventual',
@ -315,16 +314,15 @@ class Music(object):
return response.ok return response.ok
def delete_lock(self, lock_name): def delete_lock(self, lock_name):
'''Deletes a lock by name.''' """Delete a lock by name."""
path = '/locks/delete/%s' % lock_name path = '/locks/delete/%s' % lock_name
response = self.rest.request(content_type='text/plain', response = self.rest.request(content_type='text/plain',
method='delete', path=path) method='delete', path=path)
return response.ok return response.ok
def delete_all_locks(self): def delete_all_locks(self):
'''Delete all locks created during the lifetime of this object.''' """Delete all locks created during the lifetime of this object."""
# TODO(UNKNOWN): Shouldn't this really be part of internal cleanup?
# TODO(JD): Shouldn't this really be part of internal cleanup?
# FIXME: It can be several API calls. Any way to do in one fell swoop? # FIXME: It can be several API calls. Any way to do in one fell swoop?
for lock_name in self.lock_names: for lock_name in self.lock_names:
self.delete_lock(lock_name) self.delete_lock(lock_name)

View File

@ -13,13 +13,14 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
'''Ostro Models''' """Ostro Models."""
from . import Base from . import Base
class PlacementRequest(Base): class PlacementRequest(Base):
'''Placement Request Model''' """Placement Request Model."""
__tablename__ = 'placement_requests' __tablename__ = 'placement_requests'
stack_id = None stack_id = None
@ -27,7 +28,7 @@ class PlacementRequest(Base):
@classmethod @classmethod
def schema(cls): def schema(cls):
'''Return schema.''' """Return schema."""
schema = { schema = {
'stack_id': 'text', 'stack_id': 'text',
'request': 'text', 'request': 'text',
@ -37,22 +38,22 @@ class PlacementRequest(Base):
@classmethod @classmethod
def pk_name(cls): def pk_name(cls):
'''Primary key name''' """Primary key name."""
return 'stack_id' return 'stack_id'
def pk_value(self): def pk_value(self):
'''Primary key value''' """Primary key value."""
return self.stack_id return self.stack_id
def values(self): def values(self):
'''Values''' """Values."""
return { return {
'stack_id': self.stack_id, 'stack_id': self.stack_id,
'request': self.request, 'request': self.request,
} }
def __init__(self, request, stack_id=None, _insert=True): def __init__(self, request, stack_id=None, _insert=True):
'''Initializer''' """Initializer."""
super(PlacementRequest, self).__init__() super(PlacementRequest, self).__init__()
self.stack_id = stack_id self.stack_id = stack_id
self.request = request self.request = request
@ -60,11 +61,11 @@ class PlacementRequest(Base):
self.insert() self.insert()
def __repr__(self): def __repr__(self):
'''Object representation''' """Object representation."""
return '<PlacementRequest %r>' % self.stack_id return '<PlacementRequest %r>' % self.stack_id
def __json__(self): def __json__(self):
'''JSON representation''' """JSON representation."""
json_ = {} json_ = {}
json_['stack_id'] = self.stack_id json_['stack_id'] = self.stack_id
json_['request'] = self.request json_['request'] = self.request
@ -72,7 +73,8 @@ class PlacementRequest(Base):
class PlacementResult(Base): class PlacementResult(Base):
'''Placement Result Model''' """Placement Result Model."""
__tablename__ = 'placement_results' __tablename__ = 'placement_results'
stack_id = None stack_id = None
@ -80,7 +82,7 @@ class PlacementResult(Base):
@classmethod @classmethod
def schema(cls): def schema(cls):
'''Return schema.''' """Return schema."""
schema = { schema = {
'stack_id': 'text', 'stack_id': 'text',
'placement': 'text', 'placement': 'text',
@ -90,22 +92,22 @@ class PlacementResult(Base):
@classmethod @classmethod
def pk_name(cls): def pk_name(cls):
'''Primary key name''' """Primary key name."""
return 'stack_id' return 'stack_id'
def pk_value(self): def pk_value(self):
'''Primary key value''' """Primary key value."""
return self.stack_id return self.stack_id
def values(self): def values(self):
'''Values''' """Values."""
return { return {
'stack_id': self.stack_id, 'stack_id': self.stack_id,
'placement': self.placement, 'placement': self.placement,
} }
def __init__(self, placement, stack_id=None, _insert=True): def __init__(self, placement, stack_id=None, _insert=True):
'''Initializer''' """Initializer."""
super(PlacementResult, self).__init__() super(PlacementResult, self).__init__()
self.stack_id = stack_id self.stack_id = stack_id
self.placement = placement self.placement = placement
@ -113,11 +115,11 @@ class PlacementResult(Base):
self.insert() self.insert()
def __repr__(self): def __repr__(self):
'''Object representation''' """Object representation."""
return '<PlacementResult %r>' % self.stack_id return '<PlacementResult %r>' % self.stack_id
def __json__(self): def __json__(self):
'''JSON representation''' """JSON representation."""
json_ = {} json_ = {}
json_['stack_id'] = self.stack_id json_['stack_id'] = self.stack_id
json_['placement'] = self.placement json_['placement'] = self.placement
@ -125,7 +127,8 @@ class PlacementResult(Base):
class Event(Base): class Event(Base):
'''Event Model''' """Event Model."""
__tablename__ = 'events' __tablename__ = 'events'
event_id = None event_id = None
@ -133,7 +136,7 @@ class Event(Base):
@classmethod @classmethod
def schema(cls): def schema(cls):
'''Return schema.''' """Return schema."""
schema = { schema = {
'event_id': 'text', 'event_id': 'text',
'event': 'text', 'event': 'text',
@ -143,22 +146,22 @@ class Event(Base):
@classmethod @classmethod
def pk_name(cls): def pk_name(cls):
'''Primary key name''' """Primary key name."""
return 'event_id' return 'event_id'
def pk_value(self): def pk_value(self):
'''Primary key value''' """Primary key value."""
return self.event_id return self.event_id
def values(self): def values(self):
'''Values''' """Values."""
return { return {
'event_id': self.event_id, 'event_id': self.event_id,
'event': self.event, 'event': self.event,
} }
def __init__(self, event, event_id=None, _insert=True): def __init__(self, event, event_id=None, _insert=True):
'''Initializer''' """Initializer."""
super(Event, self).__init__() super(Event, self).__init__()
self.event_id = event_id self.event_id = event_id
self.event = event self.event = event
@ -166,11 +169,11 @@ class Event(Base):
self.insert() self.insert()
def __repr__(self): def __repr__(self):
'''Object representation''' """Object representation."""
return '<Event %r>' % self.event_id return '<Event %r>' % self.event_id
def __json__(self): def __json__(self):
'''JSON representation''' """JSON representation."""
json_ = {} json_ = {}
json_['event_id'] = self.event_id json_['event_id'] = self.event_id
json_['event'] = self.event json_['event'] = self.event

View File

@ -13,13 +13,14 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
'''Placement Model''' """Placement Model."""
from . import Base, Query from . import Base, Query
class Placement(Base): class Placement(Base):
'''Placement Model''' """Placement Model."""
__tablename__ = 'placements' __tablename__ = 'placements'
id = None # pylint: disable=C0103 id = None # pylint: disable=C0103
@ -32,7 +33,7 @@ class Placement(Base):
@classmethod @classmethod
def schema(cls): def schema(cls):
'''Return schema.''' """Return schema."""
schema = { schema = {
'id': 'text', 'id': 'text',
'name': 'text', 'name': 'text',
@ -47,15 +48,15 @@ class Placement(Base):
@classmethod @classmethod
def pk_name(cls): def pk_name(cls):
'''Primary key name''' """Primary key name."""
return 'id' return 'id'
def pk_value(self): def pk_value(self):
'''Primary key value''' """Primary key value."""
return self.id return self.id
def values(self): def values(self):
'''Values''' """Values."""
return { return {
'name': self.name, 'name': self.name,
'orchestration_id': self.orchestration_id, 'orchestration_id': self.orchestration_id,
@ -67,7 +68,7 @@ class Placement(Base):
def __init__(self, name, orchestration_id, resource_id=None, plan=None, def __init__(self, name, orchestration_id, resource_id=None, plan=None,
plan_id=None, location=None, reserved=False, _insert=True): plan_id=None, location=None, reserved=False, _insert=True):
'''Initializer''' """Initializer."""
super(Placement, self).__init__() super(Placement, self).__init__()
self.name = name self.name = name
self.orchestration_id = orchestration_id self.orchestration_id = orchestration_id
@ -82,11 +83,11 @@ class Placement(Base):
self.insert() self.insert()
def __repr__(self): def __repr__(self):
'''Object representation''' """Object representation."""
return '<Placement %r>' % self.name return '<Placement %r>' % self.name
def __json__(self): def __json__(self):
'''JSON representation''' """JSON representation."""
json_ = {} json_ = {}
json_['id'] = self.id json_['id'] = self.id
json_['name'] = self.name json_['name'] = self.name

View File

@ -13,13 +13,14 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
'''Plan Model''' """Plan Model."""
from . import Base, Query from . import Base, Query
class Plan(Base): class Plan(Base):
'''Plan model''' """Plan model."""
__tablename__ = 'plans' __tablename__ = 'plans'
id = None # pylint: disable=C0103 id = None # pylint: disable=C0103
@ -28,7 +29,7 @@ class Plan(Base):
@classmethod @classmethod
def schema(cls): def schema(cls):
'''Return schema.''' """Return schema."""
schema = { schema = {
'id': 'text', 'id': 'text',
'name': 'text', 'name': 'text',
@ -39,22 +40,22 @@ class Plan(Base):
@classmethod @classmethod
def pk_name(cls): def pk_name(cls):
'''Primary key name''' """Primary key name."""
return 'id' return 'id'
def pk_value(self): def pk_value(self):
'''Primary key value''' """Primary key value."""
return self.id return self.id
def values(self): def values(self):
'''Values''' """Values."""
return { return {
'name': self.name, 'name': self.name,
'stack_id': self.stack_id, 'stack_id': self.stack_id,
} }
def __init__(self, name, stack_id, _insert=True): def __init__(self, name, stack_id, _insert=True):
'''Initializer''' """Initializer."""
super(Plan, self).__init__() super(Plan, self).__init__()
self.name = name self.name = name
self.stack_id = stack_id self.stack_id = stack_id
@ -62,9 +63,8 @@ class Plan(Base):
self.insert() self.insert()
def placements(self): def placements(self):
'''Return list of placements''' """Return list of placements."""
# TODO(UNKNOWN): Make this a property?
# TODO(JD): Make this a property?
all_results = Query("Placement").all() all_results = Query("Placement").all()
results = [] results = []
for placement in all_results: for placement in all_results:
@ -74,15 +74,15 @@ class Plan(Base):
@property @property
def orchestration_ids(self): def orchestration_ids(self):
'''Return list of orchestration IDs''' """Return list of orchestration IDs."""
return list(set([p.orchestration_id for p in self.placements()])) return list(set([p.orchestration_id for p in self.placements()]))
def __repr__(self): def __repr__(self):
'''Object representation''' """Object representation."""
return '<Plan %r>' % self.name return '<Plan %r>' % self.name
def __json__(self): def __json__(self):
'''JSON representation''' """JSON representation."""
json_ = {} json_ = {}
json_['id'] = self.id json_['id'] = self.id
json_['stack_id'] = self.stack_id json_['stack_id'] = self.stack_id

View File

@ -13,7 +13,7 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
'''Populate command''' """Populate command."""
from pecan.commands.base import BaseCommand from pecan.commands.base import BaseCommand
@ -29,14 +29,15 @@ from valet.api.db.models import Plan
def out(string): def out(string):
'''Output helper''' """Output helper."""
print("==> %s" % string) print("==> %s" % string)
class PopulateCommand(BaseCommand): class PopulateCommand(BaseCommand):
'''Load a pecan environment and initializate the database.''' """Load a pecan environment and initializate the database."""
def run(self, args): def run(self, args):
"""Function creates and initializes database and environment."""
super(PopulateCommand, self).run(args) super(PopulateCommand, self).run(args)
out(_("Loading environment")) out(_("Loading environment"))
register_conf() register_conf()

View File

@ -1,6 +1,5 @@
# -*- encoding: utf-8 -*-
# #
# Copyright (c) 2014-2016 AT&T # Copyright 2014-2017 AT&T Intellectual Property
# #
# Licensed under the Apache License, Version 2.0 (the "License"); # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. # you may not use this file except in compliance with the License.
@ -10,13 +9,11 @@
# #
# Unless required by applicable law or agreed to in writing, software # Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, # distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# implied.
#
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
'''Controllers Package''' """Controllers Package."""
import logging import logging
from notario.decorators import instance_of from notario.decorators import instance_of
@ -36,7 +33,7 @@ LOG = logging.getLogger(__name__)
# #
def valid_group_name(value): def valid_group_name(value):
'''Validator for group name type.''' """Validator for group name type."""
if not value or not set(value) <= set(string.letters + string.digits + "-._~"): if not value or not set(value) <= set(string.letters + string.digits + "-._~"):
LOG.error("group name is not valid") LOG.error("group name is not valid")
LOG.error("group name must contain only uppercase and lowercase letters, decimal digits, \ LOG.error("group name must contain only uppercase and lowercase letters, decimal digits, \
@ -45,12 +42,12 @@ def valid_group_name(value):
@instance_of((list, dict)) @instance_of((list, dict))
def valid_plan_resources(value): def valid_plan_resources(value):
'''Validator for plan resources.''' """Validator for plan resources."""
ensure(len(value) > 0) ensure(len(value) > 0)
def valid_plan_update_action(value): def valid_plan_update_action(value):
'''Validator for plan update action.''' """Validator for plan update action."""
assert value in ['update', 'migrate'], _("must be update or migrate") assert value in ['update', 'migrate'], _("must be update or migrate")
# #
@ -59,7 +56,7 @@ def valid_plan_update_action(value):
def set_placements(plan, resources, placements): def set_placements(plan, resources, placements):
'''Set placements''' """Set placements."""
for uuid in placements.iterkeys(): for uuid in placements.iterkeys():
name = resources[uuid]['name'] name = resources[uuid]['name']
properties = placements[uuid]['properties'] properties = placements[uuid]['properties']
@ -70,11 +67,11 @@ def set_placements(plan, resources, placements):
def reserve_placement(placement, resource_id=None, reserve=True, update=True): def reserve_placement(placement, resource_id=None, reserve=True, update=True):
''' Reserve placement. Can optionally set the physical resource id. """Reserve placement. Can optionally set the physical resource id.
Set reserve=False to unreserve. Set update=False to not update Set reserve=False to unreserve. Set update=False to not update
the data store (if the update will be made later). the data store (if the update will be made later).
''' """
if placement: if placement:
LOG.info(_('%(rsrv)s placement of %(orch_id)s in %(loc)s.'), LOG.info(_('%(rsrv)s placement of %(orch_id)s in %(loc)s.'),
{'rsrv': _("Reserving") if reserve else _("Unreserving"), {'rsrv': _("Reserving") if reserve else _("Unreserving"),
@ -92,7 +89,7 @@ def reserve_placement(placement, resource_id=None, reserve=True, update=True):
def update_placements(placements, reserve_id=None, unlock_all=False): def update_placements(placements, reserve_id=None, unlock_all=False):
'''Update placements. Optionally reserve one placement.''' """Update placements. Optionally reserve one placement."""
for uuid in placements.iterkeys(): for uuid in placements.iterkeys():
placement = Placement.query.filter_by( # pylint: disable=E1101 placement = Placement.query.filter_by( # pylint: disable=E1101
orchestration_id=uuid).first() orchestration_id=uuid).first()
@ -119,7 +116,7 @@ def update_placements(placements, reserve_id=None, unlock_all=False):
# #
def error(url, msg=None, **kwargs): def error(url, msg=None, **kwargs):
'''Error handler''' """Error handler."""
if msg: if msg:
request.context['error_message'] = msg request.context['error_message'] = msg
if kwargs: if kwargs:

View File

@ -13,7 +13,7 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
'''Errors''' """Errors."""
import logging import logging
from pecan import expose, request, response from pecan import expose, request, response
@ -26,10 +26,9 @@ LOG = logging.getLogger(__name__)
def error_wrapper(func): def error_wrapper(func):
'''Error decorator.''' """Error decorator."""
def func_wrapper(self, **kw): def func_wrapper(self, **kw):
'''Wrapper.''' """Wrapper."""
kwargs = func(self, **kw) kwargs = func(self, **kw)
status = status_map.get(response.status_code) status = status_map.get(response.status_code)
message = getattr(status, 'explanation', '') message = getattr(status, 'explanation', '')
@ -56,12 +55,12 @@ def error_wrapper(func):
# pylint: disable=W0613 # pylint: disable=W0613
class ErrorsController(object): class ErrorsController(object):
''' Errors Controller /errors/{error_name} ''' """Error Controller /errors/{error_name}."""
@expose('json') @expose('json')
@error_wrapper @error_wrapper
def schema(self, **kw): def schema(self, **kw):
'''400''' """400."""
request.context['error_message'] = str(request.validation_error) request.context['error_message'] = str(request.validation_error)
response.status = 400 response.status = 400
return request.context.get('kwargs') return request.context.get('kwargs')
@ -69,13 +68,13 @@ class ErrorsController(object):
@expose('json') @expose('json')
@error_wrapper @error_wrapper
def invalid(self, **kw): def invalid(self, **kw):
'''400''' """400."""
response.status = 400 response.status = 400
return request.context.get('kwargs') return request.context.get('kwargs')
@expose() @expose()
def unauthorized(self, **kw): def unauthorized(self, **kw):
'''401''' """401."""
# This error is terse and opaque on purpose. # This error is terse and opaque on purpose.
# Don't give any clues to help AuthN along. # Don't give any clues to help AuthN along.
response.status = 401 response.status = 401
@ -92,21 +91,21 @@ class ErrorsController(object):
@expose('json') @expose('json')
@error_wrapper @error_wrapper
def forbidden(self, **kw): def forbidden(self, **kw):
'''403''' """403."""
response.status = 403 response.status = 403
return request.context.get('kwargs') return request.context.get('kwargs')
@expose('json') @expose('json')
@error_wrapper @error_wrapper
def not_found(self, **kw): def not_found(self, **kw):
'''404''' """404."""
response.status = 404 response.status = 404
return request.context.get('kwargs') return request.context.get('kwargs')
@expose('json') @expose('json')
@error_wrapper @error_wrapper
def not_allowed(self, **kw): def not_allowed(self, **kw):
'''405''' """405."""
kwargs = request.context.get('kwargs') kwargs = request.context.get('kwargs')
if kwargs: if kwargs:
allow = kwargs.get('allow', None) allow = kwargs.get('allow', None)
@ -118,20 +117,20 @@ class ErrorsController(object):
@expose('json') @expose('json')
@error_wrapper @error_wrapper
def conflict(self, **kw): def conflict(self, **kw):
'''409''' """409."""
response.status = 409 response.status = 409
return request.context.get('kwargs') return request.context.get('kwargs')
@expose('json') @expose('json')
@error_wrapper @error_wrapper
def server_error(self, **kw): def server_error(self, **kw):
'''500''' """500."""
response.status = 500 response.status = 500
return request.context.get('kwargs') return request.context.get('kwargs')
@expose('json') @expose('json')
@error_wrapper @error_wrapper
def unavailable(self, **kw): def unavailable(self, **kw):
'''503''' """503."""
response.status = 503 response.status = 503
return request.context.get('kwargs') return request.context.get('kwargs')

View File

@ -13,7 +13,7 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
'''Groups''' """Groups."""
import logging import logging
@ -48,7 +48,7 @@ MEMBERS_SCHEMA = (
def server_list_for_group(group): def server_list_for_group(group):
'''Returns a list of VMs associated with a member/group.''' """Return a list of VMs associated with a member/group."""
args = { args = {
"type": "group_vms", "type": "group_vms",
"parameters": { "parameters": {
@ -72,7 +72,7 @@ def server_list_for_group(group):
def tenant_servers_in_group(tenant_id, group): def tenant_servers_in_group(tenant_id, group):
''' Returns a list of servers the current tenant has in group_name ''' """Return a list of servers the current tenant has in group_name."""
servers = [] servers = []
server_list = server_list_for_group(group) server_list = server_list_for_group(group)
nova = nova_client() nova = nova_client()
@ -89,20 +89,23 @@ def tenant_servers_in_group(tenant_id, group):
def no_tenant_servers_in_group(tenant_id, group): def no_tenant_servers_in_group(tenant_id, group):
''' Verify no servers from tenant_id are in group. """Verify no servers from tenant_id are in group.
Throws a 409 Conflict if any are found. Throws a 409 Conflict if any are found.
''' """
server_list = tenant_servers_in_group(tenant_id, group) server_list = tenant_servers_in_group(tenant_id, group)
if server_list: if server_list:
error('/errors/conflict', _('Tenant Member {0} has servers in group "{1}": {2}').format(tenant_id, group.name, server_list)) error('/errors/conflict', _('Tenant Member {0} has servers in group '
'"{1}": {2}').format(tenant_id,
group.name,
server_list))
class MembersItemController(object): class MembersItemController(object):
''' Members Item Controller /v1/groups/{group_id}/members/{member_id} ''' """Member Item Controller /v1/groups/{group_id}/members/{member_id}."""
def __init__(self, member_id): def __init__(self, member_id):
'''Initialize group member''' """Initialize group member."""
group = request.context['group'] group = request.context['group']
if member_id not in group.members: if member_id not in group.members:
error('/errors/not_found', _('Member not found in group')) error('/errors/not_found', _('Member not found in group'))
@ -110,30 +113,30 @@ class MembersItemController(object):
@classmethod @classmethod
def allow(cls): def allow(cls):
'''Allowed methods''' """Allowed methods."""
return 'GET,DELETE' return 'GET,DELETE'
@expose(generic=True, template='json') @expose(generic=True, template='json')
def index(self): def index(self):
'''Catch all for unallowed methods''' """Catch all for unallowed methods."""
message = _('The %s method is not allowed.') % request.method message = _('The %s method is not allowed.') % request.method
kwargs = {'allow': self.allow()} kwargs = {'allow': self.allow()}
error('/errors/not_allowed', message, **kwargs) error('/errors/not_allowed', message, **kwargs)
@index.when(method='OPTIONS', template='json') @index.when(method='OPTIONS', template='json')
def index_options(self): def index_options(self):
'''Options''' """Index Options."""
response.headers['Allow'] = self.allow() response.headers['Allow'] = self.allow()
response.status = 204 response.status = 204
@index.when(method='GET', template='json') @index.when(method='GET', template='json')
def index_get(self): def index_get(self):
'''Verify group member''' """Verify group member."""
response.status = 204 response.status = 204
@index.when(method='DELETE', template='json') @index.when(method='DELETE', template='json')
def index_delete(self): def index_delete(self):
'''Delete group member''' """Delete group member."""
group = request.context['group'] group = request.context['group']
member_id = request.context['member_id'] member_id = request.context['member_id']
@ -146,34 +149,35 @@ class MembersItemController(object):
class MembersController(object): class MembersController(object):
''' Members Controller /v1/groups/{group_id}/members ''' """Members Controller /v1/groups/{group_id}/members."""
@classmethod @classmethod
def allow(cls): def allow(cls):
'''Allowed methods''' """Allowed methods."""
return 'PUT,DELETE' return 'PUT,DELETE'
@expose(generic=True, template='json') @expose(generic=True, template='json')
def index(self): def index(self):
'''Catchall for unallowed methods''' """Catchall for unallowed methods."""
message = _('The %s method is not allowed.') % request.method message = _('The %s method is not allowed.') % request.method
kwargs = {'allow': self.allow()} kwargs = {'allow': self.allow()}
error('/errors/not_allowed', message, **kwargs) error('/errors/not_allowed', message, **kwargs)
@index.when(method='OPTIONS', template='json') @index.when(method='OPTIONS', template='json')
def index_options(self): def index_options(self):
'''Options''' """Index Options."""
response.headers['Allow'] = self.allow() response.headers['Allow'] = self.allow()
response.status = 204 response.status = 204
@index.when(method='PUT', template='json') @index.when(method='PUT', template='json')
@validate(MEMBERS_SCHEMA, '/errors/schema') @validate(MEMBERS_SCHEMA, '/errors/schema')
def index_put(self, **kwargs): def index_put(self, **kwargs):
'''Add one or more members to a group''' """Add one or more members to a group."""
new_members = kwargs.get('members', None) new_members = kwargs.get('members', None)
if not conf.identity.engine.is_tenant_list_valid(new_members): if not conf.identity.engine.is_tenant_list_valid(new_members):
error('/errors/conflict', _('Member list contains invalid tenant IDs')) error('/errors/conflict', _('Member list contains '
'invalid tenant IDs'))
group = request.context['group'] group = request.context['group']
group.members = list(set(group.members + new_members)) group.members = list(set(group.members + new_members))
@ -186,7 +190,7 @@ class MembersController(object):
@index.when(method='DELETE', template='json') @index.when(method='DELETE', template='json')
def index_delete(self): def index_delete(self):
'''Delete all group members''' """Delete all group members."""
group = request.context['group'] group = request.context['group']
# Can't delete a member if it has associated VMs. # Can't delete a member if it has associated VMs.
@ -199,49 +203,50 @@ class MembersController(object):
@expose() @expose()
def _lookup(self, member_id, *remainder): def _lookup(self, member_id, *remainder):
'''Pecan subcontroller routing callback''' """Pecan subcontroller routing callback."""
return MembersItemController(member_id), remainder return MembersItemController(member_id), remainder
class GroupsItemController(object): class GroupsItemController(object):
''' Groups Item Controller /v1/groups/{group_id} ''' """Group Item Controller /v1/groups/{group_id}."""
members = MembersController() members = MembersController()
def __init__(self, group_id): def __init__(self, group_id):
'''Initialize group''' """Initialize group."""
group = Group.query.filter_by(id=group_id).first() # pylint: disable=E1101 # pylint:disable=E1101
group = Group.query.filter_by(id=group_id).first()
if not group: if not group:
error('/errors/not_found', _('Group not found')) error('/errors/not_found', _('Group not found'))
request.context['group'] = group request.context['group'] = group
@classmethod @classmethod
def allow(cls): def allow(cls):
''' Allowed methods ''' """Allowed methods."""
return 'GET,PUT,DELETE' return 'GET,PUT,DELETE'
@expose(generic=True, template='json') @expose(generic=True, template='json')
def index(self): def index(self):
'''Catchall for unallowed methods''' """Catchall for unallowed methods."""
message = _('The %s method is not allowed.') % request.method message = _('The %s method is not allowed.') % request.method
kwargs = {'allow': self.allow()} kwargs = {'allow': self.allow()}
error('/errors/not_allowed', message, **kwargs) error('/errors/not_allowed', message, **kwargs)
@index.when(method='OPTIONS', template='json') @index.when(method='OPTIONS', template='json')
def index_options(self): def index_options(self):
'''Options''' """Index Options."""
response.headers['Allow'] = self.allow() response.headers['Allow'] = self.allow()
response.status = 204 response.status = 204
@index.when(method='GET', template='json') @index.when(method='GET', template='json')
def index_get(self): def index_get(self):
'''Display a group''' """Display a group."""
return {"group": request.context['group']} return {"group": request.context['group']}
@index.when(method='PUT', template='json') @index.when(method='PUT', template='json')
@validate(UPDATE_GROUPS_SCHEMA, '/errors/schema') @validate(UPDATE_GROUPS_SCHEMA, '/errors/schema')
def index_put(self, **kwargs): def index_put(self, **kwargs):
'''Update a group''' """Update a group."""
# Name and type are immutable. # Name and type are immutable.
# Group Members are updated in MembersController. # Group Members are updated in MembersController.
group = request.context['group'] group = request.context['group']
@ -255,7 +260,7 @@ class GroupsItemController(object):
@index.when(method='DELETE', template='json') @index.when(method='DELETE', template='json')
def index_delete(self): def index_delete(self):
'''Delete a group''' """Delete a group."""
group = request.context['group'] group = request.context['group']
if isinstance(group.members, list) and len(group.members) > 0: if isinstance(group.members, list) and len(group.members) > 0:
error('/errors/conflict', _('Unable to delete a Group with members.')) error('/errors/conflict', _('Unable to delete a Group with members.'))
@ -264,29 +269,29 @@ class GroupsItemController(object):
class GroupsController(object): class GroupsController(object):
''' Groups Controller /v1/groups ''' """Group Controller /v1/groups."""
@classmethod @classmethod
def allow(cls): def allow(cls):
'''Allowed methods''' """Allowed methods."""
return 'GET,POST' return 'GET,POST'
@expose(generic=True, template='json') @expose(generic=True, template='json')
def index(self): def index(self):
'''Catch all for unallowed methods''' """Catch all for unallowed methods."""
message = _('The %s method is not allowed.') % request.method message = _('The %s method is not allowed.') % request.method
kwargs = {'allow': self.allow()} kwargs = {'allow': self.allow()}
error('/errors/not_allowed', message, **kwargs) error('/errors/not_allowed', message, **kwargs)
@index.when(method='OPTIONS', template='json') @index.when(method='OPTIONS', template='json')
def index_options(self): def index_options(self):
'''Options''' """Index Options."""
response.headers['Allow'] = self.allow() response.headers['Allow'] = self.allow()
response.status = 204 response.status = 204
@index.when(method='GET', template='json') @index.when(method='GET', template='json')
def index_get(self): def index_get(self):
'''List groups''' """List groups."""
groups_array = [] groups_array = []
for group in Group.query.all(): # pylint: disable=E1101 for group in Group.query.all(): # pylint: disable=E1101
groups_array.append(group) groups_array.append(group)
@ -295,7 +300,7 @@ class GroupsController(object):
@index.when(method='POST', template='json') @index.when(method='POST', template='json')
@validate(GROUPS_SCHEMA, '/errors/schema') @validate(GROUPS_SCHEMA, '/errors/schema')
def index_post(self, **kwargs): def index_post(self, **kwargs):
'''Create a group''' """Create a group."""
group_name = kwargs.get('name', None) group_name = kwargs.get('name', None)
description = kwargs.get('description', None) description = kwargs.get('description', None)
group_type = kwargs.get('type', None) group_type = kwargs.get('type', None)
@ -314,5 +319,5 @@ class GroupsController(object):
@expose() @expose()
def _lookup(self, group_id, *remainder): def _lookup(self, group_id, *remainder):
'''Pecan subcontroller routing callback''' """Pecan subcontroller routing callback."""
return GroupsItemController(group_id), remainder return GroupsItemController(group_id), remainder

View File

@ -13,7 +13,7 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
'''Placements''' """Placements."""
import logging import logging
@ -32,50 +32,53 @@ LOG = logging.getLogger(__name__)
class PlacementsItemController(object): class PlacementsItemController(object):
''' Placements Item Controller /v1/placements/{placement_id} ''' """Placements Item Controller /v1/placements/{placement_id}."""
def __init__(self, uuid4): def __init__(self, uuid4):
'''Initializer.''' """Initializer."""
self.uuid = uuid4 self.uuid = uuid4
self.placement = Placement.query.filter_by(id=self.uuid).first() # pylint: disable=E1101 self.placement = Placement.query.filter_by(id=self.uuid).first()
# pylint: disable=E1101
if not self.placement: if not self.placement:
self.placement = Placement.query.filter_by(orchestration_id=self.uuid).first() # disable=E1101 self.placement = Placement.query.filter_by(
orchestration_id=self.uuid).first()
# disable=E1101
if not self.placement: if not self.placement:
error('/errors/not_found', _('Placement not found')) error('/errors/not_found', _('Placement not found'))
request.context['placement_id'] = self.placement.id request.context['placement_id'] = self.placement.id
@classmethod @classmethod
def allow(cls): def allow(cls):
'''Allowed methods''' """Allowed methods."""
return 'GET,POST,DELETE' return 'GET,POST,DELETE'
@expose(generic=True, template='json') @expose(generic=True, template='json')
def index(self): def index(self):
'''Catchall for unallowed methods''' """Catchall for unallowed methods."""
message = _('The %s method is not allowed.') % request.method message = _('The %s method is not allowed.') % request.method
kwargs = {'allow': self.allow()} kwargs = {'allow': self.allow()}
error('/errors/not_allowed', message, **kwargs) error('/errors/not_allowed', message, **kwargs)
@index.when(method='OPTIONS', template='json') @index.when(method='OPTIONS', template='json')
def index_options(self): def index_options(self):
'''Options''' """Index Options."""
response.headers['Allow'] = self.allow() response.headers['Allow'] = self.allow()
response.status = 204 response.status = 204
@index.when(method='GET', template='json') @index.when(method='GET', template='json')
def index_get(self): def index_get(self):
''' Inspect a placement. """Inspect a placement.
Use POST for reserving placements made by a scheduler. Use POST for reserving placements made by a scheduler.
''' """
return {"placement": self.placement} return {"placement": self.placement}
@index.when(method='POST', template='json') @index.when(method='POST', template='json')
def index_post(self, **kwargs): def index_post(self, **kwargs):
''' Reserve a placement. This and other placements may be replanned. """Reserve a placement. This and other placements may be replanned.
Once reserved, the location effectively becomes immutable. Once reserved, the location effectively becomes immutable.
''' """
res_id = kwargs.get('resource_id') res_id = kwargs.get('resource_id')
LOG.info(_('Placement reservation request for resource id ' LOG.info(_('Placement reservation request for resource id '
'%(res_id)s, orchestration id %(orch_id)s.'), '%(res_id)s, orchestration id %(orch_id)s.'),
@ -122,7 +125,8 @@ class PlacementsItemController(object):
# We may get one or more updated placements in return. # We may get one or more updated placements in return.
# One of those will be the original placement # One of those will be the original placement
# we are trying to reserve. # we are trying to reserve.
plan = Plan.query.filter_by(id=self.placement.plan_id).first() # pylint: disable=E1101 plan = Plan.query.filter_by(id=self.placement.plan_id).first()
# pylint: disable=E1101
args = { args = {
"stack_id": plan.stack_id, "stack_id": plan.stack_id,
@ -151,7 +155,7 @@ class PlacementsItemController(object):
@index.when(method='DELETE', template='json') @index.when(method='DELETE', template='json')
def index_delete(self): def index_delete(self):
'''Delete a Placement''' """Delete a Placement."""
orch_id = self.placement.orchestration_id orch_id = self.placement.orchestration_id
self.placement.delete() self.placement.delete()
LOG.info(_('Placement with orchestration id %s deleted.'), orch_id) LOG.info(_('Placement with orchestration id %s deleted.'), orch_id)
@ -159,29 +163,29 @@ class PlacementsItemController(object):
class PlacementsController(object): class PlacementsController(object):
''' Placements Controller /v1/placements ''' """Placements Controller /v1/placements."""
@classmethod @classmethod
def allow(cls): def allow(cls):
'''Allowed methods''' """Allowed methods."""
return 'GET' return 'GET'
@expose(generic=True, template='json') @expose(generic=True, template='json')
def index(self): def index(self):
'''Catchall for unallowed methods''' """Catchall for unallowed methods."""
message = _('The %s method is not allowed.') % request.method message = _('The %s method is not allowed.') % request.method
kwargs = {'allow': self.allow()} kwargs = {'allow': self.allow()}
error('/errors/not_allowed', message, **kwargs) error('/errors/not_allowed', message, **kwargs)
@index.when(method='OPTIONS', template='json') @index.when(method='OPTIONS', template='json')
def index_options(self): def index_options(self):
'''Options''' """Index Options."""
response.headers['Allow'] = self.allow() response.headers['Allow'] = self.allow()
response.status = 204 response.status = 204
@index.when(method='GET', template='json') @index.when(method='GET', template='json')
def index_get(self): def index_get(self):
'''Get placements.''' """Get placements."""
placements_array = [] placements_array = []
for placement in Placement.query.all(): # pylint: disable=E1101 for placement in Placement.query.all(): # pylint: disable=E1101
placements_array.append(placement) placements_array.append(placement)
@ -189,5 +193,5 @@ class PlacementsController(object):
@expose() @expose()
def _lookup(self, uuid4, *remainder): def _lookup(self, uuid4, *remainder):
'''Pecan subcontroller routing callback''' """Pecan subcontroller routing callback."""
return PlacementsItemController(uuid4), remainder return PlacementsItemController(uuid4), remainder

View File

@ -13,7 +13,7 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
'''Plans''' """Plans."""
import logging import logging
@ -48,19 +48,22 @@ UPDATE_SCHEMA = (
(decorators.optional('timeout'), types.string) (decorators.optional('timeout'), types.string)
) )
# pylint: disable=R0201 # pylint: disable=R0201
class PlansItemController(object): class PlansItemController(object):
''' Plans Item Controller /v1/plans/{plan_id} ''' """Plan Item Controller /v1/plans/{plan_id}."""
def __init__(self, uuid4): def __init__(self, uuid4):
'''Initializer.''' """Initializer."""
self.uuid = uuid4 self.uuid = uuid4
self.plan = Plan.query.filter_by(id=self.uuid).first() # pylint: disable=E1101 self.plan = Plan.query.filter_by(id=self.uuid).first()
# pylint: disable=E1101
if not self.plan: if not self.plan:
self.plan = Plan.query.filter_by(stack_id=self.uuid).first() # pylint: disable=E1101 self.plan = Plan.query.filter_by(stack_id=self.uuid).first()
# pylint: disable=E1101
if not self.plan: if not self.plan:
error('/errors/not_found', _('Plan not found')) error('/errors/not_found', _('Plan not found'))
@ -68,32 +71,31 @@ class PlansItemController(object):
@classmethod @classmethod
def allow(cls): def allow(cls):
'''Allowed methods''' """Allowed methods."""
return 'GET,PUT,DELETE' return 'GET,PUT,DELETE'
@expose(generic=True, template='json') @expose(generic=True, template='json')
def index(self): def index(self):
'''Catchall for unallowed methods''' """Catchall for unallowed methods."""
message = _('The %s method is not allowed.') % request.method message = _('The %s method is not allowed.') % request.method
kwargs = {'allow': self.allow()} kwargs = {'allow': self.allow()}
error('/errors/not_allowed', message, **kwargs) error('/errors/not_allowed', message, **kwargs)
@index.when(method='OPTIONS', template='json') @index.when(method='OPTIONS', template='json')
def index_options(self): def index_options(self):
'''Options''' """Index Options."""
response.headers['Allow'] = self.allow() response.headers['Allow'] = self.allow()
response.status = 204 response.status = 204
@index.when(method='GET', template='json') @index.when(method='GET', template='json')
def index_get(self): def index_get(self):
'''Get plan''' """Get plan."""
return {"plan": self.plan} return {"plan": self.plan}
@index.when(method='PUT', template='json') @index.when(method='PUT', template='json')
@validate(UPDATE_SCHEMA, '/errors/schema') @validate(UPDATE_SCHEMA, '/errors/schema')
def index_put(self, **kwargs): def index_put(self, **kwargs):
'''Update a Plan''' """Update a Plan."""
action = kwargs.get('action') action = kwargs.get('action')
if action == 'migrate': if action == 'migrate':
# Replan the placement of an existing resource. # Replan the placement of an existing resource.
@ -102,17 +104,24 @@ class PlansItemController(object):
# TODO(JD): Support replan of more than one existing resource # TODO(JD): Support replan of more than one existing resource
if not isinstance(resources, list) or len(resources) != 1: if not isinstance(resources, list) or len(resources) != 1:
error('/errors/invalid', _('resources must be a list of length 1.')) error('/errors/invalid',
_('resources must be a list of length 1.'))
# We either got a resource or orchestration id. # We either got a resource or orchestration id.
the_id = resources[0] the_id = resources[0]
placement = Placement.query.filter_by(resource_id=the_id).first() # pylint: disable=E1101 placement = Placement.query.filter_by(resource_id=the_id).first()
# pylint: disable=E1101
if not placement: if not placement:
placement = Placement.query.filter_by(orchestration_id=the_id).first() # pylint: disable=E1101 placement = Placement.query.filter_by(
orchestration_id=the_id).first() # pylint: disable=E1101
if not placement: if not placement:
error('/errors/invalid', _('Unknown resource or orchestration id: %s') % the_id) error('/errors/invalid', _('Unknown resource or '
'orchestration id: %s') % the_id)
LOG.info(_('Migration request for resource id {0}, '
'orchestration id {1}.').format(
placement.resource_id, placement.orchestration_id))
LOG.info(_('Migration request for resource id {0}, orchestration id {1}.').format(placement.resource_id, placement.orchestration_id))
args = { args = {
"stack_id": self.plan.stack_id, "stack_id": self.plan.stack_id,
"excluded_hosts": excluded_hosts, "excluded_hosts": excluded_hosts,
@ -136,7 +145,8 @@ class PlansItemController(object):
# Flush so that the DB is current. # Flush so that the DB is current.
self.plan.flush() self.plan.flush()
self.plan = Plan.query.filter_by(stack_id=self.plan.stack_id).first() # pylint: disable=E1101 self.plan = Plan.query.filter_by(
stack_id=self.plan.stack_id).first() # pylint: disable=E1101
LOG.info(_('Plan with stack id %s updated.'), self.plan.stack_id) LOG.info(_('Plan with stack id %s updated.'), self.plan.stack_id)
return {"plan": self.plan} return {"plan": self.plan}
@ -186,7 +196,7 @@ class PlansItemController(object):
@index.when(method='DELETE', template='json') @index.when(method='DELETE', template='json')
def index_delete(self): def index_delete(self):
'''Delete a Plan''' """Delete a Plan."""
for placement in self.plan.placements(): for placement in self.plan.placements():
placement.delete() placement.delete()
stack_id = self.plan.stack_id stack_id = self.plan.stack_id
@ -196,29 +206,29 @@ class PlansItemController(object):
class PlansController(object): class PlansController(object):
''' Plans Controller /v1/plans ''' """Plans Controller /v1/plans."""
@classmethod @classmethod
def allow(cls): def allow(cls):
'''Allowed methods''' """Allowed methods."""
return 'GET,POST' return 'GET,POST'
@expose(generic=True, template='json') @expose(generic=True, template='json')
def index(self): def index(self):
'''Catchall for unallowed methods''' """Catchall for unallowed methods."""
message = _('The %s method is not allowed.') % request.method message = _('The %s method is not allowed.') % request.method
kwargs = {'allow': self.allow()} kwargs = {'allow': self.allow()}
error('/errors/not_allowed', message, **kwargs) error('/errors/not_allowed', message, **kwargs)
@index.when(method='OPTIONS', template='json') @index.when(method='OPTIONS', template='json')
def index_options(self): def index_options(self):
'''Options''' """Index Options."""
response.headers['Allow'] = self.allow() response.headers['Allow'] = self.allow()
response.status = 204 response.status = 204
@index.when(method='GET', template='json') @index.when(method='GET', template='json')
def index_get(self): def index_get(self):
'''Get all the plans''' """Get all the plans."""
plans_array = [] plans_array = []
for plan in Plan.query.all(): # pylint: disable=E1101 for plan in Plan.query.all(): # pylint: disable=E1101
plans_array.append(plan) plans_array.append(plan)
@ -227,7 +237,7 @@ class PlansController(object):
@index.when(method='POST', template='json') @index.when(method='POST', template='json')
@validate(CREATE_SCHEMA, '/errors/schema') @validate(CREATE_SCHEMA, '/errors/schema')
def index_post(self): def index_post(self):
'''Create a Plan''' """Create a Plan."""
ostro = Ostro() ostro = Ostro()
args = request.json args = request.json
@ -277,5 +287,5 @@ class PlansController(object):
@expose() @expose()
def _lookup(self, uuid4, *remainder): def _lookup(self, uuid4, *remainder):
'''Pecan subcontroller routing callback''' """Pecan subcontroller routing callback."""
return PlansItemController(uuid4), remainder return PlansItemController(uuid4), remainder

View File

@ -13,7 +13,7 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
'''Root''' """Root."""
import logging import logging
@ -31,32 +31,32 @@ LOG = logging.getLogger(__name__)
class RootController(object): class RootController(object):
''' Root Controller / ''' """Root Controller."""
errors = ErrorsController() errors = ErrorsController()
v1 = V1Controller() # pylint: disable=C0103 v1 = V1Controller() # pylint: disable=C0103
@classmethod @classmethod
def allow(cls): def allow(cls):
'''Allowed methods''' """Allowed methods."""
return 'GET' return 'GET'
@expose(generic=True, template='json') @expose(generic=True, template='json')
def index(self): def index(self):
'''Catchall for unallowed methods''' """Catchall for unallowed methods."""
message = _('The %s method is not allowed.') % request.method message = _('The %s method is not allowed.') % request.method
kwargs = {'allow': self.allow()} kwargs = {'allow': self.allow()}
error('/errors/not_allowed', message, **kwargs) error('/errors/not_allowed', message, **kwargs)
@index.when(method='OPTIONS', template='json') @index.when(method='OPTIONS', template='json')
def index_options(self): def index_options(self):
'''Options''' """Index Options."""
response.headers['Allow'] = self.allow() response.headers['Allow'] = self.allow()
response.status = 204 response.status = 204
@index.when(method='GET', template='json') @index.when(method='GET', template='json')
def index_get(self): def index_get(self):
'''Get canonical URL for each version''' """Get canonical URL for each version."""
ver = { ver = {
"versions": "versions":
[ [
@ -78,7 +78,7 @@ class RootController(object):
@error_wrapper @error_wrapper
def error(self, status): def error(self, status):
'''Error handler''' """Error handler."""
try: try:
status = int(status) status = int(status)
except ValueError: # pragma: no cover except ValueError: # pragma: no cover

View File

@ -13,7 +13,7 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
'''Status''' """Status."""
import logging import logging
@ -28,11 +28,11 @@ LOG = logging.getLogger(__name__)
class StatusController(object): class StatusController(object):
''' Status Controller /v1/status ''' """Status Controller /v1/status."""
@classmethod @classmethod
def _ping_ostro(cls): def _ping_ostro(cls):
'''Ping Ostro''' """Ping Ostro."""
ostro = Ostro() ostro = Ostro()
ostro.ping() ostro.ping()
ostro.send() ostro.send()
@ -40,7 +40,7 @@ class StatusController(object):
@classmethod @classmethod
def _ping(cls): def _ping(cls):
'''Ping each subsystem.''' """Ping each subsystem."""
ostro_response = StatusController._ping_ostro() ostro_response = StatusController._ping_ostro()
# TODO(JD): Ping Music plus any others. # TODO(JD): Ping Music plus any others.
@ -54,32 +54,31 @@ class StatusController(object):
@classmethod @classmethod
def allow(cls): def allow(cls):
'''Allowed methods''' """Allowed methods."""
return 'HEAD,GET' return 'HEAD,GET'
@expose(generic=True, template='json') @expose(generic=True, template='json')
def index(self): def index(self):
'''Catchall for unallowed methods''' """Catchall for unallowed methods."""
message = _('The %s method is not allowed.') % request.method message = _('The %s method is not allowed.') % request.method
kwargs = {'allow': self.allow()} kwargs = {'allow': self.allow()}
error('/errors/not_allowed', message, **kwargs) error('/errors/not_allowed', message, **kwargs)
@index.when(method='OPTIONS', template='json') @index.when(method='OPTIONS', template='json')
def index_options(self): def index_options(self):
'''Options''' """Index Options."""
response.headers['Allow'] = self.allow() response.headers['Allow'] = self.allow()
response.status = 204 response.status = 204
@index.when(method='HEAD', template='json') @index.when(method='HEAD', template='json')
def index_head(self): def index_head(self):
'''Ping each subsystem and return summary response''' """Ping each subsystem and return summary response."""
self._ping() # pylint: disable=W0612 self._ping() # pylint: disable=W0612
response.status = 204 response.status = 204
@index.when(method='GET', template='json') @index.when(method='GET', template='json')
def index_get(self): def index_get(self):
'''Ping each subsystem and return detailed response''' """Ping each subsystem and return detailed response."""
_response = self._ping() _response = self._ping()
response.status = 200 response.status = 200
return _response return _response

View File

@ -13,7 +13,7 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
'''v1''' """v1."""
import logging import logging
@ -34,7 +34,7 @@ LOG = logging.getLogger(__name__)
class V1Controller(SecureController): class V1Controller(SecureController):
''' v1 Controller /v1 ''' """v1 Controller /v1."""
groups = GroupsController() groups = GroupsController()
placements = PlacementsController() placements = PlacementsController()
@ -46,7 +46,7 @@ class V1Controller(SecureController):
@classmethod @classmethod
def check_permissions(cls): def check_permissions(cls):
'''SecureController permission check callback''' """SecureController permission check callback."""
token = None token = None
auth_token = request.headers.get('X-Auth-Token') auth_token = request.headers.get('X-Auth-Token')
msg = "Unauthorized - No auth token" msg = "Unauthorized - No auth token"
@ -74,7 +74,10 @@ class V1Controller(SecureController):
@classmethod @classmethod
def _action_is_migrate(cls, request): def _action_is_migrate(cls, request):
return "plan" in request.path and hasattr(request, "json") and "action" in request.json and request.json["action"] == "migrate" return "plan" in request.path \
and hasattr(request, "json") \
and "action" in request.json \
and request.json["action"] == "migrate"
@classmethod @classmethod
def _permission_granted(cls, request, token): def _permission_granted(cls, request, token):
@ -84,25 +87,25 @@ class V1Controller(SecureController):
@classmethod @classmethod
def allow(cls): def allow(cls):
'''Allowed methods''' """Allowed methods."""
return 'GET' return 'GET'
@expose(generic=True, template='json') @expose(generic=True, template='json')
def index(self): def index(self):
'''Catchall for unallowed methods''' """Catchall for unallowed methods."""
message = _('The %s method is not allowed.') % request.method message = _('The %s method is not allowed.') % request.method
kwargs = {'allow': self.allow()} kwargs = {'allow': self.allow()}
error('/errors/not_allowed', message, **kwargs) error('/errors/not_allowed', message, **kwargs)
@index.when(method='OPTIONS', template='json') @index.when(method='OPTIONS', template='json')
def index_options(self): def index_options(self):
'''Options''' """Index Options."""
response.headers['Allow'] = self.allow() response.headers['Allow'] = self.allow()
response.status = 204 response.status = 204
@index.when(method='GET', template='json') @index.when(method='GET', template='json')
def index_get(self): def index_get(self):
'''Get canonical URL for each endpoint''' """Get canonical URL for each endpoint."""
links = [] links = []
for endpoint in V1Controller.endpoints: for endpoint in V1Controller.endpoints:
links.append({ links.append({

View File

@ -13,7 +13,7 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
'''WSGI Wrapper''' """WSGI Wrapper."""
from common.i18n import _ from common.i18n import _
import os import os
@ -21,7 +21,7 @@ from pecan.deploy import deploy
def config_file(file_name=None): def config_file(file_name=None):
"""Returns absolute location of the config file""" """Return absolute location of the config file."""
file_name = file_name or 'config.py' file_name = file_name or 'config.py'
_file = os.path.abspath(__file__) _file = os.path.abspath(__file__)
@ -32,7 +32,7 @@ def config_file(file_name=None):
def application(environ, start_response): def application(environ, start_response):
"""Returns a WSGI app object""" """Return a WSGI app object."""
wsgi_app = deploy(config_file('prod.py')) wsgi_app = deploy(config_file('prod.py'))
return wsgi_app(environ, start_response) return wsgi_app(environ, start_response)
@ -45,7 +45,8 @@ if __name__ == '__main__':
from valet.api.conf import register_conf, set_domain from valet.api.conf import register_conf, set_domain
register_conf() register_conf()
set_domain() set_domain()
HTTPD = make_server('', 8090, deploy(config_file('/var/www/valet/config.py'))) HTTPD = make_server('', 8090,
deploy(config_file('/var/www/valet/config.py')))
print(_("Serving HTTP on port 8090...")) print(_("Serving HTTP on port 8090..."))
# Respond to requests until process is killed # Respond to requests until process is killed

View File

@ -13,6 +13,8 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
"""Group Cli."""
import argparse import argparse
import json import json
from oslo_config import cfg from oslo_config import cfg
@ -23,81 +25,120 @@ CONF = cfg.CONF
class ResponseError(Exception): class ResponseError(Exception):
"""Response Error Exception."""
pass pass
class ConnectionError(Exception): class ConnectionError(Exception):
"""Connection Error Exception."""
pass pass
def print_verbose(verbose, url, headers, body, rest_cmd, timeout): def print_verbose(verbose, url, headers, body, rest_cmd, timeout):
"""Print verbose data."""
# TODO(Chris Martin): Replace prints with logs
if verbose: if verbose:
print("Sending Request:\nurl: %s\nheaders: %s\nbody: %s\ncmd: %s\ntimeout: %d\n" print("Sending Request:\nurl: %s\nheaders: "
% (url, headers, body, rest_cmd.__name__ if rest_cmd is not None else None, timeout)) "%s\nbody: %s\ncmd: %s\ntimeout: %d\n"
% (url, headers, body,
rest_cmd.__name__ if rest_cmd is not None else None, timeout))
def pretty_print_json(json_thing, sort=True, indents=4): def pretty_print_json(json_thing, sort=True, indents=4):
"""Print parser in nice format."""
# TODO(Chris Martin): Replace prints with logs
if type(json_thing) is str: if type(json_thing) is str:
print(json.dumps(json.loads(json_thing), sort_keys=sort, indent=indents)) print(json.dumps(json.loads(json_thing), sort_keys=sort,
indent=indents))
else: else:
print(json.dumps(json_thing, sort_keys=sort, indent=indents)) print(json.dumps(json_thing, sort_keys=sort, indent=indents))
return None return None
def add_to_parser(service_sub): def add_to_parser(service_sub):
"""Return parser."""
parser = service_sub.add_parser('group', help='Group Management', parser = service_sub.add_parser('group', help='Group Management',
formatter_class=lambda prog: argparse.HelpFormatter(prog, max_help_position=30, formatter_class=lambda
prog: argparse.HelpFormatter(
prog, max_help_position=30,
width=120)) width=120))
parser.add_argument('--version', action='version', version='%(prog)s 1.1') parser.add_argument('--version', action='version', version='%(prog)s 1.1')
parser.add_argument('--timeout', type=int, help='Set request timeout in seconds (default: 10)') parser.add_argument('--timeout', type=int,
parser.add_argument('--host', type=str, help='Hostname or ip of valet server') help='Set request timeout in seconds (default: 10)')
parser.add_argument('--host', type=str,
help='Hostname or ip of valet server')
parser.add_argument('--port', type=str, help='Port number of valet server') parser.add_argument('--port', type=str, help='Port number of valet server')
parser.add_argument('--os-tenant-name', type=str, help='Tenant name') parser.add_argument('--os-tenant-name', type=str, help='Tenant name')
parser.add_argument('--os-user-name', dest='os_username', type=str, help='Username') parser.add_argument('--os-user-name', dest='os_username', type=str,
help='Username')
parser.add_argument('--os-password', type=str, help="User's password") parser.add_argument('--os-password', type=str, help="User's password")
parser.add_argument('--verbose', '-v', help='Show details', action="store_true") parser.add_argument('--verbose', '-v', help='Show details',
action="store_true")
subparsers = parser.add_subparsers(dest='subcmd', metavar='<subcommand>') subparsers = parser.add_subparsers(dest='subcmd', metavar='<subcommand>')
# create group # create group
parser_create_group = subparsers.add_parser('create', help='Create new group.') parser_create_group = subparsers.add_parser('create',
help='Create new group.')
parser_create_group.add_argument('name', type=str, help='<GROUP_NAME>') parser_create_group.add_argument('name', type=str, help='<GROUP_NAME>')
parser_create_group.add_argument('type', type=str, help='<GROUP_TYPE> (exclusivity)') parser_create_group.add_argument('type', type=str,
parser_create_group.add_argument('--description', type=str, help='<GROUP_DESCRIPTION>') help='<GROUP_TYPE> (exclusivity)')
parser_create_group.add_argument('--description', type=str,
help='<GROUP_DESCRIPTION>')
# delete group # delete group
parser_delete_group = subparsers.add_parser('delete', help='Delete specified group.') parser_delete_group = subparsers.add_parser('delete',
help='Delete specified group.')
parser_delete_group.add_argument('groupid', type=str, help='<GROUP_ID>') parser_delete_group.add_argument('groupid', type=str, help='<GROUP_ID>')
# delete group member # delete group member
parser_delete_group_member = subparsers.add_parser('delete-member', help='Delete members from specified group.') parser_delete_group_member = subparsers.add_parser('delete-member',
parser_delete_group_member.add_argument('groupid', type=str, help='<GROUP_ID>') help='Delete member from'
parser_delete_group_member.add_argument('memberid', type=str, help='<MEMBER_ID>') 'specified group.')
parser_delete_group_member.add_argument('groupid', type=str,
help='<GROUP_ID>')
parser_delete_group_member.add_argument('memberid', type=str,
help='<MEMBER_ID>')
# delete all group members # delete all group members
parser_delete_all_group_members = subparsers.add_parser('delete-all-members', help='Delete all members from ' parser_delete_all_group_members = subparsers.add_parser(
'delete-all-members', help='Delete all members from '
'specified group.') 'specified group.')
parser_delete_all_group_members.add_argument('groupid', type=str, help='<GROUP_ID>') parser_delete_all_group_members.add_argument('groupid', type=str,
help='<GROUP_ID>')
# list group # list group
subparsers.add_parser('list', help='List all groups.') subparsers.add_parser('list', help='List all groups.')
# show group details # show group details
parser_show_group_details = subparsers.add_parser('show', help='Show details about the given group.') parser_show_group_details = subparsers.add_parser('show',
parser_show_group_details.add_argument('groupid', type=str, help='<GROUP_ID>') help='Show details about'
'the given group.')
parser_show_group_details.add_argument('groupid', type=str,
help='<GROUP_ID>')
# update group # update group
parser_update_group = subparsers.add_parser('update', help='Update group description.') parser_update_group = subparsers.add_parser('update',
help='Update group'
'description.')
parser_update_group.add_argument('groupid', type=str, help='<GROUP_ID>') parser_update_group.add_argument('groupid', type=str, help='<GROUP_ID>')
parser_update_group.add_argument('--description', type=str, help='<GROUP_DESCRIPTION>') parser_update_group.add_argument('--description', type=str,
help='<GROUP_DESCRIPTION>')
parser_update_group_members = subparsers.add_parser('update-member', help='Update group members.') parser_update_group_members = subparsers.add_parser('update-member',
parser_update_group_members.add_argument('groupid', type=str, help='<GROUP_ID>') help='Update'
parser_update_group_members.add_argument('members', type=str, help='<MEMBER_ID>') 'group members.')
parser_update_group_members.add_argument('groupid', type=str,
help='<GROUP_ID>')
parser_update_group_members.add_argument('members', type=str,
help='<MEMBER_ID>')
return parser return parser
def cmd_details(args): def cmd_details(args):
"""Command details."""
if args.subcmd == 'create': if args.subcmd == 'create':
return requests.post, '' return requests.post, ''
elif args.subcmd == 'update': elif args.subcmd == 'update':
@ -105,21 +146,25 @@ def cmd_details(args):
elif args.subcmd == 'update-member': elif args.subcmd == 'update-member':
return requests.put, '/%s/members' % args.groupid return requests.put, '/%s/members' % args.groupid
elif args.subcmd == 'delete': elif args.subcmd == 'delete':
return requests.delete, '/%s' % (args.groupid) return requests.delete, '/%s' % args.groupid
elif args.subcmd == 'delete-all-members': elif args.subcmd == 'delete-all-members':
return requests.delete, '/%s/members' % (args.groupid) return requests.delete, '/%s/members' % args.groupid
elif args.subcmd == 'delete-member': elif args.subcmd == 'delete-member':
return requests.delete, '/%s/members/%s' % (args.groupid, args.memberid) return requests.delete, '/%s/members/%s' % (args.groupid, args.memberid)
elif args.subcmd == 'show': elif args.subcmd == 'show':
return requests.get, '/%s' % (args.groupid) return requests.get, '/%s' % args.groupid
elif args.subcmd == 'list': elif args.subcmd == 'list':
return requests.get, '' return requests.get, ''
def get_token(timeout, args): def get_token(timeout, args):
tenant_name = args.os_tenant_name if args.os_tenant_name else CONF.identity.project_name """Return JSON of access token id."""
auth_name = args.os_username if args.os_username else CONF.identity.username tenant_name = args.os_tenant_name if args.os_tenant_name \
password = args.os_password if args.os_password else CONF.identity.password else CONF.identity.project_name
auth_name = args.os_username if args.os_username \
else CONF.identity.username
password = args.os_password if args.os_password \
else CONF.identity.password
headers = { headers = {
'Content-Type': 'application/json', 'Content-Type': 'application/json',
} }
@ -149,19 +194,23 @@ def get_token(timeout, args):
def populate_args_request_body(args): def populate_args_request_body(args):
"""Return JSON of filtered body dictionary."""
body_args_list = ['name', 'type', 'description', 'members'] body_args_list = ['name', 'type', 'description', 'members']
# assign values to dictionary (if val exist). members will be assign as a list # assign values to dict (if val exist) members will be assign as a list
body_dict = {} body_dict = {}
for body_arg in body_args_list: for body_arg in body_args_list:
if hasattr(args, body_arg): if hasattr(args, body_arg):
body_dict[body_arg] = getattr(args, body_arg) if body_arg != 'members' else [getattr(args, body_arg)] body_dict[body_arg] = getattr(args, body_arg) \
if body_arg != 'members' else [getattr(args, body_arg)]
# remove keys without values # remove keys without values
filtered_body_dict = dict((k, v) for k, v in body_dict.iteritems() if v is not None) filtered_body_dict = dict(
(k, v) for k, v in body_dict.iteritems() if v is not None)
# check if dictionary is not empty, convert body dictionary to json format # check if dictionary is not empty, convert body dictionary to json format
return json.dumps(filtered_body_dict) if bool(filtered_body_dict) else None return json.dumps(filtered_body_dict) if bool(filtered_body_dict) else None
def run(args): def run(args):
"""Run."""
register_conf() register_conf()
set_domain(project='valet') set_domain(project='valet')
args.host = args.host or CONF.server.host args.host = args.host or CONF.server.host
@ -177,23 +226,27 @@ def run(args):
args.body = populate_args_request_body(args) args.body = populate_args_request_body(args)
try: try:
print_verbose(args.verbose, args.url, args.headers, args.body, rest_cmd, args.timeout) print_verbose(args.verbose, args.url, args.headers, args.body, rest_cmd,
args.timeout)
if args.body: if args.body:
resp = rest_cmd(args.url, timeout=args.timeout, data=args.body, headers=args.headers) resp = rest_cmd(args.url, timeout=args.timeout, data=args.body,
headers=args.headers)
else: else:
resp = rest_cmd(args.url, timeout=args.timeout, headers=args.headers) resp = rest_cmd(args.url, timeout=args.timeout,
headers=args.headers)
except Exception as e: except Exception as e:
print(e) print(e)
exit(1) exit(1)
if not 200 <= resp.status_code < 300: if not 200 <= resp.status_code < 300:
content = resp.json() if resp.status_code == 500 else '' content = resp.json() if resp.status_code == 500 else ''
print('API error: %s %s (Reason: %d)\n%s' % (rest_cmd.func_name.upper(), args.url, resp.status_code, content)) print('API error: %s %s (Reason: %d)\n%s' % (
rest_cmd.func_name.upper(), args.url, resp.status_code, content))
exit(1) exit(1)
try: try:
if resp.content: if resp.content:
rj = resp.json() rj = resp.json()
pretty_print_json(rj) pretty_print_json(rj)
except Exception as e: except Exception as e:
print (e) print(e)
exit(1) exit(1)

View File

@ -13,6 +13,8 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
"""Valet cli."""
import argparse import argparse
import sys import sys
import valet.cli.groupcli as groupcli import valet.cli.groupcli as groupcli
@ -20,27 +22,36 @@ import valet.cli.groupcli as groupcli
class Cli(object): class Cli(object):
"""Cli."""
def __init__(self): def __init__(self):
"""Init cli."""
self.args = None self.args = None
self.submod = None self.submod = None
self.parser = None self.parser = None
def create_parser(self): def create_parser(self):
self.parser = argparse.ArgumentParser(prog='valet', description='VALET REST CLI') """Create parser."""
service_sub = self.parser.add_subparsers(dest='service', metavar='<service>') self.parser = argparse.ArgumentParser(prog='valet',
description='VALET REST CLI')
service_sub = self.parser.add_subparsers(dest='service',
metavar='<service>')
self.submod = {'group': groupcli} self.submod = {'group': groupcli}
for s in self.submod.values(): for s in self.submod.values():
s.add_to_parser(service_sub) s.add_to_parser(service_sub)
def parse(self, argv=sys.argv): def parse(self, argv=sys.argv):
"""Parse args."""
sys.argv = argv sys.argv = argv
self.args = self.parser.parse_args() self.args = self.parser.parse_args()
def logic(self): def logic(self):
"""Logic."""
self.submod[self.args.service].run(self.args) self.submod[self.args.service].run(self.args)
def main(argv): def main(argv):
"""Main."""
cli = Cli() cli = Cli()
cli.create_parser() cli.create_parser()
cli.parse(argv) cli.parse(argv)

View File

@ -13,6 +13,8 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
"""Conf."""
from oslo_config import cfg from oslo_config import cfg
from valet.api import conf as api from valet.api import conf as api
@ -28,53 +30,178 @@ ostro_cli_opts = [
engine_group = cfg.OptGroup(name='engine', title='Valet Engine conf') engine_group = cfg.OptGroup(name='engine', title='Valet Engine conf')
engine_opts = [ engine_opts = [
cfg.StrOpt('pid', default='/var/run/valet/ostro-daemon.pid'), cfg.StrOpt(
cfg.StrOpt('mode', default='live', 'pid',
help='sim will let Ostro simulate datacenter, while live will let it handle a real datacenter'), default='/var/run/valet/ostro-daemon.pid'
cfg.StrOpt('sim_cfg_loc', default='/etc/valet/engine/ostro_sim.cfg'), ),
cfg.BoolOpt('network_control', default=False, help='whether network controller (i.e., Tegu) has been deployed'), cfg.StrOpt(
cfg.StrOpt('network_control_url', default='http://network_control:29444/tegu/api'), 'mode',
cfg.StrOpt('ip', default='localhost'), default='live',
cfg.IntOpt('priority', default=1, help='this instance priority (master=1)'), help="""
cfg.StrOpt('rpc_server_ip', default='localhost', Sim will let Ostro simulate datacenter, while live will
help='Set RPC server ip and port if used. Otherwise, ignore these parameters'), let it handle a real datacenter.
cfg.StrOpt('rpc_server_port', default='8002'), """),
cfg.StrOpt('logger_name', default='engine.log'), cfg.StrOpt(
cfg.StrOpt('logging_level', default='debug'), 'sim_cfg_loc',
cfg.StrOpt('logging_dir', default='/var/log/valet/'), default='/etc/valet/engine/ostro_sim.cfg'),
cfg.StrOpt('max_main_log_size', default=5000000), cfg.BoolOpt(
cfg.IntOpt('max_log_size', default=1000000), 'network_control',
cfg.IntOpt('max_num_of_logs', default=20), default=False,
cfg.StrOpt('datacenter_name', default='bigsite', help="""
help='Inform the name of datacenter (region name), where Valet/Ostro is deployed.'), Whether network controller (i.e., Tegu) has been deployed
cfg.IntOpt('num_of_region_chars', default='3', help='number of chars that indicates the region code'), """),
cfg.StrOpt('rack_code_list', default='r', help='rack indicator.'), cfg.StrOpt(
cfg.ListOpt('node_code_list', default='a,c,u,f,o,p,s', 'network_control_url',
help='indicates the node type. a: network, c KVM compute, u: ESXi compute, f: ?, o: operation, ' default='http://network_control:29444/tegu/api'),
'p: power, s: storage.'), cfg.StrOpt(
cfg.StrOpt('compute_trigger_time', default='1:00', 'ip',
help='trigger time or frequency for checking compute hosting server status (i.e., call Nova)'), default='localhost'),
cfg.IntOpt('compute_trigger_frequency', default=3600, cfg.IntOpt(
help='trigger time or frequency for checking compute hosting server status (i.e., call Nova)'), 'priority',
cfg.StrOpt('topology_trigger_time', default='2:00', default=1,
help='Set trigger time or frequency for checking datacenter topology'), help="""
cfg.IntOpt('topology_trigger_frequency', default=3600, This instance priority (master=1)
help='Set trigger time or frequency for checking datacenter topology'), """),
cfg.IntOpt('default_cpu_allocation_ratio', default=16, help='Set default overbooking ratios. ' cfg.StrOpt(
'Note that each compute node can have its own ratios'), 'rpc_server_ip',
cfg.IntOpt('default_ram_allocation_ratio', default=1.5, help='Set default overbooking ratios. ' default='localhost',
'Note that each compute node can have its own ratios'), help="""
cfg.IntOpt('default_disk_allocation_ratio', default=1, help='Set default overbooking ratios. ' Set RPC server ip and port if used. Otherwise, ignore these parameters
'Note that each compute node can have its own ratios'), """),
cfg.IntOpt('static_cpu_standby_ratio', default=20, help='unused percentages of resources (i.e. standby) ' cfg.StrOpt(
'that are set aside for applications workload spikes.'), 'rpc_server_port',
cfg.IntOpt('static_mem_standby_ratio', default=20, help='unused percentages of resources (i.e. standby) ' default='8002'
'that are set aside for applications workload spikes.'), ),
cfg.IntOpt('static_local_disk_standby_ratio', default=20, help='unused percentages of resources (i.e. standby) ' cfg.StrOpt(
'that are set aside for applications workload spikes.'), 'logger_name',
default='engine.log'
),
cfg.StrOpt(
'logging_level',
default='debug'
),
cfg.StrOpt(
'logging_dir',
default='/var/log/valet/'
),
cfg.StrOpt(
'max_main_log_size',
default=5000000
),
cfg.IntOpt(
'max_log_size',
default=1000000
),
cfg.IntOpt(
'max_num_of_logs',
default=20
),
cfg.StrOpt(
'datacenter_name',
default='bigsite',
help="""
Inform the name of datacenter (region name), where Valet/Ostro is deployed.
"""),
cfg.IntOpt(
'num_of_region_chars',
default='3',
help="""
Number of chars that indicates the region code
"""),
cfg.StrOpt(
'rack_code_list',
default='r',
help="""
Rack indicator.
"""),
cfg.ListOpt(
'node_code_list',
default='a,c,u,f,o,p,s',
help="""
Indicates the node type.
Values:
* a: network
* c KVM compute
* u: ESXi compute
* f: ?
* o: operation
* p: power
* s: storage.
"""),
cfg.StrOpt(
'compute_trigger_time',
default='1:00',
help="""
Trigger time or frequency for checking compute hosting server status
(i.e., call Nova)
"""),
cfg.IntOpt(
'compute_trigger_frequency',
default=3600,
help="""
Trigger time or frequency for checking compute hosting server status
(i.e., call Nova).
"""),
cfg.StrOpt(
'topology_trigger_time',
default='2:00',
help="""
Set trigger time or frequency for checking datacenter topology.
"""),
cfg.IntOpt(
'topology_trigger_frequency',
default=3600,
help="""
Set trigger time or frequency for checking datacenter topology.
"""),
cfg.IntOpt(
'default_cpu_allocation_ratio',
default=16,
help="""
Set default overbooking ratios.
Note that each compute node can have its own ratios.
"""),
cfg.IntOpt(
'default_ram_allocation_ratio',
default=1.5,
help="""
Set default overbooking ratios.
Note that each compute node can have its own ratios.
"""),
cfg.IntOpt(
'default_disk_allocation_ratio',
default=1,
help="""
Set default overbooking ratios.
Note that each compute node can have its own ratios.
"""),
cfg.IntOpt(
'static_cpu_standby_ratio',
default=20,
help="""
Unused percentages of resources (i.e. standby) that are set
aside for applications workload spikes.
"""),
cfg.IntOpt(
'static_mem_standby_ratio',
default=20,
help="""
Unused percentages of resources (i.e. standby) that are set
aside for applications workload spikes.
"""),
cfg.IntOpt(
'static_local_disk_standby_ratio',
default=20,
help="""
Unused percentages of resources (i.e. standby) that are set
aside for applications workload spikes.
"""),
] ]
listener_group = cfg.OptGroup(name='events_listener', title='Valet Engine listener') listener_group = cfg.OptGroup(name='events_listener',
title='Valet Engine listener')
listener_opts = [ listener_opts = [
cfg.StrOpt('exchange', default='nova'), cfg.StrOpt('exchange', default='nova'),
cfg.StrOpt('exchange_type', default='topic'), cfg.StrOpt('exchange_type', default='topic'),
@ -89,6 +216,7 @@ listener_opts = [
def register_conf(): def register_conf():
"""Function calls api and registers configs opts."""
api.register_conf() api.register_conf()
CONF.register_group(engine_group) CONF.register_group(engine_group)
CONF.register_opts(engine_opts, engine_group) CONF.register_opts(engine_opts, engine_group)

View File

@ -13,6 +13,8 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
"""Listener Manager."""
from datetime import datetime from datetime import datetime
import json import json
import pika import pika
@ -26,8 +28,10 @@ import yaml
class ListenerManager(threading.Thread): class ListenerManager(threading.Thread):
"""Listener Manager Thread Class."""
def __init__(self, _t_id, _t_name, _config): def __init__(self, _t_id, _t_name, _config):
"""Init."""
threading.Thread.__init__(self) threading.Thread.__init__(self)
self.thread_id = _t_id self.thread_id = _t_id
self.thread_name = _t_name self.thread_name = _t_name
@ -36,14 +40,16 @@ class ListenerManager(threading.Thread):
self.MUSIC = None self.MUSIC = None
def run(self): def run(self):
'''Entry point """Entry point.
Connect to localhost rabbitmq servers, use username:password@ipaddress:port. Connect to localhost rabbitmq servers, use
The port is typically 5672, and the default username and password are guest and guest. username:password@ipaddress:port. The port is typically 5672,
credentials = pika.PlainCredentials("guest", "PASSWORD") and the default username and password are guest and guest.
''' credentials = pika.PlainCredentials("guest", "PASSWORD").
"""
try: try:
self.listener_logger.info("ListenerManager: start " + self.thread_name + " ......") self.listener_logger.info("ListenerManager: start " +
self.thread_name + " ......")
if self.config.events_listener.store: if self.config.events_listener.store:
@ -54,12 +60,20 @@ class ListenerManager(threading.Thread):
} }
engine = Music(**kwargs) engine = Music(**kwargs)
engine.create_keyspace(self.config.music.keyspace) engine.create_keyspace(self.config.music.keyspace)
self.MUSIC = {'engine': engine, 'keyspace': self.config.music.keyspace} self.MUSIC = {'engine': engine,
self.listener_logger.debug('Storing in music on %s, keyspace %s' % (self.config.music.host, self.config.music.keyspace)) 'keyspace': self.config.music.keyspace}
self.listener_logger.debug('Storing in music on %s, keyspace %s'
% (self.config.music.host,
self.config.music.keyspace))
self.listener_logger.debug('Connecting to %s, with %s' % (self.config.messaging.host, self.config.messaging.username)) self.listener_logger.debug('Connecting to %s, with %s' %
credentials = pika.PlainCredentials(self.config.messaging.username, self.config.messaging.password) (self.config.messaging.host,
parameters = pika.ConnectionParameters(self.config.messaging.host, self.config.messaging.port, '/', credentials) self.config.messaging.username))
credentials = pika.PlainCredentials(self.config.messaging.username,
self.config.messaging.password)
parameters = pika.ConnectionParameters(self.config.messaging.host,
self.config.messaging.port,
'/', credentials)
connection = pika.BlockingConnection(parameters) connection = pika.BlockingConnection(parameters)
channel = connection.channel() channel = connection.channel()
@ -73,9 +87,9 @@ class ListenerManager(threading.Thread):
# to receive. '#' is a wild card -- meaning receive all messages # to receive. '#' is a wild card -- meaning receive all messages
binding_key = "#" binding_key = "#"
# Check whether or not an exchange with the given name and type exists. # Check whether an exchange with the given name and type exists.
# Make sure that the exchange is multicast "fanout" or "topic" type # Make sure that the exchange is multicast "fanout" or "topic" type
# otherwise our queue will consume the messages intended for other queues # otherwise queue will consume messages intended for other queues
channel.exchange_declare(exchange=exchange_name, channel.exchange_declare(exchange=exchange_name,
exchange_type=exchange_type, exchange_type=exchange_type,
auto_delete=auto_delete) auto_delete=auto_delete)
@ -85,8 +99,11 @@ class ListenerManager(threading.Thread):
queue_name = result.method.queue queue_name = result.method.queue
# Bind the queue to the selected exchange # Bind the queue to the selected exchange
channel.queue_bind(exchange=exchange_name, queue=queue_name, routing_key=binding_key) channel.queue_bind(exchange=exchange_name, queue=queue_name,
self.listener_logger.info('Channel is bound, listening on %s exchange %s', self.config.messaging.host, self.config.events_listener.exchange) routing_key=binding_key)
self.listener_logger.info('Channel is bound,listening on%s '
'exchange %s', self.config.messaging.host,
self.config.events_listener.exchange)
# Start consuming messages # Start consuming messages
channel.basic_consume(self.on_message, queue_name) channel.basic_consume(self.on_message, queue_name)
@ -103,8 +120,9 @@ class ListenerManager(threading.Thread):
channel.close() channel.close()
connection.close() connection.close()
def on_message(self, channel, method_frame, _, body): # pylint: disable=W0613 def on_message(self, channel, method_frame, _, body):
'''Specify the action to be taken on a message received''' """Specify the action to be taken on a message received."""
# pylint: disable=W0613
message = yaml.load(body) message = yaml.load(body)
try: try:
if 'oslo.message' in message.keys(): if 'oslo.message' in message.keys():
@ -115,12 +133,14 @@ class ListenerManager(threading.Thread):
else: else:
return return
self.listener_logger.debug("\nMessage No: %s\n", method_frame.delivery_tag) self.listener_logger.debug("\nMessage No: %s\n",
method_frame.delivery_tag)
message_obj = yaml.load(body) message_obj = yaml.load(body)
if 'oslo.message' in message_obj.keys(): if 'oslo.message' in message_obj.keys():
message_obj = yaml.load(message_obj['oslo.message']) message_obj = yaml.load(message_obj['oslo.message'])
if self.config.events_listener.output_format == 'json': if self.config.events_listener.output_format == 'json':
self.listener_logger.debug(json.dumps(message_obj, sort_keys=True, indent=2)) self.listener_logger.debug(json.dumps(message_obj,
sort_keys=True, indent=2))
elif self.config.events_listener.output_format == 'yaml': elif self.config.events_listener.output_format == 'yaml':
self.listener_logger.debug(yaml.dump(message_obj)) self.listener_logger.debug(yaml.dump(message_obj))
else: else:
@ -131,25 +151,34 @@ class ListenerManager(threading.Thread):
return return
def is_message_wanted(self, message): def is_message_wanted(self, message):
''' Based on markers from Ostro, determine if this is a wanted message. ''' """Based on markers from Ostro.
Determine if this is a wanted message.
"""
method = message.get('method', None) method = message.get('method', None)
args = message.get('args', None) args = message.get('args', None)
nova_props = {'nova_object.changes', 'nova_object.data', 'nova_object.name'} nova_props = {'nova_object.changes', 'nova_object.data',
'nova_object.name'}
args_props = {'filter_properties', 'instance'} args_props = {'filter_properties', 'instance'}
is_data = method and args is_data = method and args
is_nova = is_data and 'objinst' in args and nova_props.issubset(args['objinst']) is_nova = is_data and 'objinst' in args \
and nova_props.issubset(args['objinst'])
action_instance = is_nova and method == 'object_action' and self.is_nova_name(args) and self.is_nova_state(args) action_instance = is_nova and method == 'object_action' \
and self.is_nova_name(args) \
and self.is_nova_state(args)
action_compute = is_nova and self.is_compute_name(args) action_compute = is_nova and self.is_compute_name(args)
create_instance = is_data and method == 'build_and_run_instance' and args_props.issubset(args) and 'nova_object.data' in args['instance'] create_instance = is_data and method == 'build_and_run_instance' \
and args_props.issubset(args) \
and 'nova_object.data' in args['instance']
return action_instance or action_compute or create_instance return action_instance or action_compute or create_instance
def store_message(self, message): def store_message(self, message):
'''Store message in Music''' """Store message in Music."""
timestamp = datetime.now().isoformat() timestamp = datetime.now().isoformat()
args = json.dumps(message.get('args', None)) args = json.dumps(message.get('args', None))
exchange = self.config.events_listener.exchange exchange = self.config.events_listener.exchange
@ -165,10 +194,14 @@ class ListenerManager(threading.Thread):
OsloMessage(**kwargs) # pylint: disable=W0612 OsloMessage(**kwargs) # pylint: disable=W0612
def is_nova_name(self, args): def is_nova_name(self, args):
"""Return True if object name is Instance."""
return args['objinst']['nova_object.name'] == 'Instance' return args['objinst']['nova_object.name'] == 'Instance'
def is_nova_state(self, args): def is_nova_state(self, args):
return args['objinst']['nova_object.data']['vm_state'] in ['deleted', 'active'] """Return True if object vm_state is deleted or active."""
return args['objinst']['nova_object.data']['vm_state'] \
in ['deleted', 'active']
def is_compute_name(self, args): def is_compute_name(self, args):
"""Return True if object name is ComputeNode."""
return args['objinst']['nova_object.name'] == 'ComputeNode' return args['objinst']['nova_object.name'] == 'ComputeNode'

View File

@ -13,7 +13,7 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
'''OsloMessage Database Model''' """OsloMessage Database Model."""
# This is based on Music models used in Valet. # This is based on Music models used in Valet.
@ -21,6 +21,8 @@ import uuid
class OsloMessage(object): class OsloMessage(object):
"""OsloMessage class."""
__tablename__ = 'oslo_messages' __tablename__ = 'oslo_messages'
_database = None _database = None
@ -32,7 +34,7 @@ class OsloMessage(object):
@classmethod @classmethod
def schema(cls): def schema(cls):
'''Return schema.''' """Return schema."""
schema = { schema = {
'timestamp': 'text', 'timestamp': 'text',
'args': 'text', 'args': 'text',
@ -44,13 +46,15 @@ class OsloMessage(object):
@classmethod @classmethod
def pk_name(cls): def pk_name(cls):
"""Return timestamp string."""
return 'timestamp' return 'timestamp'
def pk_value(self): def pk_value(self):
"""Return self.timestamp."""
return self.timestamp return self.timestamp
def insert(self): def insert(self):
'''Insert row.''' """Insert row."""
keyspace = self._database.get('keyspace') keyspace = self._database.get('keyspace')
kwargs = { kwargs = {
'keyspace': keyspace, 'keyspace': keyspace,
@ -66,6 +70,7 @@ class OsloMessage(object):
engine.create_row(**kwargs) engine.create_row(**kwargs)
def values(self): def values(self):
"""Return values."""
return { return {
'timestamp': self.timestamp, 'timestamp': self.timestamp,
'args': self.args, 'args': self.args,
@ -75,6 +80,7 @@ class OsloMessage(object):
def __init__(self, timestamp, args, exchange, def __init__(self, timestamp, args, exchange,
method, database, _insert=True): method, database, _insert=True):
"""Init."""
self._database = database self._database = database
self.timestamp = timestamp self.timestamp = timestamp
self.args = args self.args = args
@ -84,6 +90,7 @@ class OsloMessage(object):
self.insert() self.insert()
def __json__(self): def __json__(self):
"""Return json."""
json_ = {} json_ = {}
json_['timestamp'] = self.timestamp json_['timestamp'] = self.timestamp
json_['args'] = self.args json_['args'] = self.args

View File

@ -13,6 +13,8 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
"""App Handler."""
import json import json
from valet.engine.optimizer.app_manager.app_topology import AppTopology from valet.engine.optimizer.app_manager.app_topology import AppTopology
@ -22,14 +24,21 @@ from valet.engine.optimizer.util import util as util
class AppHandler(object): class AppHandler(object):
"""App Handler Class.
This class handles operations for the management of applications.
Functions related to adding apps and adding/removing them from
placement and updating topology info.
"""
def __init__(self, _resource, _db, _config, _logger): def __init__(self, _resource, _db, _config, _logger):
"""Init App Handler Class."""
self.resource = _resource self.resource = _resource
self.db = _db self.db = _db
self.config = _config self.config = _config
self.logger = _logger self.logger = _logger
''' current app requested, a temporary copy ''' """ current app requested, a temporary copy """
self.apps = {} self.apps = {}
self.last_log_index = 0 self.last_log_index = 0
@ -37,6 +46,7 @@ class AppHandler(object):
self.status = "success" self.status = "success"
def add_app(self, _app_data): def add_app(self, _app_data):
"""Add app and set or regenerate topology, return updated topology."""
self.apps.clear() self.apps.clear()
app_topology = AppTopology(self.resource, self.logger) app_topology = AppTopology(self.resource, self.logger)
@ -60,10 +70,12 @@ class AppHandler(object):
if action == "ping": if action == "ping":
self.logger.debug("AppHandler: got ping") self.logger.debug("AppHandler: got ping")
elif action == "replan" or action == "migrate": elif action == "replan" or action == "migrate":
re_app = self._regenerate_app_topology(stack_id, app, app_topology, action) re_app = self._regenerate_app_topology(stack_id, app,
app_topology, action)
if re_app is None: if re_app is None:
self.apps[stack_id] = None self.apps[stack_id] = None
self.status = "cannot locate the original plan for stack = " + stack_id self.status = "cannot locate the original plan for " \
"stack = " + stack_id
return None return None
if action == "replan": if action == "replan":
@ -93,6 +105,7 @@ class AppHandler(object):
return app_topology return app_topology
def add_placement(self, _placement_map, _timestamp): def add_placement(self, _placement_map, _timestamp):
"""Change requested apps to scheduled and place them."""
for v in _placement_map.keys(): for v in _placement_map.keys():
if self.apps[v.app_uuid].status == "requested": if self.apps[v.app_uuid].status == "requested":
self.apps[v.app_uuid].status = "scheduled" self.apps[v.app_uuid].status = "scheduled"
@ -116,11 +129,12 @@ class AppHandler(object):
def _store_app_placements(self): def _store_app_placements(self):
(app_logfile, last_index, mode) = util.get_last_logfile( (app_logfile, last_index, mode) = util.get_last_logfile(
self.config.app_log_loc, self.config.max_log_size, self.config.max_num_of_logs, self.config.app_log_loc, self.config.max_log_size,
self.resource.datacenter.name, self.last_log_index) self.config.max_num_of_logs, self.resource.datacenter.name,
self.last_log_index)
self.last_log_index = last_index self.last_log_index = last_index
# TODO: error handling # TODO(UNKNOWN): error handling
logging = open(self.config.app_log_loc + app_logfile, mode) logging = open(self.config.app_log_loc + app_logfile, mode)
@ -141,19 +155,23 @@ class AppHandler(object):
if self.db.add_app(appk, json_info) is False: if self.db.add_app(appk, json_info) is False:
return False return False
if self.db.update_app_log_index(self.resource.datacenter.name, self.last_log_index) is False: if self.db.update_app_log_index(self.resource.datacenter.name,
self.last_log_index) is False:
return False return False
return True return True
def remove_placement(self): def remove_placement(self):
"""Remove App from placement."""
if self.db is not None: if self.db is not None:
for appk, _ in self.apps.iteritems(): for appk, _ in self.apps.iteritems():
if self.db.add_app(appk, None) is False: if self.db.add_app(appk, None) is False:
self.logger.error("AppHandler: error while adding app info to MUSIC") self.logger.error("AppHandler: error while adding app "
"info to MUSIC")
# NOTE: ignore? # NOTE: ignore?
def get_vm_info(self, _s_uuid, _h_uuid, _host): def get_vm_info(self, _s_uuid, _h_uuid, _host):
"""Return vm_info from database."""
vm_info = {} vm_info = {}
if _h_uuid is not None and _h_uuid != "none" and \ if _h_uuid is not None and _h_uuid != "none" and \
@ -163,6 +181,7 @@ class AppHandler(object):
return vm_info return vm_info
def update_vm_info(self, _s_uuid, _h_uuid): def update_vm_info(self, _s_uuid, _h_uuid):
"""Update vm info (the ids) in the database."""
s_uuid_exist = bool(_s_uuid is not None and _s_uuid != "none") s_uuid_exist = bool(_s_uuid is not None and _s_uuid != "none")
h_uuid_exist = bool(_h_uuid is not None and _h_uuid != "none") h_uuid_exist = bool(_h_uuid is not None and _h_uuid != "none")
if s_uuid_exist and h_uuid_exist: if s_uuid_exist and h_uuid_exist:
@ -216,26 +235,32 @@ class AppHandler(object):
if _action == "replan": if _action == "replan":
if vmk == _app["orchestration_id"]: if vmk == _app["orchestration_id"]:
_app_topology.candidate_list_map[vmk] = _app["locations"] _app_topology.candidate_list_map[vmk] = \
_app["locations"]
self.logger.debug("AppHandler: re-requested vm = " + vm["name"] + " in") self.logger.debug("AppHandler: re-requested vm = " +
vm["name"] + " in")
for hk in _app["locations"]: for hk in _app["locations"]:
self.logger.debug(" " + hk) self.logger.debug(" " + hk)
elif vmk in _app["exclusions"]: elif vmk in _app["exclusions"]:
_app_topology.planned_vm_map[vmk] = vm["host"] _app_topology.planned_vm_map[vmk] = vm["host"]
self.logger.debug("AppHandler: exception from replan = " + vm["name"]) self.logger.debug("AppHandler: exception from "
"replan = " + vm["name"])
elif _action == "migrate": elif _action == "migrate":
if vmk == _app["orchestration_id"]: if vmk == _app["orchestration_id"]:
_app_topology.exclusion_list_map[vmk] = _app["excluded_hosts"] _app_topology.exclusion_list_map[vmk] = _app[
"excluded_hosts"]
if vm["host"] not in _app["excluded_hosts"]: if vm["host"] not in _app["excluded_hosts"]:
_app_topology.exclusion_list_map[vmk].append(vm["host"]) _app_topology.exclusion_list_map[vmk].append(
vm["host"])
else: else:
_app_topology.planned_vm_map[vmk] = vm["host"] _app_topology.planned_vm_map[vmk] = vm["host"]
_app_topology.old_vm_map[vmk] = (vm["host"], vm["cpus"], vm["mem"], vm["local_volume"]) _app_topology.old_vm_map[vmk] = (vm["host"], vm["cpus"],
vm["mem"], vm["local_volume"])
if "VGroups" in old_app.keys(): if "VGroups" in old_app.keys():
for gk, affinity in old_app["VGroups"].iteritems(): for gk, affinity in old_app["VGroups"].iteritems():
@ -251,14 +276,16 @@ class AppHandler(object):
resources[gk]["properties"] = properties resources[gk]["properties"] = properties
if len(affinity["diversity_groups"]) > 0: if len(affinity["diversity_groups"]) > 0:
for divk, level_name in affinity["diversity_groups"].iteritems(): for divk, level_name in \
affinity["diversity_groups"].iteritems():
div_id = divk + ":" + level_name div_id = divk + ":" + level_name
if div_id not in diversity_groups.keys(): if div_id not in diversity_groups.keys():
diversity_groups[div_id] = [] diversity_groups[div_id] = []
diversity_groups[div_id].append(gk) diversity_groups[div_id].append(gk)
if len(affinity["exclusivity_groups"]) > 0: if len(affinity["exclusivity_groups"]) > 0:
for exk, level_name in affinity["exclusivity_groups"].iteritems(): for exk, level_name in \
affinity["exclusivity_groups"].iteritems():
ex_id = exk + ":" + level_name ex_id = exk + ":" + level_name
if ex_id not in exclusivity_groups.keys(): if ex_id not in exclusivity_groups.keys():
exclusivity_groups[ex_id] = [] exclusivity_groups[ex_id] = []
@ -269,7 +296,8 @@ class AppHandler(object):
for div_id, resource_list in diversity_groups.iteritems(): for div_id, resource_list in diversity_groups.iteritems():
divk_level_name = div_id.split(":") divk_level_name = div_id.split(":")
resources[divk_level_name[0]] = {} resources[divk_level_name[0]] = {}
resources[divk_level_name[0]]["type"] = "ATT::Valet::GroupAssignment" resources[divk_level_name[0]]["type"] = \
"ATT::Valet::GroupAssignment"
properties = {} properties = {}
properties["group_type"] = "diversity" properties["group_type"] = "diversity"
properties["group_name"] = divk_level_name[2] properties["group_name"] = divk_level_name[2]

View File

@ -13,29 +13,37 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
"""App Topology."""
from valet.engine.optimizer.app_manager.app_topology_base import VM, VGroup from valet.engine.optimizer.app_manager.app_topology_base import VM, VGroup
from valet.engine.optimizer.app_manager.app_topology_parser import Parser from valet.engine.optimizer.app_manager.app_topology_parser import Parser
class AppTopology(object): class AppTopology(object):
"""App Topology Class.
This class contains functions for parsing and setting each app, as well as
calculating and setting optimization.
"""
def __init__(self, _resource, _logger): def __init__(self, _resource, _logger):
"""Init App Topology Class."""
self.vgroups = {} self.vgroups = {}
self.vms = {} self.vms = {}
self.volumes = {} self.volumes = {}
''' for replan ''' """ for replan """
self.old_vm_map = {} self.old_vm_map = {}
self.planned_vm_map = {} self.planned_vm_map = {}
self.candidate_list_map = {} self.candidate_list_map = {}
''' for migration-tip ''' """ for migration-tip """
self.exclusion_list_map = {} self.exclusion_list_map = {}
self.resource = _resource self.resource = _resource
self.logger = _logger self.logger = _logger
''' restriction of host naming convention ''' """ restriction of host naming convention """
high_level_allowed = True high_level_allowed = True
if "none" in self.resource.datacenter.region_code_list: if "none" in self.resource.datacenter.region_code_list:
high_level_allowed = False high_level_allowed = False
@ -51,15 +59,19 @@ class AppTopology(object):
self.status = "success" self.status = "success"
''' parse and set each app '''
def set_app_topology(self, _app_graph): def set_app_topology(self, _app_graph):
"""Set app topology (Parse and set each app).
Set app topology by calling parser to determine vgroups,
vms and volumes. Then return parsed stack_id, app_name and action.
"""
(vgroups, vms, volumes) = self.parser.set_topology(_app_graph) (vgroups, vms, volumes) = self.parser.set_topology(_app_graph)
if len(vgroups) == 0 and len(vms) == 0 and len(volumes) == 0: if len(vgroups) == 0 and len(vms) == 0 and len(volumes) == 0:
self.status = self.parser.status self.status = self.parser.status
return None return None
''' cumulate virtual resources ''' """ cumulate virtual resources """
for _, vgroup in vgroups.iteritems(): for _, vgroup in vgroups.iteritems():
self.vgroups[vgroup.uuid] = vgroup self.vgroups[vgroup.uuid] = vgroup
for _, vm in vms.iteritems(): for _, vm in vms.iteritems():
@ -67,9 +79,11 @@ class AppTopology(object):
for _, vol in volumes.iteritems(): for _, vol in volumes.iteritems():
self.volumes[vol.uuid] = vol self.volumes[vol.uuid] = vol
return self.parser.stack_id, self.parser.application_name, self.parser.action return self.parser.stack_id, self.parser.application_name, \
self.parser.action
def set_weight(self): def set_weight(self):
"""Set weight of vms and vgroups."""
for _, vm in self.vms.iteritems(): for _, vm in self.vms.iteritems():
self._set_vm_weight(vm) self._set_vm_weight(vm)
for _, vg in self.vgroups.iteritems(): for _, vg in self.vgroups.iteritems():
@ -87,19 +101,22 @@ class AppTopology(object):
self._set_vm_weight(sg) self._set_vm_weight(sg)
else: else:
if self.resource.CPU_avail > 0: if self.resource.CPU_avail > 0:
_v.vCPU_weight = float(_v.vCPUs) / float(self.resource.CPU_avail) _v.vCPU_weight = float(_v.vCPUs) / \
float(self.resource.CPU_avail)
else: else:
_v.vCPU_weight = 1.0 _v.vCPU_weight = 1.0
self.total_CPU += _v.vCPUs self.total_CPU += _v.vCPUs
if self.resource.mem_avail > 0: if self.resource.mem_avail > 0:
_v.mem_weight = float(_v.mem) / float(self.resource.mem_avail) _v.mem_weight = float(_v.mem) / \
float(self.resource.mem_avail)
else: else:
_v.mem_weight = 1.0 _v.mem_weight = 1.0
self.total_mem += _v.mem self.total_mem += _v.mem
if self.resource.local_disk_avail > 0: if self.resource.local_disk_avail > 0:
_v.local_volume_weight = float(_v.local_volume_size) / float(self.resource.local_disk_avail) _v.local_volume_weight = float(_v.local_volume_size) / \
float(self.resource.local_disk_avail)
else: else:
if _v.local_volume_size > 0: if _v.local_volume_size > 0:
_v.local_volume_weight = 1.0 _v.local_volume_weight = 1.0
@ -110,7 +127,8 @@ class AppTopology(object):
bandwidth = _v.nw_bandwidth + _v.io_bandwidth bandwidth = _v.nw_bandwidth + _v.io_bandwidth
if self.resource.nw_bandwidth_avail > 0: if self.resource.nw_bandwidth_avail > 0:
_v.bandwidth_weight = float(bandwidth) / float(self.resource.nw_bandwidth_avail) _v.bandwidth_weight = float(bandwidth) / \
float(self.resource.nw_bandwidth_avail)
else: else:
if bandwidth > 0: if bandwidth > 0:
_v.bandwidth_weight = 1.0 _v.bandwidth_weight = 1.0
@ -129,8 +147,10 @@ class AppTopology(object):
_vg.local_volume_size += sg.local_volume_size _vg.local_volume_size += sg.local_volume_size
def _set_vgroup_weight(self, _vgroup): def _set_vgroup_weight(self, _vgroup):
"""Calculate weights for vgroup."""
if self.resource.CPU_avail > 0: if self.resource.CPU_avail > 0:
_vgroup.vCPU_weight = float(_vgroup.vCPUs) / float(self.resource.CPU_avail) _vgroup.vCPU_weight = float(_vgroup.vCPUs) / \
float(self.resource.CPU_avail)
else: else:
if _vgroup.vCPUs > 0: if _vgroup.vCPUs > 0:
_vgroup.vCPU_weight = 1.0 _vgroup.vCPU_weight = 1.0
@ -138,7 +158,8 @@ class AppTopology(object):
_vgroup.vCPU_weight = 0.0 _vgroup.vCPU_weight = 0.0
if self.resource.mem_avail > 0: if self.resource.mem_avail > 0:
_vgroup.mem_weight = float(_vgroup.mem) / float(self.resource.mem_avail) _vgroup.mem_weight = float(_vgroup.mem) / \
float(self.resource.mem_avail)
else: else:
if _vgroup.mem > 0: if _vgroup.mem > 0:
_vgroup.mem_weight = 1.0 _vgroup.mem_weight = 1.0
@ -146,7 +167,8 @@ class AppTopology(object):
_vgroup.mem_weight = 0.0 _vgroup.mem_weight = 0.0
if self.resource.local_disk_avail > 0: if self.resource.local_disk_avail > 0:
_vgroup.local_volume_weight = float(_vgroup.local_volume_size) / float(self.resource.local_disk_avail) _vgroup.local_volume_weight = float(_vgroup.local_volume_size) / \
float(self.resource.local_disk_avail)
else: else:
if _vgroup.local_volume_size > 0: if _vgroup.local_volume_size > 0:
_vgroup.local_volume_weight = 1.0 _vgroup.local_volume_weight = 1.0
@ -156,7 +178,8 @@ class AppTopology(object):
bandwidth = _vgroup.nw_bandwidth + _vgroup.io_bandwidth bandwidth = _vgroup.nw_bandwidth + _vgroup.io_bandwidth
if self.resource.nw_bandwidth_avail > 0: if self.resource.nw_bandwidth_avail > 0:
_vgroup.bandwidth_weight = float(bandwidth) / float(self.resource.nw_bandwidth_avail) _vgroup.bandwidth_weight = float(bandwidth) / \
float(self.resource.nw_bandwidth_avail)
else: else:
if bandwidth > 0: if bandwidth > 0:
_vgroup.bandwidth_weight = 1.0 _vgroup.bandwidth_weight = 1.0
@ -168,12 +191,20 @@ class AppTopology(object):
self._set_vgroup_weight(svg) self._set_vgroup_weight(svg)
def set_optimization_priority(self): def set_optimization_priority(self):
if len(self.vgroups) == 0 and len(self.vms) == 0 and len(self.volumes) == 0: """Set Optimization Priority.
This function calculates weights for bandwidth, cpu, memory, local
and overall volume for an app. Then Sorts the results and sets
optimization order accordingly.
"""
if len(self.vgroups) == 0 and len(self.vms) == 0 and \
len(self.volumes) == 0:
return return
app_nw_bandwidth_weight = -1 app_nw_bandwidth_weight = -1
if self.resource.nw_bandwidth_avail > 0: if self.resource.nw_bandwidth_avail > 0:
app_nw_bandwidth_weight = float(self.total_nw_bandwidth) / float(self.resource.nw_bandwidth_avail) app_nw_bandwidth_weight = float(self.total_nw_bandwidth) / \
float(self.resource.nw_bandwidth_avail)
else: else:
if self.total_nw_bandwidth > 0: if self.total_nw_bandwidth > 0:
app_nw_bandwidth_weight = 1.0 app_nw_bandwidth_weight = 1.0
@ -182,7 +213,8 @@ class AppTopology(object):
app_CPU_weight = -1 app_CPU_weight = -1
if self.resource.CPU_avail > 0: if self.resource.CPU_avail > 0:
app_CPU_weight = float(self.total_CPU) / float(self.resource.CPU_avail) app_CPU_weight = float(self.total_CPU) / \
float(self.resource.CPU_avail)
else: else:
if self.total_CPU > 0: if self.total_CPU > 0:
app_CPU_weight = 1.0 app_CPU_weight = 1.0
@ -191,7 +223,8 @@ class AppTopology(object):
app_mem_weight = -1 app_mem_weight = -1
if self.resource.mem_avail > 0: if self.resource.mem_avail > 0:
app_mem_weight = float(self.total_mem) / float(self.resource.mem_avail) app_mem_weight = float(self.total_mem) / \
float(self.resource.mem_avail)
else: else:
if self.total_mem > 0: if self.total_mem > 0:
app_mem_weight = 1.0 app_mem_weight = 1.0
@ -200,7 +233,8 @@ class AppTopology(object):
app_local_vol_weight = -1 app_local_vol_weight = -1
if self.resource.local_disk_avail > 0: if self.resource.local_disk_avail > 0:
app_local_vol_weight = float(self.total_local_vol) / float(self.resource.local_disk_avail) app_local_vol_weight = float(self.total_local_vol) / \
float(self.resource.local_disk_avail)
else: else:
if self.total_local_vol > 0: if self.total_local_vol > 0:
app_local_vol_weight = 1.0 app_local_vol_weight = 1.0
@ -213,7 +247,8 @@ class AppTopology(object):
app_vol_weight = -1 app_vol_weight = -1
if self.resource.disk_avail > 0: if self.resource.disk_avail > 0:
app_vol_weight = float(sum(total_vol_list)) / float(self.resource.disk_avail) app_vol_weight = float(sum(total_vol_list)) / \
float(self.resource.disk_avail)
else: else:
if sum(total_vol_list) > 0: if sum(total_vol_list) > 0:
app_vol_weight = 1.0 app_vol_weight = 1.0
@ -226,4 +261,6 @@ class AppTopology(object):
("lvol", app_local_vol_weight), ("lvol", app_local_vol_weight),
("vol", app_vol_weight)] ("vol", app_vol_weight)]
self.optimization_priority = sorted(opt, key=lambda resource: resource[1], reverse=True) self.optimization_priority = sorted(opt,
key=lambda resource: resource[1],
reverse=True)

View File

@ -13,12 +13,26 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
"""App Topology Base.
This file contains different datatype base classes to be used when
buliding out app topology. These classes include VGroups, Volumes and Vms,
as well as 'Link' classes for each.
"""
LEVELS = ["host", "rack", "cluster"] LEVELS = ["host", "rack", "cluster"]
class VGroup(object): class VGroup(object):
"""VGroup Class.
This class represents a VGroup object (virtual group). It contains
data about the volumes or vms it contains (such as compute resources),
and data about the group itself (group type, etc).
"""
def __init__(self, _app_uuid, _uuid): def __init__(self, _app_uuid, _uuid):
"""Init VGroup Class."""
self.app_uuid = _app_uuid self.app_uuid = _app_uuid
self.uuid = _uuid self.uuid = _uuid
self.name = None self.name = None
@ -55,6 +69,7 @@ class VGroup(object):
self.host = None self.host = None
def get_json_info(self): def get_json_info(self):
"""Return JSON info of VGroup Object."""
survgroup_id = None survgroup_id = None
if self.survgroup is None: if self.survgroup is None:
survgroup_id = "none" survgroup_id = "none"
@ -95,8 +110,14 @@ class VGroup(object):
class VM(object): class VM(object):
"""VM Class.
This class represents a Virtual Machine object. Examples of data this
class contains are compute resources, the host, and status.
"""
def __init__(self, _app_uuid, _uuid): def __init__(self, _app_uuid, _uuid):
"""Init VM Class."""
self.app_uuid = _app_uuid self.app_uuid = _app_uuid
self.uuid = _uuid self.uuid = _uuid
self.name = None self.name = None
@ -129,6 +150,7 @@ class VM(object):
self.host = None # where this vm is placed self.host = None # where this vm is placed
def get_json_info(self): def get_json_info(self):
"""Return JSON info for VM object."""
survgroup_id = None survgroup_id = None
if self.survgroup is None: if self.survgroup is None:
survgroup_id = "none" survgroup_id = "none"
@ -172,8 +194,15 @@ class VM(object):
class Volume(object): class Volume(object):
"""Volume Class.
This class represents a volume, containing an app id and name, as well as
a list of links to VMs and the groups it belongs to. This also contains
data about the resources needed such as size, bandwidth and weight.
"""
def __init__(self, _app_uuid, _uuid): def __init__(self, _app_uuid, _uuid):
"""Init Volume Class."""
self.app_uuid = _app_uuid self.app_uuid = _app_uuid
self.uuid = _uuid self.uuid = _uuid
self.name = None self.name = None
@ -198,6 +227,7 @@ class Volume(object):
self.storage_host = None self.storage_host = None
def get_json_info(self): def get_json_info(self):
"""Return JSON info for a Volume."""
survgroup_id = None survgroup_id = None
if self.survgroup is None: if self.survgroup is None:
survgroup_id = "none" survgroup_id = "none"
@ -229,35 +259,53 @@ class Volume(object):
class VGroupLink(object): class VGroupLink(object):
"""VGroup Link Class.
This class represents a link between VGroups.
"""
def __init__(self, _n): def __init__(self, _n):
"""Init VGroup Link."""
self.node = _n # target VM or Volume self.node = _n # target VM or Volume
self.nw_bandwidth = 0 self.nw_bandwidth = 0
self.io_bandwidth = 0 self.io_bandwidth = 0
def get_json_info(self): def get_json_info(self):
"""Return JSON info of VGroup Link Object."""
return {'target': self.node.uuid, return {'target': self.node.uuid,
'nw_bandwidth': self.nw_bandwidth, 'nw_bandwidth': self.nw_bandwidth,
'io_bandwidth': self.io_bandwidth} 'io_bandwidth': self.io_bandwidth}
class VMLink(object): class VMLink(object):
"""VM Link Class.
This class represents a link between VMs.
"""
def __init__(self, _n): def __init__(self, _n):
"""Init VM Link."""
self.node = _n # target VM self.node = _n # target VM
self.nw_bandwidth = 0 # Mbps self.nw_bandwidth = 0 # Mbps
def get_json_info(self): def get_json_info(self):
"""Return JSON info of VM Link Object."""
return {'target': self.node.uuid, return {'target': self.node.uuid,
'nw_bandwidth': self.nw_bandwidth} 'nw_bandwidth': self.nw_bandwidth}
class VolumeLink(object): class VolumeLink(object):
"""Volume Link Class.
This class represents a link between volumes.
"""
def __init__(self, _n): def __init__(self, _n):
"""Init Volume Link."""
self.node = _n # target Volume self.node = _n # target Volume
self.io_bandwidth = 0 # Mbps self.io_bandwidth = 0 # Mbps
def get_json_info(self): def get_json_info(self):
"""Return JSON info of Volume Link Object."""
return {'target': self.node.uuid, return {'target': self.node.uuid,
'io_bandwidth': self.io_bandwidth} 'io_bandwidth': self.io_bandwidth}

View File

@ -13,10 +13,8 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
from valet.engine.optimizer.app_manager.app_topology_base import VGroup, VGroupLink, VM, VMLink, LEVELS """App Topology Parser.
'''
- Restrictions of nested groups: EX in EX, EX in DIV, DIV in EX, DIV in DIV - Restrictions of nested groups: EX in EX, EX in DIV, DIV in EX, DIV in DIV
- VM/group cannot exist in multiple EX groups - VM/group cannot exist in multiple EX groups
- Nested group's level cannot be higher than nesting group - Nested group's level cannot be higher than nesting group
@ -26,12 +24,21 @@ from valet.engine.optimizer.app_manager.app_topology_base import VGroup, VGroupL
OS::Heat::Stack OS::Heat::Stack
OS::Heat::ResourceGroup OS::Heat::ResourceGroup
OS::Heat::ResourceGroup OS::Heat::ResourceGroup
''' """
from valet.engine.optimizer.app_manager.app_topology_base \
import VGroup, VGroupLink, VM, VMLink, LEVELS
class Parser(object): class Parser(object):
"""Parser Class.
This class handles parsing out the data related to the desired
topology from a template.
"""
def __init__(self, _high_level_allowed, _logger): def __init__(self, _high_level_allowed, _logger):
"""Init Parser Class."""
self.logger = _logger self.logger = _logger
self.high_level_allowed = _high_level_allowed self.high_level_allowed = _high_level_allowed
@ -44,6 +51,7 @@ class Parser(object):
self.status = "success" self.status = "success"
def set_topology(self, _graph): def set_topology(self, _graph):
"""Return result of set_topology which parses input to get topology."""
if "version" in _graph.keys(): if "version" in _graph.keys():
self.format_version = _graph["version"] self.format_version = _graph["version"]
else: else:
@ -71,7 +79,7 @@ class Parser(object):
vgroup_captured = False vgroup_captured = False
vms = {} vms = {}
''' empty at this version ''' """ empty at this version """
volumes = {} volumes = {}
for rk, r in _elements.iteritems(): for rk, r in _elements.iteritems():
@ -96,7 +104,8 @@ class Parser(object):
self.logger.debug("Parser: get a vm = " + vm.name) self.logger.debug("Parser: get a vm = " + vm.name)
elif r["type"] == "OS::Cinder::Volume": elif r["type"] == "OS::Cinder::Volume":
self.logger.warn("Parser: do nothing for volume at this version") self.logger.warn("Parser: do nothing for volume at this "
"version")
elif r["type"] == "ATT::Valet::GroupAssignment": elif r["type"] == "ATT::Valet::GroupAssignment":
vgroup = VGroup(self.stack_id, rk) vgroup = VGroup(self.stack_id, rk)
@ -110,7 +119,8 @@ class Parser(object):
elif r["properties"]["group_type"] == "exclusivity": elif r["properties"]["group_type"] == "exclusivity":
vgroup.vgroup_type = "EX" vgroup.vgroup_type = "EX"
else: else:
self.status = "unknown group = " + r["properties"]["group_type"] self.status = "unknown group = " + \
r["properties"]["group_type"]
return {}, {}, {} return {}, {}, {}
else: else:
self.status = "no group type" self.status = "no group type"
@ -129,8 +139,9 @@ class Parser(object):
vgroup.level = r["properties"]["level"] vgroup.level = r["properties"]["level"]
if vgroup.level != "host": if vgroup.level != "host":
if self.high_level_allowed is False: if self.high_level_allowed is False:
self.status = "only host level of affinity group allowed " + \ self.status = "only host level of affinity group " \
"due to the mis-match of host naming convention" "allowed due to the mis-match of " \
"host naming convention"
return {}, {}, {} return {}, {}, {}
else: else:
self.status = "no grouping level" self.status = "no grouping level"
@ -150,16 +161,19 @@ class Parser(object):
self.logger.debug("Parser: all vms parsed") self.logger.debug("Parser: all vms parsed")
if self._merge_diversity_groups(_elements, vgroups, vms, volumes) is False: if self._merge_diversity_groups(_elements, vgroups, vms, volumes) \
is False:
return {}, {}, {} return {}, {}, {}
if self._merge_exclusivity_groups(_elements, vgroups, vms, volumes) is False: if self._merge_exclusivity_groups(_elements, vgroups, vms, volumes) \
is False:
return {}, {}, {} return {}, {}, {}
if self._merge_affinity_groups(_elements, vgroups, vms, volumes) is False: if self._merge_affinity_groups(_elements, vgroups, vms, volumes) \
is False:
return {}, {}, {} return {}, {}, {}
''' delete all EX and DIV vgroups after merging ''' """ delete all EX and DIV vgroups after merging """
for vgk in vgroups.keys(): for vgk in vgroups.keys():
vg = vgroups[vgk] vg = vgroups[vgk]
if vg.vgroup_type == "DIV" or vg.vgroup_type == "EX": if vg.vgroup_type == "DIV" or vg.vgroup_type == "EX":
@ -186,13 +200,15 @@ class Parser(object):
if vk2 in _vms.keys(): if vk2 in _vms.keys():
link = VMLink(_vms[vk2]) link = VMLink(_vms[vk2])
if "bandwidth" in r["properties"].keys(): if "bandwidth" in r["properties"].keys():
link.nw_bandwidth = r["properties"]["bandwidth"]["min"] link.nw_bandwidth = \
r["properties"]["bandwidth"]["min"]
vm.vm_list.append(link) vm.vm_list.append(link)
def _set_volume_links(self, _elements, _vms, _volumes): def _set_volume_links(self, _elements, _vms, _volumes):
for rk, r in _elements.iteritems(): for rk, r in _elements.iteritems():
if r["type"] == "OS::Cinder::VolumeAttachment": if r["type"] == "OS::Cinder::VolumeAttachment":
self.logger.warn("Parser: do nothing for volume attachment at this version") self.logger.warn("Parser: do nothing for volume attachment at "
"this version")
return True return True
@ -219,23 +235,31 @@ class Parser(object):
for vk in r["properties"]["resources"]: for vk in r["properties"]["resources"]:
if vk in _vms.keys(): if vk in _vms.keys():
vgroup.subvgroups[vk] = _vms[vk] vgroup.subvgroups[vk] = _vms[vk]
_vms[vk].diversity_groups[rk] = vgroup.level + ":" + vgroup.name _vms[vk].diversity_groups[rk] = \
vgroup.level + ":" + vgroup.name
elif vk in _volumes.keys(): elif vk in _volumes.keys():
vgroup.subvgroups[vk] = _volumes[vk] vgroup.subvgroups[vk] = _volumes[vk]
_volumes[vk].diversity_groups[rk] = vgroup.level + ":" + vgroup.name _volumes[vk].diversity_groups[rk] = \
vgroup.level + ":" + vgroup.name
elif vk in _vgroups.keys(): elif vk in _vgroups.keys():
vg = _vgroups[vk] vg = _vgroups[vk]
if LEVELS.index(vg.level) > LEVELS.index(level): if LEVELS.index(vg.level) > LEVELS.index(level):
self.status = "grouping scope: nested group's level is higher" self.status = "grouping scope: nested " \
"group's level is higher"
return False return False
if vg.vgroup_type == "DIV" or vg.vgroup_type == "EX": if vg.vgroup_type == "DIV" or \
self.status = "group type (" + vg.vgroup_type + ") not allowd to be nested in diversity group at this version" vg.vgroup_type == "EX":
self.status = "group type (" + \
vg.vgroup_type + ") not allowd " \
"to be nested in diversity " \
"group at this version"
return False return False
vgroup.subvgroups[vk] = vg vgroup.subvgroups[vk] = vg
vg.diversity_groups[rk] = vgroup.level + ":" + vgroup.name vg.diversity_groups[rk] = vgroup.level + ":" + \
vgroup.name
else: else:
self.status = "invalid resource = " + vk self.status = "invalid resource = " + vk
return False return False
@ -254,23 +278,34 @@ class Parser(object):
for vk in r["properties"]["resources"]: for vk in r["properties"]["resources"]:
if vk in _vms.keys(): if vk in _vms.keys():
vgroup.subvgroups[vk] = _vms[vk] vgroup.subvgroups[vk] = _vms[vk]
_vms[vk].exclusivity_groups[rk] = vgroup.level + ":" + vgroup.name _vms[vk].exclusivity_groups[rk] = \
vgroup.level + ":" + vgroup.name
elif vk in _volumes.keys(): elif vk in _volumes.keys():
vgroup.subvgroups[vk] = _volumes[vk] vgroup.subvgroups[vk] = _volumes[vk]
_volumes[vk].exclusivity_groups[rk] = vgroup.level + ":" + vgroup.name _volumes[vk].exclusivity_groups[rk] = \
vgroup.level + ":" + vgroup.name
elif vk in _vgroups.keys(): elif vk in _vgroups.keys():
vg = _vgroups[vk] vg = _vgroups[vk]
if LEVELS.index(vg.level) > LEVELS.index(level): if LEVELS.index(vg.level) > LEVELS.index(level):
self.status = "grouping scope: nested group's level is higher" self.status = "grouping scope: nested " \
"group's level is higher"
return False return False
if vg.vgroup_type == "DIV" or vg.vgroup_type == "EX": if vg.vgroup_type == "DIV" or \
self.status = "group type (" + vg.vgroup_type + ") not allowd to be nested in exclusivity group at this version" vg.vgroup_type == "EX":
self.status = "group type (" + \
vg.vgroup_type + ") not allowd " \
"to be nested " \
"in " \
"exclusivity " \
"group at " \
"this version"
return False return False
vgroup.subvgroups[vk] = vg vgroup.subvgroups[vk] = vg
vg.exclusivity_groups[rk] = vgroup.level + ":" + vgroup.name vg.exclusivity_groups[rk] = vgroup.level + ":" + \
vgroup.name
else: else:
self.status = "invalid resource = " + vk self.status = "invalid resource = " + vk
return False return False
@ -278,7 +313,8 @@ class Parser(object):
return True return True
def _merge_affinity_groups(self, _elements, _vgroups, _vms, _volumes): def _merge_affinity_groups(self, _elements, _vgroups, _vms, _volumes):
affinity_map = {} # key is uuid of vm, volume, or vgroup & value is its parent vgroup # key is uuid of vm, volume, or vgroup & value is its parent vgroup
affinity_map = {}
for level in LEVELS: for level in LEVELS:
for rk, r in _elements.iteritems(): for rk, r in _elements.iteritems():
@ -292,7 +328,8 @@ class Parser(object):
else: else:
continue continue
self.logger.debug("Parser: merge for affinity = " + vgroup.name) self.logger.debug("Parser: merge for affinity = " +
vgroup.name)
for vk in r["properties"]["resources"]: for vk in r["properties"]["resources"]:
@ -302,8 +339,10 @@ class Parser(object):
affinity_map[vk] = vgroup affinity_map[vk] = vgroup
self._add_implicit_diversity_groups(vgroup, _vms[vk].diversity_groups) self._add_implicit_diversity_groups(
self._add_implicit_exclusivity_groups(vgroup, _vms[vk].exclusivity_groups) vgroup, _vms[vk].diversity_groups)
self._add_implicit_exclusivity_groups(
vgroup, _vms[vk].exclusivity_groups)
self._add_memberships(vgroup, _vms[vk]) self._add_memberships(vgroup, _vms[vk])
del _vms[vk] del _vms[vk]
@ -314,8 +353,10 @@ class Parser(object):
affinity_map[vk] = vgroup affinity_map[vk] = vgroup
self._add_implicit_diversity_groups(vgroup, _volumes[vk].diversity_groups) self._add_implicit_diversity_groups(
self._add_implicit_exclusivity_groups(vgroup, _volumes[vk].exclusivity_groups) vgroup, _volumes[vk].diversity_groups)
self._add_implicit_exclusivity_groups(
vgroup, _volumes[vk].exclusivity_groups)
self._add_memberships(vgroup, _volumes[vk]) self._add_memberships(vgroup, _volumes[vk])
del _volumes[vk] del _volumes[vk]
@ -324,19 +365,23 @@ class Parser(object):
vg = _vgroups[vk] vg = _vgroups[vk]
if LEVELS.index(vg.level) > LEVELS.index(level): if LEVELS.index(vg.level) > LEVELS.index(level):
self.status = "grouping scope: nested group's level is higher" self.status = "grouping scope: nested " \
"group's level is higher"
return False return False
if vg.vgroup_type == "DIV" or vg.vgroup_type == "EX": if vg.vgroup_type == "DIV" or vg.vgroup_type == "EX":
if self._merge_subgroups(vgroup, vg.subvgroups, _vms, _volumes, _vgroups, if self._merge_subgroups(
_elements, affinity_map) is False: vgroup, vg.subvgroups, _vms, _volumes,
_vgroups, _elements, affinity_map) \
is False:
return False return False
del _vgroups[vk] del _vgroups[vk]
else: else:
if self._exist_in_subgroups(vk, vgroup) is None: if self._exist_in_subgroups(vk, vgroup) is None:
if self._get_subgroups(vg, _elements, if self._get_subgroups(
_vgroups, _vms, _volumes, vg, _elements, _vgroups, _vms,
affinity_map) is False: _volumes, affinity_map) \
is False:
return False return False
vgroup.subvgroups[vk] = vg vgroup.subvgroups[vk] = vg
@ -344,24 +389,29 @@ class Parser(object):
affinity_map[vk] = vgroup affinity_map[vk] = vgroup
self._add_implicit_diversity_groups(vgroup, vg.diversity_groups) self._add_implicit_diversity_groups(
self._add_implicit_exclusivity_groups(vgroup, vg.exclusivity_groups) vgroup, vg.diversity_groups)
self._add_implicit_exclusivity_groups(
vgroup, vg.exclusivity_groups)
self._add_memberships(vgroup, vg) self._add_memberships(vgroup, vg)
del _vgroups[vk] del _vgroups[vk]
else:
else: # vk belongs to the other vgroup already or refer to invalid resource # vk belongs to the other vgroup already
# or refer to invalid resource
if vk not in affinity_map.keys(): if vk not in affinity_map.keys():
self.status = "invalid resource = " + vk self.status = "invalid resource = " + vk
return False return False
if affinity_map[vk].uuid != vgroup.uuid: if affinity_map[vk].uuid != vgroup.uuid:
if self._exist_in_subgroups(vk, vgroup) is None: if self._exist_in_subgroups(vk, vgroup) is None:
self._set_implicit_grouping(vk, vgroup, affinity_map, _vgroups) self._set_implicit_grouping(
vk, vgroup, affinity_map, _vgroups)
return True return True
def _merge_subgroups(self, _vgroup, _subgroups, _vms, _volumes, _vgroups, _elements, _affinity_map): def _merge_subgroups(self, _vgroup, _subgroups, _vms, _volumes, _vgroups,
_elements, _affinity_map):
for vk, _ in _subgroups.iteritems(): for vk, _ in _subgroups.iteritems():
if vk in _vms.keys(): if vk in _vms.keys():
_vgroup.subvgroups[vk] = _vms[vk] _vgroup.subvgroups[vk] = _vms[vk]
@ -369,8 +419,10 @@ class Parser(object):
_affinity_map[vk] = _vgroup _affinity_map[vk] = _vgroup
self._add_implicit_diversity_groups(_vgroup, _vms[vk].diversity_groups) self._add_implicit_diversity_groups(
self._add_implicit_exclusivity_groups(_vgroup, _vms[vk].exclusivity_groups) _vgroup, _vms[vk].diversity_groups)
self._add_implicit_exclusivity_groups(
_vgroup, _vms[vk].exclusivity_groups)
self._add_memberships(_vgroup, _vms[vk]) self._add_memberships(_vgroup, _vms[vk])
del _vms[vk] del _vms[vk]
@ -381,8 +433,10 @@ class Parser(object):
_affinity_map[vk] = _vgroup _affinity_map[vk] = _vgroup
self._add_implicit_diversity_groups(_vgroup, _volumes[vk].diversity_groups) self._add_implicit_diversity_groups(
self._add_implicit_exclusivity_groups(_vgroup, _volumes[vk].exclusivity_groups) _vgroup, _volumes[vk].diversity_groups)
self._add_implicit_exclusivity_groups(
_vgroup, _volumes[vk].exclusivity_groups)
self._add_memberships(_vgroup, _volumes[vk]) self._add_memberships(_vgroup, _volumes[vk])
del _volumes[vk] del _volumes[vk]
@ -391,7 +445,8 @@ class Parser(object):
vg = _vgroups[vk] vg = _vgroups[vk]
if LEVELS.index(vg.level) > LEVELS.index(_vgroup.level): if LEVELS.index(vg.level) > LEVELS.index(_vgroup.level):
self.status = "grouping scope: nested group's level is higher" self.status = "grouping scope: nested group's level is " \
"higher"
return False return False
if vg.vgroup_type == "DIV" or vg.vgroup_type == "EX": if vg.vgroup_type == "DIV" or vg.vgroup_type == "EX":
@ -402,7 +457,9 @@ class Parser(object):
del _vgroups[vk] del _vgroups[vk]
else: else:
if self._exist_in_subgroups(vk, _vgroup) is None: if self._exist_in_subgroups(vk, _vgroup) is None:
if self._get_subgroups(vg, _elements, _vgroups, _vms, _volumes, _affinity_map) is False: if self._get_subgroups(vg, _elements, _vgroups, _vms,
_volumes, _affinity_map) \
is False:
return False return False
_vgroup.subvgroups[vk] = vg _vgroup.subvgroups[vk] = vg
@ -410,13 +467,16 @@ class Parser(object):
_affinity_map[vk] = _vgroup _affinity_map[vk] = _vgroup
self._add_implicit_diversity_groups(_vgroup, vg.diversity_groups) self._add_implicit_diversity_groups(
self._add_implicit_exclusivity_groups(_vgroup, vg.exclusivity_groups) _vgroup, vg.diversity_groups)
self._add_implicit_exclusivity_groups(
_vgroup, vg.exclusivity_groups)
self._add_memberships(_vgroup, vg) self._add_memberships(_vgroup, vg)
del _vgroups[vk] del _vgroups[vk]
else:
else: # vk belongs to the other vgroup already or refer to invalid resource # vk belongs to the other vgroup already
# or refer to invalid resource
if vk not in _affinity_map.keys(): if vk not in _affinity_map.keys():
self.status = "invalid resource = " + vk self.status = "invalid resource = " + vk
return False return False
@ -427,7 +487,8 @@ class Parser(object):
return True return True
def _get_subgroups(self, _vgroup, _elements, _vgroups, _vms, _volumes, _affinity_map): def _get_subgroups(self, _vgroup, _elements, _vgroups, _vms, _volumes,
_affinity_map):
for vk in _elements[_vgroup.uuid]["properties"]["resources"]: for vk in _elements[_vgroup.uuid]["properties"]["resources"]:
@ -437,8 +498,10 @@ class Parser(object):
_affinity_map[vk] = _vgroup _affinity_map[vk] = _vgroup
self._add_implicit_diversity_groups(_vgroup, _vms[vk].diversity_groups) self._add_implicit_diversity_groups(
self._add_implicit_exclusivity_groups(_vgroup, _vms[vk].exclusivity_groups) _vgroup, _vms[vk].diversity_groups)
self._add_implicit_exclusivity_groups(
_vgroup, _vms[vk].exclusivity_groups)
self._add_memberships(_vgroup, _vms[vk]) self._add_memberships(_vgroup, _vms[vk])
del _vms[vk] del _vms[vk]
@ -449,8 +512,10 @@ class Parser(object):
_affinity_map[vk] = _vgroup _affinity_map[vk] = _vgroup
self._add_implicit_diversity_groups(_vgroup, _volumes[vk].diversity_groups) self._add_implicit_diversity_groups(
self._add_implicit_exclusivity_groups(_vgroup, _volumes[vk].exclusivity_groups) _vgroup, _volumes[vk].diversity_groups)
self._add_implicit_exclusivity_groups(
_vgroup, _volumes[vk].exclusivity_groups)
self._add_memberships(_vgroup, _volumes[vk]) self._add_memberships(_vgroup, _volumes[vk])
del _volumes[vk] del _volumes[vk]
@ -459,7 +524,8 @@ class Parser(object):
vg = _vgroups[vk] vg = _vgroups[vk]
if LEVELS.index(vg.level) > LEVELS.index(_vgroup.level): if LEVELS.index(vg.level) > LEVELS.index(_vgroup.level):
self.status = "grouping scope: nested group's level is higher" self.status = "grouping scope: nested group's level is " \
"higher"
return False return False
if vg.vgroup_type == "DIV" or vg.vgroup_type == "EX": if vg.vgroup_type == "DIV" or vg.vgroup_type == "EX":
@ -470,7 +536,9 @@ class Parser(object):
del _vgroups[vk] del _vgroups[vk]
else: else:
if self._exist_in_subgroups(vk, _vgroup) is None: if self._exist_in_subgroups(vk, _vgroup) is None:
if self._get_subgroups(vg, _elements, _vgroups, _vms, _volumes, _affinity_map) is False: if self._get_subgroups(
vg, _elements, _vgroups, _vms, _volumes,
_affinity_map) is False:
return False return False
_vgroup.subvgroups[vk] = vg _vgroup.subvgroups[vk] = vg
@ -478,8 +546,10 @@ class Parser(object):
_affinity_map[vk] = _vgroup _affinity_map[vk] = _vgroup
self._add_implicit_diversity_groups(_vgroup, vg.diversity_groups) self._add_implicit_diversity_groups(
self._add_implicit_exclusivity_groups(_vgroup, vg.exclusivity_groups) _vgroup, vg.diversity_groups)
self._add_implicit_exclusivity_groups(
_vgroup, vg.exclusivity_groups)
self._add_memberships(_vgroup, vg) self._add_memberships(_vgroup, vg)
del _vgroups[vk] del _vgroups[vk]
@ -490,7 +560,8 @@ class Parser(object):
if _affinity_map[vk].uuid != _vgroup.uuid: if _affinity_map[vk].uuid != _vgroup.uuid:
if self._exist_in_subgroups(vk, _vgroup) is None: if self._exist_in_subgroups(vk, _vgroup) is None:
self._set_implicit_grouping(vk, _vgroup, _affinity_map, _vgroups) self._set_implicit_grouping(
vk, _vgroup, _affinity_map, _vgroups)
return True return True
@ -529,26 +600,25 @@ class Parser(object):
def _set_implicit_grouping(self, _vk, _s_vg, _affinity_map, _vgroups): def _set_implicit_grouping(self, _vk, _s_vg, _affinity_map, _vgroups):
t_vg = _affinity_map[_vk] # where _vk currently belongs to t_vg = _affinity_map[_vk] # where _vk currently belongs to
if t_vg.uuid in _affinity_map.keys(): # if the parent belongs to the other parent vgroup # if the parent belongs to the other parent vgroup
self._set_implicit_grouping(t_vg.uuid, _s_vg, _affinity_map, _vgroups) if t_vg.uuid in _affinity_map.keys():
self._set_implicit_grouping(
t_vg.uuid, _s_vg, _affinity_map, _vgroups)
else: else:
if LEVELS.index(t_vg.level) > LEVELS.index(_s_vg.level): if LEVELS.index(t_vg.level) > LEVELS.index(_s_vg.level):
t_vg.level = _s_vg.level t_vg.level = _s_vg.level
'''
self.status = "Grouping scope: sub-group's level is larger"
return False
'''
if self._exist_in_subgroups(t_vg.uuid, _s_vg) is None: if self._exist_in_subgroups(t_vg.uuid, _s_vg) is None:
_s_vg.subvgroups[t_vg.uuid] = t_vg _s_vg.subvgroups[t_vg.uuid] = t_vg
t_vg.survgroup = _s_vg t_vg.survgroup = _s_vg
_affinity_map[t_vg.uuid] = _s_vg _affinity_map[t_vg.uuid] = _s_vg
self._add_implicit_diversity_groups(_s_vg, t_vg.diversity_groups) self._add_implicit_diversity_groups(
self._add_implicit_exclusivity_groups(_s_vg, t_vg.exclusivity_groups) _s_vg, t_vg.diversity_groups)
self._add_implicit_exclusivity_groups(
_s_vg, t_vg.exclusivity_groups)
self._add_memberships(_s_vg, t_vg) self._add_memberships(_s_vg, t_vg)
del _vgroups[t_vg.uuid] del _vgroups[t_vg.uuid]
@ -567,16 +637,19 @@ class Parser(object):
return containing_vg_uuid return containing_vg_uuid
def _set_vgroup_links(self, _vgroup, _vgroups, _vms, _volumes): def _set_vgroup_links(self, _vgroup, _vgroups, _vms, _volumes):
for _, svg in _vgroup.subvgroups.iteritems(): # currently, not define vgroup itself in pipe for _, svg in _vgroup.subvgroups.iteritems():
# currently, not define vgroup itself in pipe
if isinstance(svg, VM): if isinstance(svg, VM):
for vml in svg.vm_list: for vml in svg.vm_list:
found = False found = False
for _, tvgroup in _vgroups.iteritems(): for _, tvgroup in _vgroups.iteritems():
containing_vg_uuid = self._exist_in_subgroups(vml.node.uuid, tvgroup) containing_vg_uuid = self._exist_in_subgroups(
vml.node.uuid, tvgroup)
if containing_vg_uuid is not None: if containing_vg_uuid is not None:
found = True found = True
if containing_vg_uuid != _vgroup.uuid and \ if containing_vg_uuid != _vgroup.uuid and \
self._exist_in_subgroups(containing_vg_uuid, _vgroup) is None: self._exist_in_subgroups(
containing_vg_uuid, _vgroup) is None:
self._add_nw_link(vml, _vgroup) self._add_nw_link(vml, _vgroup)
break break
if found is False: if found is False:
@ -587,11 +660,13 @@ class Parser(object):
for voll in svg.volume_list: for voll in svg.volume_list:
found = False found = False
for _, tvgroup in _vgroups.iteritems(): for _, tvgroup in _vgroups.iteritems():
containing_vg_uuid = self._exist_in_subgroups(voll.node.uuid, tvgroup) containing_vg_uuid = self._exist_in_subgroups(
voll.node.uuid, tvgroup)
if containing_vg_uuid is not None: if containing_vg_uuid is not None:
found = True found = True
if containing_vg_uuid != _vgroup.uuid and \ if containing_vg_uuid != _vgroup.uuid and \
self._exist_in_subgroups(containing_vg_uuid, _vgroup) is None: self._exist_in_subgroups(
containing_vg_uuid, _vgroup) is None:
self._add_io_link(voll, _vgroup) self._add_io_link(voll, _vgroup)
break break
if found is False: if found is False:
@ -603,7 +678,8 @@ class Parser(object):
self._set_vgroup_links(svg, _vgroups, _vms, _volumes) self._set_vgroup_links(svg, _vgroups, _vms, _volumes)
for svgl in svg.vgroup_list: # svgl is a link to VM or Volume for svgl in svg.vgroup_list: # svgl is a link to VM or Volume
if self._exist_in_subgroups(svgl.node.uuid, _vgroup) is None: if self._exist_in_subgroups(svgl.node.uuid, _vgroup) \
is None:
self._add_nw_link(svgl, _vgroup) self._add_nw_link(svgl, _vgroup)
self._add_io_link(svgl, _vgroup) self._add_io_link(svgl, _vgroup)

View File

@ -13,9 +13,18 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
"""App."""
class App(object): class App(object):
"""App Class.
This class represents an app object that consists of the name and id of
the app, as well as the status and vms/volumes/vgroups it belogns to.
"""
def __init__(self, _app_id, _app_name, _action): def __init__(self, _app_id, _app_name, _action):
"""Init App."""
self.app_id = _app_id self.app_id = _app_id
self.app_name = _app_name self.app_name = _app_name
@ -30,21 +39,25 @@ class App(object):
self.status = 'requested' # Moved to "scheduled" (and then "placed") self.status = 'requested' # Moved to "scheduled" (and then "placed")
def add_vm(self, _vm, _host_name): def add_vm(self, _vm, _host_name):
"""Add vm to app, set status to scheduled."""
self.vms[_vm.uuid] = _vm self.vms[_vm.uuid] = _vm
self.vms[_vm.uuid].status = "scheduled" self.vms[_vm.uuid].status = "scheduled"
self.vms[_vm.uuid].host = _host_name self.vms[_vm.uuid].host = _host_name
def add_volume(self, _vol, _host_name): def add_volume(self, _vol, _host_name):
"""Add volume to app, set status to scheduled."""
self.vms[_vol.uuid] = _vol self.vms[_vol.uuid] = _vol
self.vms[_vol.uuid].status = "scheduled" self.vms[_vol.uuid].status = "scheduled"
self.vms[_vol.uuid].storage_host = _host_name self.vms[_vol.uuid].storage_host = _host_name
def add_vgroup(self, _vg, _host_name): def add_vgroup(self, _vg, _host_name):
"""Add vgroup to app, set status to scheduled."""
self.vgroups[_vg.uuid] = _vg self.vgroups[_vg.uuid] = _vg
self.vgroups[_vg.uuid].status = "scheduled" self.vgroups[_vg.uuid].status = "scheduled"
self.vgroups[_vg.uuid].host = _host_name self.vgroups[_vg.uuid].host = _host_name
def get_json_info(self): def get_json_info(self):
"""Return JSON info of App including vms, vols and vgs."""
vms = {} vms = {}
for vmk, vm in self.vms.iteritems(): for vmk, vm in self.vms.iteritems():
vms[vmk] = vm.get_json_info() vms[vmk] = vm.get_json_info()
@ -66,6 +79,7 @@ class App(object):
'VGroups': vgs} 'VGroups': vgs}
def log_in_info(self): def log_in_info(self):
"""Return in info related to login (time of login, app name, etc)."""
return {'action': self.request_type, return {'action': self.request_type,
'timestamp': self.timestamp_scheduled, 'timestamp': self.timestamp_scheduled,
'stack_id': self.app_id, 'stack_id': self.app_id,

View File

@ -1,4 +1,4 @@
# Version 2.0.2: Feb. 9, 2016 # Version 2.0.2:
# Set database keyspace # Set database keyspace
db_keyspace=valet_test db_keyspace=valet_test
@ -12,6 +12,3 @@ db_app_table=app
db_uuid_table=uuid_map db_uuid_table=uuid_map
#replication_factor=3 #replication_factor=3

View File

@ -13,12 +13,20 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
"""Configuration."""
import sys import sys
class Config(object): class Config(object):
"""Config Class.
This class consists of one function that reads client config options
from a file and sets the corresponding config variables of this class.
"""
def __init__(self): def __init__(self):
"""Init Config class."""
self.mode = None self.mode = None
self.db_keyspace = None self.db_keyspace = None
@ -32,6 +40,7 @@ class Config(object):
self.db_uuid_table = None self.db_uuid_table = None
def configure(self): def configure(self):
"""Read client config file for config options and return success."""
try: try:
f = open("./client.cfg", "r") f = open("./client.cfg", "r")
line = f.readline() line = f.readline()

View File

@ -13,12 +13,21 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
"""Event."""
import json import json
class Event(object): class Event(object):
"""Event Class.
This class represents an event and all the necessary metadata to
properly track it and set the data for the event. Handles object_action
events and build and run instance events.
"""
def __init__(self, _id): def __init__(self, _id):
"""Init Event Class."""
self.event_id = _id self.event_id = _id
self.exchange = None self.exchange = None
self.method = None self.method = None
@ -56,6 +65,13 @@ class Event(object):
self.uuid = None self.uuid = None
def set_data(self): def set_data(self):
"""Set event data depending on method(action) performed.
- If object_action, change data and calculate correct
compute resources for instance or Compute Node.
- If building_and_run_instance, get data from scheduler
and set heat values.
"""
if self.method == 'object_action': if self.method == 'object_action':
self.change_list = self.args['objinst']['nova_object.changes'] self.change_list = self.args['objinst']['nova_object.changes']
self.change_data = self.args['objinst']['nova_object.data'] self.change_data = self.args['objinst']['nova_object.data']
@ -103,33 +119,43 @@ class Event(object):
if 'host' in self.change_data.keys(): if 'host' in self.change_data.keys():
self.host = self.change_data['host'] self.host = self.change_data['host']
if 'deleted' in self.change_list and 'deleted' in self.change_data.keys(): if 'deleted' in self.change_list and 'deleted' in \
if self.change_data['deleted'] == "true" or self.change_data['deleted'] is True: self.change_data.keys():
if self.change_data['deleted'] == "true" or \
self.change_data['deleted'] is True:
self.status = "disabled" self.status = "disabled"
if 'vcpus' in self.change_list and 'vcpus' in self.change_data.keys(): if 'vcpus' in self.change_list and 'vcpus' in \
self.change_data.keys():
self.vcpus = self.change_data['vcpus'] self.vcpus = self.change_data['vcpus']
if 'vcpus_used' in self.change_list and 'vcpus_used' in self.change_data.keys(): if 'vcpus_used' in self.change_list and 'vcpus_used' in \
self.change_data.keys():
self.vcpus_used = self.change_data['vcpus_used'] self.vcpus_used = self.change_data['vcpus_used']
if 'memory_mb' in self.change_list and 'memory_mb' in self.change_data.keys(): if 'memory_mb' in self.change_list and 'memory_mb' in \
self.change_data.keys():
self.mem = self.change_data['memory_mb'] self.mem = self.change_data['memory_mb']
if 'free_ram_mb' in self.change_list and 'free_ram_mb' in self.change_data.keys(): if 'free_ram_mb' in self.change_list and 'free_ram_mb' in \
self.change_data.keys():
self.free_mem = self.change_data['free_ram_mb'] self.free_mem = self.change_data['free_ram_mb']
if 'local_gb' in self.change_list and 'local_gb' in self.change_data.keys(): if 'local_gb' in self.change_list and 'local_gb' in \
self.change_data.keys():
self.local_disk = self.change_data['local_gb'] self.local_disk = self.change_data['local_gb']
if 'free_disk_gb' in self.change_list and 'free_disk_gb' in self.change_data.keys(): if 'free_disk_gb' in self.change_list and 'free_disk_gb' in \
self.change_data.keys():
self.free_local_disk = self.change_data['free_disk_gb'] self.free_local_disk = self.change_data['free_disk_gb']
if 'disk_available_least' in self.change_list and \ if 'disk_available_least' in self.change_list and \
'disk_available_least' in self.change_data.keys(): 'disk_available_least' in self.change_data.keys():
self.disk_available_least = self.change_data['disk_available_least'] self.disk_available_least = \
self.change_data['disk_available_least']
if 'numa_topology' in self.change_list and 'numa_topology' in self.change_data.keys(): if 'numa_topology' in self.change_list and 'numa_topology' in \
self.change_data.keys():
str_numa_topology = self.change_data['numa_topology'] str_numa_topology = self.change_data['numa_topology']
try: try:
numa_topology = json.loads(str_numa_topology) numa_topology = json.loads(str_numa_topology)
@ -137,7 +163,10 @@ class Event(object):
if 'nova_object.data' in numa_topology.keys(): if 'nova_object.data' in numa_topology.keys():
if 'cells' in numa_topology['nova_object.data']: if 'cells' in numa_topology['nova_object.data']:
for cell in numa_topology['nova_object.data']['cells']: for cell in \
numa_topology[
'nova_object.data'
]['cells']:
self.numa_cell_list.append(cell) self.numa_cell_list.append(cell)
except (ValueError, KeyError, TypeError): except (ValueError, KeyError, TypeError):
@ -146,13 +175,18 @@ class Event(object):
elif self.method == 'build_and_run_instance': elif self.method == 'build_and_run_instance':
if 'scheduler_hints' in self.args['filter_properties'].keys(): if 'scheduler_hints' in self.args['filter_properties'].keys():
scheduler_hints = self.args['filter_properties']['scheduler_hints'] scheduler_hints = self.args[
'filter_properties'
]['scheduler_hints']
if 'heat_resource_name' in scheduler_hints.keys(): if 'heat_resource_name' in scheduler_hints.keys():
self.heat_resource_name = scheduler_hints['heat_resource_name'] self.heat_resource_name = \
scheduler_hints['heat_resource_name']
if 'heat_resource_uuid' in scheduler_hints.keys(): if 'heat_resource_uuid' in scheduler_hints.keys():
self.heat_resource_uuid = scheduler_hints['heat_resource_uuid'] self.heat_resource_uuid = \
scheduler_hints['heat_resource_uuid']
if 'heat_root_stack_id' in scheduler_hints.keys(): if 'heat_root_stack_id' in scheduler_hints.keys():
self.heat_root_stack_id = scheduler_hints['heat_root_stack_id'] self.heat_root_stack_id = \
scheduler_hints['heat_root_stack_id']
if 'heat_stack_name' in scheduler_hints.keys(): if 'heat_stack_name' in scheduler_hints.keys():
self.heat_stack_name = scheduler_hints['heat_stack_name'] self.heat_stack_name = scheduler_hints['heat_stack_name']

View File

@ -13,6 +13,8 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
"""Music Handler."""
import json import json
import operator import operator
from valet.api.db.models.music import Music from valet.api.db.models.music import Music
@ -20,8 +22,14 @@ from valet.engine.optimizer.db_connect.event import Event
class MusicHandler(object): class MusicHandler(object):
"""Music Handler Class.
This Class consists of functions that interact with the music
database for valet and returns/deletes/updates objects within it.
"""
def __init__(self, _config, _logger): def __init__(self, _config, _logger):
"""Init Music Handler."""
self.config = _config self.config = _config
self.logger = _logger self.logger = _logger
@ -32,9 +40,17 @@ class MusicHandler(object):
if self.config.mode.startswith("sim"): if self.config.mode.startswith("sim"):
self.music = Music() self.music = Music()
elif self.config.mode.startswith("live"): elif self.config.mode.startswith("live"):
self.music = Music(hosts=self.config.db_hosts, replication_factor=self.config.replication_factor) self.music = Music(
hosts=self.config.db_hosts,
replication_factor=self.config.replication_factor)
def init_db(self): def init_db(self):
"""Init Database.
This function initializes a database in Music by creating all the
necessary tables with the proper schemas in Music using API calls.
Return True if no exceptions are caught.
"""
self.logger.info("MusicHandler.init_db: create table") self.logger.info("MusicHandler.init_db: create table")
try: try:
@ -51,7 +67,8 @@ class MusicHandler(object):
'PRIMARY KEY': '(stack_id)' 'PRIMARY KEY': '(stack_id)'
} }
try: try:
self.music.create_table(self.config.db_keyspace, self.config.db_request_table, schema) self.music.create_table(self.config.db_keyspace,
self.config.db_request_table, schema)
except Exception as e: except Exception as e:
self.logger.error("MUSIC error: " + str(e)) self.logger.error("MUSIC error: " + str(e))
return False return False
@ -62,7 +79,8 @@ class MusicHandler(object):
'PRIMARY KEY': '(stack_id)' 'PRIMARY KEY': '(stack_id)'
} }
try: try:
self.music.create_table(self.config.db_keyspace, self.config.db_response_table, schema) self.music.create_table(self.config.db_keyspace,
self.config.db_response_table, schema)
except Exception as e: except Exception as e:
self.logger.error("MUSIC error: " + str(e)) self.logger.error("MUSIC error: " + str(e))
return False return False
@ -75,7 +93,8 @@ class MusicHandler(object):
'PRIMARY KEY': '(timestamp)' 'PRIMARY KEY': '(timestamp)'
} }
try: try:
self.music.create_table(self.config.db_keyspace, self.config.db_event_table, schema) self.music.create_table(self.config.db_keyspace,
self.config.db_event_table, schema)
except Exception as e: except Exception as e:
self.logger.error("MUSIC error: " + str(e)) self.logger.error("MUSIC error: " + str(e))
return False return False
@ -86,7 +105,8 @@ class MusicHandler(object):
'PRIMARY KEY': '(site_name)' 'PRIMARY KEY': '(site_name)'
} }
try: try:
self.music.create_table(self.config.db_keyspace, self.config.db_resource_table, schema) self.music.create_table(self.config.db_keyspace,
self.config.db_resource_table, schema)
except Exception as e: except Exception as e:
self.logger.error("MUSIC error: " + str(e)) self.logger.error("MUSIC error: " + str(e))
return False return False
@ -97,7 +117,8 @@ class MusicHandler(object):
'PRIMARY KEY': '(stack_id)' 'PRIMARY KEY': '(stack_id)'
} }
try: try:
self.music.create_table(self.config.db_keyspace, self.config.db_app_table, schema) self.music.create_table(self.config.db_keyspace,
self.config.db_app_table, schema)
except Exception as e: except Exception as e:
self.logger.error("MUSIC error: " + str(e)) self.logger.error("MUSIC error: " + str(e))
return False return False
@ -108,7 +129,8 @@ class MusicHandler(object):
'PRIMARY KEY': '(site_name)' 'PRIMARY KEY': '(site_name)'
} }
try: try:
self.music.create_table(self.config.db_keyspace, self.config.db_app_index_table, schema) self.music.create_table(self.config.db_keyspace,
self.config.db_app_index_table, schema)
except Exception as e: except Exception as e:
self.logger.error("MUSIC error: " + str(e)) self.logger.error("MUSIC error: " + str(e))
return False return False
@ -119,7 +141,8 @@ class MusicHandler(object):
'PRIMARY KEY': '(site_name)' 'PRIMARY KEY': '(site_name)'
} }
try: try:
self.music.create_table(self.config.db_keyspace, self.config.db_resource_index_table, schema) self.music.create_table(self.config.db_keyspace,
self.config.db_resource_index_table, schema)
except Exception as e: except Exception as e:
self.logger.error("MUSIC error: " + str(e)) self.logger.error("MUSIC error: " + str(e))
return False return False
@ -131,7 +154,8 @@ class MusicHandler(object):
'PRIMARY KEY': '(uuid)' 'PRIMARY KEY': '(uuid)'
} }
try: try:
self.music.create_table(self.config.db_keyspace, self.config.db_uuid_table, schema) self.music.create_table(self.config.db_keyspace,
self.config.db_uuid_table, schema)
except Exception as e: except Exception as e:
self.logger.error("MUSIC error: " + str(e)) self.logger.error("MUSIC error: " + str(e))
return False return False
@ -139,11 +163,18 @@ class MusicHandler(object):
return True return True
def get_events(self): def get_events(self):
"""Get Events.
This function obtains all events from the database and then
iterates through all of them to check the method and perform the
corresponding action on them. Return Event list.
"""
event_list = [] event_list = []
events = {} events = {}
try: try:
events = self.music.read_all_rows(self.config.db_keyspace, self.config.db_event_table) events = self.music.read_all_rows(self.config.db_keyspace,
self.config.db_event_table)
except Exception as e: except Exception as e:
self.logger.error("MUSIC error while reading events: " + str(e)) self.logger.error("MUSIC error while reading events: " + str(e))
return None return None
@ -155,30 +186,37 @@ class MusicHandler(object):
method = row['method'] method = row['method']
args_data = row['args'] args_data = row['args']
self.logger.debug("MusicHandler.get_events: event (" + event_id + ") is entered") self.logger.debug("MusicHandler.get_events: event (" +
event_id + ") is entered")
if exchange != "nova": if exchange != "nova":
if self.delete_event(event_id) is False: if self.delete_event(event_id) is False:
return None return None
self.logger.debug("MusicHandler.get_events: event exchange (" + exchange + ") is not supported") self.logger.debug("MusicHandler.get_events: event exchange "
"(" + exchange + ") is not supported")
continue continue
if method != 'object_action' and method != 'build_and_run_instance': if method != 'object_action' and method != 'build_and_run_' \
'instance':
if self.delete_event(event_id) is False: if self.delete_event(event_id) is False:
return None return None
self.logger.debug("MusicHandler.get_events: event method (" + method + ") is not considered") self.logger.debug("MusicHandler.get_events: event method "
"(" + method + ") is not considered")
continue continue
if len(args_data) == 0: if len(args_data) == 0:
if self.delete_event(event_id) is False: if self.delete_event(event_id) is False:
return None return None
self.logger.debug("MusicHandler.get_events: event does not have args") self.logger.debug("MusicHandler.get_events: event does not "
"have args")
continue continue
try: try:
args = json.loads(args_data) args = json.loads(args_data)
except (ValueError, KeyError, TypeError): except (ValueError, KeyError, TypeError):
self.logger.warn("MusicHandler.get_events: error while decoding to JSON event = " + method + ":" + event_id) self.logger.warn("MusicHandler.get_events: error while "
"decoding to JSON event = " + method +
":" + event_id)
continue continue
if method == 'object_action': if method == 'object_action':
@ -193,15 +231,19 @@ class MusicHandler(object):
change_data = objinst['nova_object.data'] change_data = objinst['nova_object.data']
if 'vm_state' in change_list and \ if 'vm_state' in change_list and \
'vm_state' in change_data.keys(): 'vm_state' in change_data.keys():
if change_data['vm_state'] == 'deleted' or \ if change_data['vm_state'] == \
change_data['vm_state'] == 'active': 'deleted' \
or change_data[
'vm_state'
] == 'active':
e = Event(event_id) e = Event(event_id)
e.exchange = exchange e.exchange = exchange
e.method = method e.method = method
e.args = args e.args = args
event_list.append(e) event_list.append(e)
else: else:
if self.delete_event(event_id) is False: if self.delete_event(event_id) \
is False:
return None return None
else: else:
if self.delete_event(event_id) is False: if self.delete_event(event_id) is False:
@ -257,7 +299,8 @@ class MusicHandler(object):
for e in event_list: for e in event_list:
e.set_data() e.set_data()
self.logger.debug("MusicHandler.get_events: event (" + e.event_id + ") is parsed") self.logger.debug("MusicHandler.get_events: event (" +
e.event_id + ") is parsed")
if e.method == "object_action": if e.method == "object_action":
if e.object_name == 'Instance': if e.object_name == 'Instance':
@ -265,17 +308,20 @@ class MusicHandler(object):
e.host is None or e.host == "none" or \ e.host is None or e.host == "none" or \
e.vcpus == -1 or e.mem == -1: e.vcpus == -1 or e.mem == -1:
error_event_list.append(e) error_event_list.append(e)
self.logger.warn("MusicHandler.get_events: data missing in instance object event") self.logger.warn("MusicHandler.get_events: data "
"missing in instance object event")
elif e.object_name == 'ComputeNode': elif e.object_name == 'ComputeNode':
if e.host is None or e.host == "none": if e.host is None or e.host == "none":
error_event_list.append(e) error_event_list.append(e)
self.logger.warn("MusicHandler.get_events: data missing in compute object event") self.logger.warn("MusicHandler.get_events: data "
"missing in compute object event")
elif e.method == "build_and_run_instance": elif e.method == "build_and_run_instance":
if e.uuid is None or e.uuid == "none": if e.uuid is None or e.uuid == "none":
error_event_list.append(e) error_event_list.append(e)
self.logger.warn("MusicHandler.get_events: data missing in build event") self.logger.warn("MusicHandler.get_events: data missing "
"in build event")
if len(error_event_list) > 0: if len(error_event_list) > 0:
event_list[:] = [e for e in event_list if e not in error_event_list] event_list[:] = [e for e in event_list if e not in error_event_list]
@ -286,6 +332,7 @@ class MusicHandler(object):
return event_list return event_list
def delete_event(self, _event_id): def delete_event(self, _event_id):
"""Return True after deleting corresponding event row in db."""
try: try:
self.music.delete_row_eventually(self.config.db_keyspace, self.music.delete_row_eventually(self.config.db_keyspace,
self.config.db_event_table, self.config.db_event_table,
@ -297,12 +344,14 @@ class MusicHandler(object):
return True return True
def get_uuid(self, _uuid): def get_uuid(self, _uuid):
"""Return h_uuid and s_uuid from matching _uuid row in music db."""
h_uuid = "none" h_uuid = "none"
s_uuid = "none" s_uuid = "none"
row = {} row = {}
try: try:
row = self.music.read_row(self.config.db_keyspace, self.config.db_uuid_table, 'uuid', _uuid) row = self.music.read_row(self.config.db_keyspace,
self.config.db_uuid_table, 'uuid', _uuid)
except Exception as e: except Exception as e:
self.logger.error("MUSIC error while reading uuid: " + str(e)) self.logger.error("MUSIC error while reading uuid: " + str(e))
return None return None
@ -311,18 +360,22 @@ class MusicHandler(object):
h_uuid = row[row.keys()[0]]['h_uuid'] h_uuid = row[row.keys()[0]]['h_uuid']
s_uuid = row[row.keys()[0]]['s_uuid'] s_uuid = row[row.keys()[0]]['s_uuid']
self.logger.info("MusicHandler.get_uuid: get heat uuid (" + h_uuid + ") for uuid = " + _uuid) self.logger.info("MusicHandler.get_uuid: get heat uuid (" +
h_uuid + ") for uuid = " + _uuid)
else: else:
self.logger.debug("MusicHandler.get_uuid: heat uuid not found") self.logger.debug("MusicHandler.get_uuid: heat uuid not found")
return h_uuid, s_uuid return h_uuid, s_uuid
def put_uuid(self, _e): def put_uuid(self, _e):
"""Insert uuid, h_uuid and s_uuid from event into new row in db."""
heat_resource_uuid = "none" heat_resource_uuid = "none"
heat_root_stack_id = "none" heat_root_stack_id = "none"
if _e.heat_resource_uuid is not None and _e.heat_resource_uuid != "none": if _e.heat_resource_uuid is not None and \
_e.heat_resource_uuid != "none":
heat_resource_uuid = _e.heat_resource_uuid heat_resource_uuid = _e.heat_resource_uuid
if _e.heat_root_stack_id is not None and _e.heat_root_stack_id != "none": if _e.heat_root_stack_id is not None and \
_e.heat_root_stack_id != "none":
heat_root_stack_id = _e.heat_root_stack_id heat_root_stack_id = _e.heat_root_stack_id
data = { data = {
@ -332,7 +385,8 @@ class MusicHandler(object):
} }
try: try:
self.music.create_row(self.config.db_keyspace, self.config.db_uuid_table, data) self.music.create_row(self.config.db_keyspace,
self.config.db_uuid_table, data)
except Exception as e: except Exception as e:
self.logger.error("MUSIC error while inserting uuid: " + str(e)) self.logger.error("MUSIC error while inserting uuid: " + str(e))
return False return False
@ -342,8 +396,11 @@ class MusicHandler(object):
return True return True
def delete_uuid(self, _k): def delete_uuid(self, _k):
"""Return True after deleting row corresponding to event uuid."""
try: try:
self.music.delete_row_eventually(self.config.db_keyspace, self.config.db_uuid_table, 'uuid', _k) self.music.delete_row_eventually(self.config.db_keyspace,
self.config.db_uuid_table, 'uuid',
_k)
except Exception as e: except Exception as e:
self.logger.error("MUSIC error while deleting uuid: " + str(e)) self.logger.error("MUSIC error while deleting uuid: " + str(e))
return False return False
@ -351,17 +408,20 @@ class MusicHandler(object):
return True return True
def get_requests(self): def get_requests(self):
"""Return list of requests that consists of all rows in a db table."""
request_list = [] request_list = []
requests = {} requests = {}
try: try:
requests = self.music.read_all_rows(self.config.db_keyspace, self.config.db_request_table) requests = self.music.read_all_rows(self.config.db_keyspace,
self.config.db_request_table)
except Exception as e: except Exception as e:
self.logger.error("MUSIC error while reading requests: " + str(e)) self.logger.error("MUSIC error while reading requests: " + str(e))
return None return None
if len(requests) > 0: if len(requests) > 0:
self.logger.info("MusicHandler.get_requests: placement request arrived") self.logger.info("MusicHandler.get_requests: placement request "
"arrived")
for _, row in requests.iteritems(): for _, row in requests.iteritems():
self.logger.info(" request_id = " + row['stack_id']) self.logger.info(" request_id = " + row['stack_id'])
@ -373,6 +433,7 @@ class MusicHandler(object):
return request_list return request_list
def put_result(self, _result): def put_result(self, _result):
"""Return True after putting result in db(create and delete rows)."""
for appk, app_placement in _result.iteritems(): for appk, app_placement in _result.iteritems():
data = { data = {
'stack_id': appk, 'stack_id': appk,
@ -380,12 +441,15 @@ class MusicHandler(object):
} }
try: try:
self.music.create_row(self.config.db_keyspace, self.config.db_response_table, data) self.music.create_row(self.config.db_keyspace,
self.config.db_response_table, data)
except Exception as e: except Exception as e:
self.logger.error("MUSIC error while putting placement result: " + str(e)) self.logger.error("MUSIC error while putting placement "
"result: " + str(e))
return False return False
self.logger.info("MusicHandler.put_result: " + appk + " placement result added") self.logger.info("MusicHandler.put_result: " + appk +
" placement result added")
for appk in _result.keys(): for appk in _result.keys():
try: try:
@ -393,37 +457,48 @@ class MusicHandler(object):
self.config.db_request_table, self.config.db_request_table,
'stack_id', appk) 'stack_id', appk)
except Exception as e: except Exception as e:
self.logger.error("MUSIC error while deleting handled request: " + str(e)) self.logger.error("MUSIC error while deleting handled "
"request: " + str(e))
return False return False
self.logger.info("MusicHandler.put_result: " + appk + " placement request deleted") self.logger.info("MusicHandler.put_result: " +
appk + " placement request deleted")
return True return True
def get_resource_status(self, _k): def get_resource_status(self, _k):
"""Get Row of resource related to '_k' and return resource as json."""
json_resource = {} json_resource = {}
row = {} row = {}
try: try:
row = self.music.read_row(self.config.db_keyspace, self.config.db_resource_table, 'site_name', _k, self.logger) row = self.music.read_row(self.config.db_keyspace,
self.config.db_resource_table,
'site_name', _k, self.logger)
except Exception as e: except Exception as e:
self.logger.error("MUSIC error while reading resource status: " + str(e)) self.logger.error("MUSIC error while reading resource status: " +
str(e))
return None return None
if len(row) > 0: if len(row) > 0:
str_resource = row[row.keys()[0]]['resource'] str_resource = row[row.keys()[0]]['resource']
json_resource = json.loads(str_resource) json_resource = json.loads(str_resource)
self.logger.info("MusicHandler.get_resource_status: get resource status") self.logger.info("MusicHandler.get_resource_status: get resource "
"status")
return json_resource return json_resource
def update_resource_status(self, _k, _status): def update_resource_status(self, _k, _status):
"""Update resource _k to the new _status (flavors, lgs, hosts, etc)."""
row = {} row = {}
try: try:
row = self.music.read_row(self.config.db_keyspace, self.config.db_resource_table, 'site_name', _k) row = self.music.read_row(self.config.db_keyspace,
self.config.db_resource_table,
'site_name', _k)
except Exception as e: except Exception as e:
self.logger.error("MUSIC error while reading resource status: " + str(e)) self.logger.error("MUSIC error while reading resource status: " +
str(e))
return False return False
json_resource = {} json_resource = {}
@ -485,7 +560,8 @@ class MusicHandler(object):
self.config.db_resource_table, self.config.db_resource_table,
'site_name', _k) 'site_name', _k)
except Exception as e: except Exception as e:
self.logger.error("MUSIC error while deleting resource status: " + str(e)) self.logger.error("MUSIC error while deleting resource "
"status: " + str(e))
return False return False
else: else:
@ -497,34 +573,40 @@ class MusicHandler(object):
} }
try: try:
self.music.create_row(self.config.db_keyspace, self.config.db_resource_table, data) self.music.create_row(self.config.db_keyspace,
self.config.db_resource_table, data)
except Exception as e: except Exception as e:
self.logger.error("MUSIC error: " + str(e)) self.logger.error("MUSIC error: " + str(e))
return False return False
self.logger.info("MusicHandler.update_resource_status: resource status updated") self.logger.info("MusicHandler.update_resource_status: resource status "
"updated")
return True return True
def update_resource_log_index(self, _k, _index): def update_resource_log_index(self, _k, _index):
"""Update resource log index in database and return True."""
data = { data = {
'site_name': _k, 'site_name': _k,
'resource_log_index': str(_index) 'resource_log_index': str(_index)
} }
try: try:
self.music.update_row_eventually(self.config.db_keyspace, self.music.update_row_eventually(
self.config.db_resource_index_table, self.config.db_keyspace, self.config.db_resource_index_table,
'site_name', _k, data) 'site_name', _k, data)
except Exception as e: except Exception as e:
self.logger.error("MUSIC error while updating resource log index: " + str(e)) self.logger.error("MUSIC error while updating resource log "
"index: " + str(e))
return False return False
self.logger.info("MusicHandler.update_resource_log_index: resource log index updated") self.logger.info("MusicHandler.update_resource_log_index: resource log "
"index updated")
return True return True
def update_app_log_index(self, _k, _index): def update_app_log_index(self, _k, _index):
"""Update app log index in database and return True."""
data = { data = {
'site_name': _k, 'site_name': _k,
'app_log_index': str(_index) 'app_log_index': str(_index)
@ -535,16 +617,21 @@ class MusicHandler(object):
self.config.db_app_index_table, self.config.db_app_index_table,
'site_name', _k, data) 'site_name', _k, data)
except Exception as e: except Exception as e:
self.logger.error("MUSIC error while updating app log index: " + str(e)) self.logger.error("MUSIC error while updating app log index: " +
str(e))
return False return False
self.logger.info("MusicHandler.update_app_log_index: app log index updated") self.logger.info("MusicHandler.update_app_log_index: app log index "
"updated")
return True return True
def add_app(self, _k, _app_data): def add_app(self, _k, _app_data):
"""Add app to database in music and return True."""
try: try:
self.music.delete_row_eventually(self.config.db_keyspace, self.config.db_app_table, 'stack_id', _k) self.music.delete_row_eventually(
self.config.db_keyspace, self.config.db_app_table,
'stack_id', _k)
except Exception as e: except Exception as e:
self.logger.error("MUSIC error while deleting app: " + str(e)) self.logger.error("MUSIC error while deleting app: " + str(e))
return False return False
@ -558,7 +645,8 @@ class MusicHandler(object):
} }
try: try:
self.music.create_row(self.config.db_keyspace, self.config.db_app_table, data) self.music.create_row(self.config.db_keyspace,
self.config.db_app_table, data)
except Exception as e: except Exception as e:
self.logger.error("MUSIC error while inserting app: " + str(e)) self.logger.error("MUSIC error while inserting app: " + str(e))
return False return False
@ -568,11 +656,14 @@ class MusicHandler(object):
return True return True
def get_app_info(self, _s_uuid): def get_app_info(self, _s_uuid):
"""Get app info for stack id and return as json object."""
json_app = {} json_app = {}
row = {} row = {}
try: try:
row = self.music.read_row(self.config.db_keyspace, self.config.db_app_table, 'stack_id', _s_uuid) row = self.music.read_row(self.config.db_keyspace,
self.config.db_app_table, 'stack_id',
_s_uuid)
except Exception as e: except Exception as e:
self.logger.error("MUSIC error while reading app info: " + str(e)) self.logger.error("MUSIC error while reading app info: " + str(e))
return None return None
@ -583,8 +674,9 @@ class MusicHandler(object):
return json_app return json_app
# TODO: get all other VMs related to this VM # TODO(UNKNOWN): get all other VMs related to this VM
def get_vm_info(self, _s_uuid, _h_uuid, _host): def get_vm_info(self, _s_uuid, _h_uuid, _host):
"""Return vm info connected with ids and host passed in."""
updated = False updated = False
json_app = {} json_app = {}
@ -592,7 +684,9 @@ class MusicHandler(object):
row = {} row = {}
try: try:
row = self.music.read_row(self.config.db_keyspace, self.config.db_app_table, 'stack_id', _s_uuid) row = self.music.read_row(self.config.db_keyspace,
self.config.db_app_table, 'stack_id',
_s_uuid)
except Exception as e: except Exception as e:
self.logger.error("MUSIC error: " + str(e)) self.logger.error("MUSIC error: " + str(e))
return None return None
@ -608,8 +702,10 @@ class MusicHandler(object):
if vm["host"] != _host: if vm["host"] != _host:
vm["planned_host"] = vm["host"] vm["planned_host"] = vm["host"]
vm["host"] = _host vm["host"] = _host
self.logger.warn("db: conflicted placement decision from Ostro") self.logger.warn("db: conflicted placement "
# TODO: affinity, diversity, exclusivity validation check "decision from Ostro")
# TODO(UNKOWN): affinity, diversity,
# exclusivity check
updated = True updated = True
else: else:
self.logger.debug("db: placement as expected") self.logger.debug("db: placement as expected")
@ -621,10 +717,12 @@ class MusicHandler(object):
vm_info = vm vm_info = vm
break break
else: else:
self.logger.error("MusicHandler.get_vm_info: vm is missing from stack") self.logger.error("MusicHandler.get_vm_info: vm is missing "
"from stack")
else: else:
self.logger.warn("MusicHandler.get_vm_info: not found stack for update = " + _s_uuid) self.logger.warn("MusicHandler.get_vm_info: not found stack for "
"update = " + _s_uuid)
if updated is True: if updated is True:
if self.add_app(_s_uuid, json_app) is False: if self.add_app(_s_uuid, json_app) is False:
@ -633,12 +731,15 @@ class MusicHandler(object):
return vm_info return vm_info
def update_vm_info(self, _s_uuid, _h_uuid): def update_vm_info(self, _s_uuid, _h_uuid):
"""Return true if vm's heat and heat stack ids are updated in db."""
updated = False updated = False
json_app = {} json_app = {}
row = {} row = {}
try: try:
row = self.music.read_row(self.config.db_keyspace, self.config.db_app_table, 'stack_id', _s_uuid) row = self.music.read_row(self.config.db_keyspace,
self.config.db_app_table, 'stack_id',
_s_uuid)
except Exception as e: except Exception as e:
self.logger.error("MUSIC error: " + str(e)) self.logger.error("MUSIC error: " + str(e))
return False return False
@ -659,10 +760,12 @@ class MusicHandler(object):
break break
else: else:
self.logger.error("MusicHandler.update_vm_info: vm is missing from stack") self.logger.error("MusicHandler.update_vm_info: vm is missing "
"from stack")
else: else:
self.logger.warn("MusicHandler.update_vm_info: not found stack for update = " + _s_uuid) self.logger.warn("MusicHandler.update_vm_info: not found stack for "
"update = " + _s_uuid)
if updated is True: if updated is True:
if self.add_app(_s_uuid, json_app) is False: if self.add_app(_s_uuid, json_app) is False:

View File

@ -13,17 +13,25 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
from valet.engine.optimizer.app_manager.app_topology_base import VGroup, VM, LEVELS """ConstraintSolver."""
from valet.engine.optimizer.ostro.openstack_filters import AggregateInstanceExtraSpecsFilter
from valet.engine.optimizer.ostro.openstack_filters import AvailabilityZoneFilter from valet.engine.optimizer.app_manager.app_topology_base \
import VGroup, VM, LEVELS
from valet.engine.optimizer.ostro.openstack_filters \
import AggregateInstanceExtraSpecsFilter
from valet.engine.optimizer.ostro.openstack_filters \
import AvailabilityZoneFilter
from valet.engine.optimizer.ostro.openstack_filters import CoreFilter from valet.engine.optimizer.ostro.openstack_filters import CoreFilter
from valet.engine.optimizer.ostro.openstack_filters import DiskFilter from valet.engine.optimizer.ostro.openstack_filters import DiskFilter
from valet.engine.optimizer.ostro.openstack_filters import RamFilter from valet.engine.optimizer.ostro.openstack_filters import RamFilter
class ConstraintSolver(object): class ConstraintSolver(object):
"""ConstraintSolver."""
def __init__(self, _logger): def __init__(self, _logger):
"""Initialization."""
"""Instantiate filters to help enforce constraints."""
self.logger = _logger self.logger = _logger
self.openstack_AZ = AvailabilityZoneFilter(self.logger) self.openstack_AZ = AvailabilityZoneFilter(self.logger)
@ -34,12 +42,15 @@ class ConstraintSolver(object):
self.status = "success" self.status = "success"
def compute_candidate_list(self, _level, _n, _node_placements, _avail_resources, _avail_logical_groups): def compute_candidate_list(self, _level, _n, _node_placements,
_avail_resources, _avail_logical_groups):
"""Compute candidate list for the given VGroup or VM."""
candidate_list = [] candidate_list = []
''' when replanning ''' """When replanning."""
if _n.node.host is not None and len(_n.node.host) > 0: if _n.node.host is not None and len(_n.node.host) > 0:
self.logger.debug("ConstraintSolver: reconsider with given candidates") self.logger.debug("ConstraintSolver: reconsider with given "
"candidates")
for hk in _n.node.host: for hk in _n.node.host:
for ark, ar in _avail_resources.iteritems(): for ark, ar in _avail_resources.iteritems():
if hk == ark: if hk == ark:
@ -52,135 +63,166 @@ class ConstraintSolver(object):
self.logger.warn("ConstraintSolver: " + self.status) self.logger.warn("ConstraintSolver: " + self.status)
return candidate_list return candidate_list
else: else:
self.logger.debug("ConstraintSolver: num of candidates = " + str(len(candidate_list))) self.logger.debug("ConstraintSolver: num of candidates = " +
str(len(candidate_list)))
''' availability zone constraint ''' """Availability zone constraint."""
if isinstance(_n.node, VGroup) or isinstance(_n.node, VM): if isinstance(_n.node, VGroup) or isinstance(_n.node, VM):
if (isinstance(_n.node, VM) and _n.node.availability_zone is not None) or \ if (isinstance(_n.node, VM) and _n.node.availability_zone
(isinstance(_n.node, VGroup) and len(_n.node.availability_zone_list) > 0): is not None) or (isinstance(_n.node, VGroup) and
len(_n.node.availability_zone_list) > 0):
self._constrain_availability_zone(_level, _n, candidate_list) self._constrain_availability_zone(_level, _n, candidate_list)
if len(candidate_list) == 0: if len(candidate_list) == 0:
self.status = "violate availability zone constraint for node = " + _n.node.name self.status = "violate availability zone constraint for " \
"node = " + _n.node.name
self.logger.error("ConstraintSolver: " + self.status) self.logger.error("ConstraintSolver: " + self.status)
return candidate_list return candidate_list
else: else:
self.logger.debug("ConstraintSolver: done availability_zone constraint") self.logger.debug("ConstraintSolver: done availability_"
"zone constraint")
''' host aggregate constraint ''' """Host aggregate constraint."""
if isinstance(_n.node, VGroup) or isinstance(_n.node, VM): if isinstance(_n.node, VGroup) or isinstance(_n.node, VM):
if len(_n.node.extra_specs_list) > 0: if len(_n.node.extra_specs_list) > 0:
self._constrain_host_aggregates(_level, _n, candidate_list) self._constrain_host_aggregates(_level, _n, candidate_list)
if len(candidate_list) == 0: if len(candidate_list) == 0:
self.status = "violate host aggregate constraint for node = " + _n.node.name self.status = "violate host aggregate constraint for " \
"node = " + _n.node.name
self.logger.error("ConstraintSolver: " + self.status) self.logger.error("ConstraintSolver: " + self.status)
return candidate_list return candidate_list
else: else:
self.logger.debug("ConstraintSolver: done host_aggregate constraint") self.logger.debug("ConstraintSolver: done host_aggregate "
"constraint")
''' cpu capacity constraint ''' """CPU capacity constraint."""
if isinstance(_n.node, VGroup) or isinstance(_n.node, VM): if isinstance(_n.node, VGroup) or isinstance(_n.node, VM):
self._constrain_cpu_capacity(_level, _n, candidate_list) self._constrain_cpu_capacity(_level, _n, candidate_list)
if len(candidate_list) == 0: if len(candidate_list) == 0:
self.status = "violate cpu capacity constraint for node = " + _n.node.name self.status = "violate cpu capacity constraint for " \
"node = " + _n.node.name
self.logger.error("ConstraintSolver: " + self.status) self.logger.error("ConstraintSolver: " + self.status)
return candidate_list return candidate_list
else: else:
self.logger.debug("ConstraintSolver: done cpu capacity constraint") self.logger.debug("ConstraintSolver: done cpu capacity "
"constraint")
''' memory capacity constraint ''' """Memory capacity constraint."""
if isinstance(_n.node, VGroup) or isinstance(_n.node, VM): if isinstance(_n.node, VGroup) or isinstance(_n.node, VM):
self._constrain_mem_capacity(_level, _n, candidate_list) self._constrain_mem_capacity(_level, _n, candidate_list)
if len(candidate_list) == 0: if len(candidate_list) == 0:
self.status = "violate memory capacity constraint for node = " + _n.node.name self.status = "violate memory capacity constraint for " \
"node = " + _n.node.name
self.logger.error("ConstraintSolver: " + self.status) self.logger.error("ConstraintSolver: " + self.status)
return candidate_list return candidate_list
else: else:
self.logger.debug("ConstraintSolver: done memory capacity constraint") self.logger.debug("ConstraintSolver: done memory capacity "
"constraint")
''' local disk capacity constraint ''' """Local disk capacity constraint."""
if isinstance(_n.node, VGroup) or isinstance(_n.node, VM): if isinstance(_n.node, VGroup) or isinstance(_n.node, VM):
self._constrain_local_disk_capacity(_level, _n, candidate_list) self._constrain_local_disk_capacity(_level, _n, candidate_list)
if len(candidate_list) == 0: if len(candidate_list) == 0:
self.status = "violate local disk capacity constraint for node = " + _n.node.name self.status = "violate local disk capacity constraint for " \
"node = " + _n.node.name
self.logger.error("ConstraintSolver: " + self.status) self.logger.error("ConstraintSolver: " + self.status)
return candidate_list return candidate_list
else: else:
self.logger.debug("ConstraintSolver: done local disk capacity constraint") self.logger.debug("ConstraintSolver: done local disk capacity "
"constraint")
''' network bandwidth constraint ''' """Network bandwidth constraint."""
self._constrain_nw_bandwidth_capacity(_level, _n, _node_placements, candidate_list) self._constrain_nw_bandwidth_capacity(_level, _n, _node_placements,
candidate_list)
if len(candidate_list) == 0: if len(candidate_list) == 0:
self.status = "violate nw bandwidth capacity constraint for node = " + _n.node.name self.status = "violate nw bandwidth capacity constraint for " \
"node = " + _n.node.name
self.logger.error("ConstraintSolver: " + self.status) self.logger.error("ConstraintSolver: " + self.status)
return candidate_list return candidate_list
else: else:
self.logger.debug("ConstraintSolver: done bandwidth capacity constraint") self.logger.debug("ConstraintSolver: done bandwidth capacity "
"constraint")
''' diversity constraint ''' """Diversity constraint."""
if len(_n.node.diversity_groups) > 0: if len(_n.node.diversity_groups) > 0:
for _, diversity_id in _n.node.diversity_groups.iteritems(): for _, diversity_id in _n.node.diversity_groups.iteritems():
if diversity_id.split(":")[0] == _level: if diversity_id.split(":")[0] == _level:
if diversity_id in _avail_logical_groups.keys(): if diversity_id in _avail_logical_groups.keys():
self._constrain_diversity_with_others(_level, diversity_id, candidate_list) self._constrain_diversity_with_others(_level,
diversity_id,
candidate_list)
if len(candidate_list) == 0: if len(candidate_list) == 0:
break break
if len(candidate_list) == 0: if len(candidate_list) == 0:
self.status = "violate diversity constraint for node = " + _n.node.name self.status = "violate diversity constraint for " \
"node = " + _n.node.name
self.logger.error("ConstraintSolver: " + self.status) self.logger.error("ConstraintSolver: " + self.status)
return candidate_list return candidate_list
else: else:
self._constrain_diversity(_level, _n, _node_placements, candidate_list) self._constrain_diversity(_level, _n, _node_placements,
candidate_list)
if len(candidate_list) == 0: if len(candidate_list) == 0:
self.status = "violate diversity constraint for node = " + _n.node.name self.status = "violate diversity constraint for " \
"node = " + _n.node.name
self.logger.error("ConstraintSolver: " + self.status) self.logger.error("ConstraintSolver: " + self.status)
return candidate_list return candidate_list
else: else:
self.logger.debug("ConstraintSolver: done diversity_group constraint") self.logger.debug("ConstraintSolver: done diversity_group "
"constraint")
''' exclusivity constraint ''' """Exclusivity constraint."""
exclusivities = self.get_exclusivities(_n.node.exclusivity_groups, _level) exclusivities = self.get_exclusivities(_n.node.exclusivity_groups,
_level)
if len(exclusivities) > 1: if len(exclusivities) > 1:
self.status = "violate exclusivity constraint (more than one exclusivity) for node = " + _n.node.name self.status = "violate exclusivity constraint (more than one " \
"exclusivity) for node = " + _n.node.name
self.logger.error("ConstraintSolver: " + self.status) self.logger.error("ConstraintSolver: " + self.status)
return [] return []
else: else:
if len(exclusivities) == 1: if len(exclusivities) == 1:
exclusivity_id = exclusivities[exclusivities.keys()[0]] exclusivity_id = exclusivities[exclusivities.keys()[0]]
if exclusivity_id.split(":")[0] == _level: if exclusivity_id.split(":")[0] == _level:
self._constrain_exclusivity(_level, exclusivity_id, candidate_list) self._constrain_exclusivity(_level, exclusivity_id,
candidate_list)
if len(candidate_list) == 0: if len(candidate_list) == 0:
self.status = "violate exclusivity constraint for node = " + _n.node.name self.status = "violate exclusivity constraint for " \
"node = " + _n.node.name
self.logger.error("ConstraintSolver: " + self.status) self.logger.error("ConstraintSolver: " + self.status)
return candidate_list return candidate_list
else: else:
self.logger.debug("ConstraintSolver: done exclusivity_group constraint") self.logger.debug("ConstraintSolver: done exclusivity "
"group constraint")
else: else:
self._constrain_non_exclusivity(_level, candidate_list) self._constrain_non_exclusivity(_level, candidate_list)
if len(candidate_list) == 0: if len(candidate_list) == 0:
self.status = "violate non-exclusivity constraint for node = " + _n.node.name self.status = "violate non-exclusivity constraint for " \
"node = " + _n.node.name
self.logger.error("ConstraintSolver: " + self.status) self.logger.error("ConstraintSolver: " + self.status)
return candidate_list return candidate_list
else: else:
self.logger.debug("ConstraintSolver: done non-exclusivity_group constraint") self.logger.debug("ConstraintSolver: done non-exclusivity_"
"group constraint")
''' affinity constraint ''' """Affinity constraint."""
affinity_id = _n.get_affinity_id() # level:name, except name == "any" affinity_id = _n.get_affinity_id() # level:name, except name == "any"
if affinity_id is not None: if affinity_id is not None:
if affinity_id.split(":")[0] == _level: if affinity_id.split(":")[0] == _level:
if affinity_id in _avail_logical_groups.keys(): if affinity_id in _avail_logical_groups.keys():
self._constrain_affinity(_level, affinity_id, candidate_list) self._constrain_affinity(_level, affinity_id,
candidate_list)
if len(candidate_list) == 0: if len(candidate_list) == 0:
self.status = "violate affinity constraint for node = " + _n.node.name self.status = "violate affinity constraint for " \
"node = " + _n.node.name
self.logger.error("ConstraintSolver: " + self.status) self.logger.error("ConstraintSolver: " + self.status)
return candidate_list return candidate_list
else: else:
self.logger.debug("ConstraintSolver: done affinity_group constraint") self.logger.debug("ConstraintSolver: done affinity_"
"group constraint")
return candidate_list return candidate_list
''' """
constraint modules Constraint modules.
''' """
def _constrain_affinity(self, _level, _affinity_id, _candidate_list): def _constrain_affinity(self, _level, _affinity_id, _candidate_list):
conflict_list = [] conflict_list = []
@ -191,11 +233,14 @@ class ConstraintSolver(object):
conflict_list.append(r) conflict_list.append(r)
debug_resource_name = r.get_resource_name(_level) debug_resource_name = r.get_resource_name(_level)
self.logger.debug("ConstraintSolver: not exist affinity in resource = " + debug_resource_name) self.logger.debug("ConstraintSolver: not exist affinity "
"in resource = " + debug_resource_name)
_candidate_list[:] = [c for c in _candidate_list if c not in conflict_list] _candidate_list[:] = [c for c in _candidate_list
if c not in conflict_list]
def _constrain_diversity_with_others(self, _level, _diversity_id, _candidate_list): def _constrain_diversity_with_others(self, _level, _diversity_id,
_candidate_list):
conflict_list = [] conflict_list = []
for r in _candidate_list: for r in _candidate_list:
@ -204,11 +249,17 @@ class ConstraintSolver(object):
conflict_list.append(r) conflict_list.append(r)
debug_resource_name = r.get_resource_name(_level) debug_resource_name = r.get_resource_name(_level)
self.logger.debug("ConstraintSolver: conflict diversity in resource = " + debug_resource_name) self.logger.debug("ConstraintSolver: conflict diversity "
"in resource = " + debug_resource_name)
_candidate_list[:] = [c for c in _candidate_list if c not in conflict_list] _candidate_list[:] = [c for c in _candidate_list
if c not in conflict_list]
def exist_group(self, _level, _id, _group_type, _candidate): def exist_group(self, _level, _id, _group_type, _candidate):
"""Check if group esists."""
"""Return True if there exists a group within the candidate's
membership list that matches the provided id and group type.
"""
match = False match = False
memberships = _candidate.get_memberships(_level) memberships = _candidate.get_memberships(_level)
@ -219,7 +270,8 @@ class ConstraintSolver(object):
return match return match
def _constrain_diversity(self, _level, _n, _node_placements, _candidate_list): def _constrain_diversity(self, _level, _n, _node_placements,
_candidate_list):
conflict_list = [] conflict_list = []
for r in _candidate_list: for r in _candidate_list:
@ -228,29 +280,40 @@ class ConstraintSolver(object):
conflict_list.append(r) conflict_list.append(r)
resource_name = r.get_resource_name(_level) resource_name = r.get_resource_name(_level)
self.logger.debug("ConstraintSolver: conflict the diversity in resource = " + resource_name) self.logger.debug("ConstraintSolver: conflict the "
"diversity in resource = " +
resource_name)
_candidate_list[:] = [c for c in _candidate_list if c not in conflict_list] _candidate_list[:] = [c for c in _candidate_list
if c not in conflict_list]
def conflict_diversity(self, _level, _n, _node_placements, _candidate): def conflict_diversity(self, _level, _n, _node_placements, _candidate):
"""Return True if the candidate has a placement conflict."""
conflict = False conflict = False
for v in _node_placements.keys(): for v in _node_placements.keys():
diversity_level = _n.get_common_diversity(v.diversity_groups) diversity_level = _n.get_common_diversity(v.diversity_groups)
if diversity_level != "ANY" and LEVELS.index(diversity_level) >= LEVELS.index(_level): if diversity_level != "ANY" and \
LEVELS.index(diversity_level) >= \
LEVELS.index(_level):
if diversity_level == "host": if diversity_level == "host":
if _candidate.cluster_name == _node_placements[v].cluster_name and \ if _candidate.cluster_name == \
_candidate.rack_name == _node_placements[v].rack_name and \ _node_placements[v].cluster_name and \
_candidate.host_name == _node_placements[v].host_name: _candidate.rack_name == \
_node_placements[v].rack_name and \
_candidate.host_name == \
_node_placements[v].host_name:
conflict = True conflict = True
break break
elif diversity_level == "rack": elif diversity_level == "rack":
if _candidate.cluster_name == _node_placements[v].cluster_name and \ if _candidate.cluster_name == \
_node_placements[v].cluster_name and \
_candidate.rack_name == _node_placements[v].rack_name: _candidate.rack_name == _node_placements[v].rack_name:
conflict = True conflict = True
break break
elif diversity_level == "cluster": elif diversity_level == "cluster":
if _candidate.cluster_name == _node_placements[v].cluster_name: if _candidate.cluster_name == \
_node_placements[v].cluster_name:
conflict = True conflict = True
break break
@ -265,21 +328,31 @@ class ConstraintSolver(object):
conflict_list.append(r) conflict_list.append(r)
debug_resource_name = r.get_resource_name(_level) debug_resource_name = r.get_resource_name(_level)
self.logger.debug("ConstraintSolver: exclusivity defined in resource = " + debug_resource_name) self.logger.debug("ConstraintSolver: exclusivity defined "
"in resource = " + debug_resource_name)
_candidate_list[:] = [c for c in _candidate_list if c not in conflict_list] _candidate_list[:] = [c for c in _candidate_list
if c not in conflict_list]
def conflict_exclusivity(self, _level, _candidate): def conflict_exclusivity(self, _level, _candidate):
"""Check for an exculsivity conflict."""
"""Check if the candidate contains an exclusivity group within its
list of memberships."""
conflict = False conflict = False
memberships = _candidate.get_memberships(_level) memberships = _candidate.get_memberships(_level)
for mk in memberships.keys(): for mk in memberships.keys():
if memberships[mk].group_type == "EX" and mk.split(":")[0] == _level: if memberships[mk].group_type == "EX" and \
mk.split(":")[0] == _level:
conflict = True conflict = True
return conflict return conflict
def get_exclusivities(self, _exclusivity_groups, _level): def get_exclusivities(self, _exclusivity_groups, _level):
"""Return a list of filtered exclusivities."""
"""Extract and return only those exclusivities that exist at the
specified level.
"""
exclusivities = {} exclusivities = {}
for exk, level in _exclusivity_groups.iteritems(): for exk, level in _exclusivity_groups.iteritems():
@ -289,15 +362,20 @@ class ConstraintSolver(object):
return exclusivities return exclusivities
def _constrain_exclusivity(self, _level, _exclusivity_id, _candidate_list): def _constrain_exclusivity(self, _level, _exclusivity_id, _candidate_list):
candidate_list = self._get_exclusive_candidates(_level, _exclusivity_id, _candidate_list) candidate_list = self._get_exclusive_candidates(_level, _exclusivity_id,
_candidate_list)
if len(candidate_list) == 0: if len(candidate_list) == 0:
candidate_list = self._get_hibernated_candidates(_level, _candidate_list) candidate_list = self._get_hibernated_candidates(_level,
_candidate_list[:] = [x for x in _candidate_list if x in candidate_list] _candidate_list)
_candidate_list[:] = [x for x in _candidate_list
if x in candidate_list]
else: else:
_candidate_list[:] = [x for x in _candidate_list if x in candidate_list] _candidate_list[:] = [x for x in _candidate_list
if x in candidate_list]
def _get_exclusive_candidates(self, _level, _exclusivity_id, _candidate_list): def _get_exclusive_candidates(self, _level, _exclusivity_id,
_candidate_list):
candidate_list = [] candidate_list = []
for r in _candidate_list: for r in _candidate_list:
@ -306,7 +384,8 @@ class ConstraintSolver(object):
candidate_list.append(r) candidate_list.append(r)
else: else:
debug_resource_name = r.get_resource_name(_level) debug_resource_name = r.get_resource_name(_level)
self.logger.debug("ConstraintSolver: exclusivity not exist in resource = " + debug_resource_name) self.logger.debug("ConstraintSolver: exclusivity not exist in "
"resource = " + debug_resource_name)
return candidate_list return candidate_list
@ -319,11 +398,16 @@ class ConstraintSolver(object):
candidate_list.append(r) candidate_list.append(r)
else: else:
debug_resource_name = r.get_resource_name(_level) debug_resource_name = r.get_resource_name(_level)
self.logger.debug("ConstraintSolver: exclusivity not allowed in resource = " + debug_resource_name) self.logger.debug("ConstraintSolver: exclusivity not allowed "
"in resource = " + debug_resource_name)
return candidate_list return candidate_list
def check_hibernated(self, _level, _candidate): def check_hibernated(self, _level, _candidate):
"""Check if the candidate is hibernated."""
"""Return True if the candidate has no placed VMs at the specified
level.
"""
match = False match = False
num_of_placed_vms = _candidate.get_num_of_placed_vms(_level) num_of_placed_vms = _candidate.get_num_of_placed_vms(_level)
@ -341,11 +425,14 @@ class ConstraintSolver(object):
conflict_list.append(r) conflict_list.append(r)
debug_resource_name = r.get_resource_name(_level) debug_resource_name = r.get_resource_name(_level)
self.logger.debug("ConstraintSolver: not meet aggregate in resource = " + debug_resource_name) self.logger.debug("ConstraintSolver: not meet aggregate "
"in resource = " + debug_resource_name)
_candidate_list[:] = [c for c in _candidate_list if c not in conflict_list] _candidate_list[:] = [c for c in _candidate_list
if c not in conflict_list]
def check_host_aggregates(self, _level, _candidate, _v): def check_host_aggregates(self, _level, _candidate, _v):
"""Check if the candidate passes the aggregate instance extra specs zone filter."""
return self.openstack_AIES.host_passes(_level, _candidate, _v) return self.openstack_AIES.host_passes(_level, _candidate, _v)
def _constrain_availability_zone(self, _level, _n, _candidate_list): def _constrain_availability_zone(self, _level, _n, _candidate_list):
@ -357,11 +444,14 @@ class ConstraintSolver(object):
conflict_list.append(r) conflict_list.append(r)
debug_resource_name = r.get_resource_name(_level) debug_resource_name = r.get_resource_name(_level)
self.logger.debug("ConstraintSolver: not meet az in resource = " + debug_resource_name) self.logger.debug("ConstraintSolver: not meet az in "
"resource = " + debug_resource_name)
_candidate_list[:] = [c for c in _candidate_list if c not in conflict_list] _candidate_list[:] = [c for c in _candidate_list
if c not in conflict_list]
def check_availability_zone(self, _level, _candidate, _v): def check_availability_zone(self, _level, _candidate, _v):
"""Check if the candidate passes the availability zone filter."""
return self.openstack_AZ.host_passes(_level, _candidate, _v) return self.openstack_AZ.host_passes(_level, _candidate, _v)
def _constrain_cpu_capacity(self, _level, _n, _candidate_list): def _constrain_cpu_capacity(self, _level, _n, _candidate_list):
@ -372,11 +462,14 @@ class ConstraintSolver(object):
conflict_list.append(ch) conflict_list.append(ch)
debug_resource_name = ch.get_resource_name(_level) debug_resource_name = ch.get_resource_name(_level)
self.logger.debug("ConstraintSolver: lack of cpu in " + debug_resource_name) self.logger.debug("ConstraintSolver: lack of cpu in " +
debug_resource_name)
_candidate_list[:] = [c for c in _candidate_list if c not in conflict_list] _candidate_list[:] = [c for c in _candidate_list
if c not in conflict_list]
def check_cpu_capacity(self, _level, _v, _candidate): def check_cpu_capacity(self, _level, _v, _candidate):
"""Check if the candidate passes the core filter."""
return self.openstack_C.host_passes(_level, _candidate, _v) return self.openstack_C.host_passes(_level, _candidate, _v)
def _constrain_mem_capacity(self, _level, _n, _candidate_list): def _constrain_mem_capacity(self, _level, _n, _candidate_list):
@ -387,11 +480,14 @@ class ConstraintSolver(object):
conflict_list.append(ch) conflict_list.append(ch)
debug_resource_name = ch.get_resource_name(_level) debug_resource_name = ch.get_resource_name(_level)
self.logger.debug("ConstraintSolver: lack of mem in " + debug_resource_name) self.logger.debug("ConstraintSolver: lack of mem in " +
debug_resource_name)
_candidate_list[:] = [c for c in _candidate_list if c not in conflict_list] _candidate_list[:] = [c for c in _candidate_list
if c not in conflict_list]
def check_mem_capacity(self, _level, _v, _candidate): def check_mem_capacity(self, _level, _v, _candidate):
"""Check if the candidate passes the RAM filter."""
return self.openstack_R.host_passes(_level, _candidate, _v) return self.openstack_R.host_passes(_level, _candidate, _v)
def _constrain_local_disk_capacity(self, _level, _n, _candidate_list): def _constrain_local_disk_capacity(self, _level, _n, _candidate_list):
@ -402,11 +498,14 @@ class ConstraintSolver(object):
conflict_list.append(ch) conflict_list.append(ch)
debug_resource_name = ch.get_resource_name(_level) debug_resource_name = ch.get_resource_name(_level)
self.logger.debug("ConstraintSolver: lack of local disk in " + debug_resource_name) self.logger.debug("ConstraintSolver: lack of local disk in " +
debug_resource_name)
_candidate_list[:] = [c for c in _candidate_list if c not in conflict_list] _candidate_list[:] = [c for c in _candidate_list
if c not in conflict_list]
def check_local_disk_capacity(self, _level, _v, _candidate): def check_local_disk_capacity(self, _level, _v, _candidate):
"""Check if the candidate passes the disk filter."""
return self.openstack_D.host_passes(_level, _candidate, _v) return self.openstack_D.host_passes(_level, _candidate, _v)
def _constrain_storage_capacity(self, _level, _n, _candidate_list): def _constrain_storage_capacity(self, _level, _n, _candidate_list):
@ -434,11 +533,14 @@ class ConstraintSolver(object):
if vc == "any" or s.storage_class == vc: if vc == "any" or s.storage_class == vc:
avail_disks.append(s.storage_avail_disk) avail_disks.append(s.storage_avail_disk)
self.logger.debug("ConstraintSolver: storage constrained in resource = " + debug_resource_name) self.logger.debug("ConstraintSolver: storage constrained in"
"resource = " + debug_resource_name)
_candidate_list[:] = [c for c in _candidate_list if c not in conflict_list] _candidate_list[:] = [c for c in _candidate_list
if c not in conflict_list]
def check_storage_availability(self, _level, _v, _ch): def check_storage_availability(self, _level, _v, _ch):
"""Return True if there is sufficient storage availability."""
available = False available = False
volume_sizes = [] volume_sizes = []
@ -462,21 +564,28 @@ class ConstraintSolver(object):
return available return available
def _constrain_nw_bandwidth_capacity(self, _level, _n, _node_placements, _candidate_list): def _constrain_nw_bandwidth_capacity(self, _level, _n, _node_placements,
_candidate_list):
conflict_list = [] conflict_list = []
for cr in _candidate_list: for cr in _candidate_list:
if self.check_nw_bandwidth_availability(_level, _n, _node_placements, cr) is False: if self.check_nw_bandwidth_availability(
_level, _n, _node_placements, cr) is False:
if cr not in conflict_list: if cr not in conflict_list:
conflict_list.append(cr) conflict_list.append(cr)
debug_resource_name = cr.get_resource_name(_level) debug_resource_name = cr.get_resource_name(_level)
self.logger.debug("ConstraintSolver: bw constrained in resource = " + debug_resource_name) self.logger.debug("ConstraintSolver: bw constrained in "
"resource = " + debug_resource_name)
_candidate_list[:] = [c for c in _candidate_list if c not in conflict_list] _candidate_list[:] = [c for c in _candidate_list
if c not in conflict_list]
def check_nw_bandwidth_availability(self, _level, _n, _node_placements, _cr): def check_nw_bandwidth_availability(self, _level, _n, _node_placements,
# NOTE: 3rd entry for special node requiring bandwidth of out-going from spine switch _cr):
"""Return True if there is sufficient network availability."""
# NOTE: 3rd entry for special node requiring bandwidth of out-going
# from spine switch
total_req_bandwidths = [0, 0, 0] total_req_bandwidths = [0, 0, 0]
link_list = _n.get_all_links() link_list = _n.get_all_links()
@ -486,26 +595,35 @@ class ConstraintSolver(object):
placement_level = None placement_level = None
if vl.node in _node_placements.keys(): # vl.node is VM or Volume if vl.node in _node_placements.keys(): # vl.node is VM or Volume
placement_level = _node_placements[vl.node].get_common_placement(_cr) placement_level = \
_node_placements[vl.node].get_common_placement(_cr)
else: # in the open list else: # in the open list
placement_level = _n.get_common_diversity(vl.node.diversity_groups) placement_level = \
_n.get_common_diversity(vl.node.diversity_groups)
if placement_level == "ANY": if placement_level == "ANY":
implicit_diversity = self.get_implicit_diversity(_n.node, link_list, vl.node, _level) implicit_diversity = self.get_implicit_diversity(_n.node,
link_list,
vl.node,
_level)
if implicit_diversity[0] is not None: if implicit_diversity[0] is not None:
placement_level = implicit_diversity[1] placement_level = implicit_diversity[1]
self.get_req_bandwidths(_level, placement_level, bandwidth, total_req_bandwidths) self.get_req_bandwidths(_level, placement_level, bandwidth,
total_req_bandwidths)
return self._check_nw_bandwidth_availability(_level, total_req_bandwidths, _cr) return self._check_nw_bandwidth_availability(_level,
total_req_bandwidths, _cr)
# to find any implicit diversity relation caused by the other links of _v # to find any implicit diversity relation caused by the other links of _v
# (i.e., intersection between _v and _target_v) # (i.e., intersection between _v and _target_v)
def get_implicit_diversity(self, _v, _link_list, _target_v, _level): def get_implicit_diversity(self, _v, _link_list, _target_v, _level):
"""Get the maximum implicit diversity between _v and _target_v."""
max_implicit_diversity = (None, 0) max_implicit_diversity = (None, 0)
for vl in _link_list: for vl in _link_list:
diversity_level = _v.get_common_diversity(vl.node.diversity_groups) diversity_level = _v.get_common_diversity(vl.node.diversity_groups)
if diversity_level != "ANY" and LEVELS.index(diversity_level) >= LEVELS.index(_level): if diversity_level != "ANY" \
and LEVELS.index(diversity_level) >= LEVELS.index(_level):
for dk, dl in vl.node.diversity_groups.iteritems(): for dk, dl in vl.node.diversity_groups.iteritems():
if LEVELS.index(dl) > LEVELS.index(diversity_level): if LEVELS.index(dl) > LEVELS.index(diversity_level):
if _target_v.uuid != vl.node.uuid: if _target_v.uuid != vl.node.uuid:
@ -515,7 +633,9 @@ class ConstraintSolver(object):
return max_implicit_diversity return max_implicit_diversity
def get_req_bandwidths(self, _level, _placement_level, _bandwidth, _total_req_bandwidths): def get_req_bandwidths(self, _level, _placement_level, _bandwidth,
_total_req_bandwidths):
"""Calculate and update total required bandwidths."""
if _level == "cluster" or _level == "rack": if _level == "cluster" or _level == "rack":
if _placement_level == "cluster" or _placement_level == "rack": if _placement_level == "cluster" or _placement_level == "rack":
_total_req_bandwidths[1] += _bandwidth _total_req_bandwidths[1] += _bandwidth
@ -526,7 +646,8 @@ class ConstraintSolver(object):
elif _placement_level == "host": elif _placement_level == "host":
_total_req_bandwidths[0] += _bandwidth _total_req_bandwidths[0] += _bandwidth
def _check_nw_bandwidth_availability(self, _level, _req_bandwidths, _candidate_resource): def _check_nw_bandwidth_availability(self, _level, _req_bandwidths,
_candidate_resource):
available = True available = True
if _level == "cluster": if _level == "cluster":
@ -557,7 +678,8 @@ class ConstraintSolver(object):
for _, sr in _candidate_resource.rack_avail_switches.iteritems(): for _, sr in _candidate_resource.rack_avail_switches.iteritems():
rack_avail_bandwidths.append(max(sr.avail_bandwidths)) rack_avail_bandwidths.append(max(sr.avail_bandwidths))
avail_bandwidth = min(max(host_avail_bandwidths), max(rack_avail_bandwidths)) avail_bandwidth = min(max(host_avail_bandwidths),
max(rack_avail_bandwidths))
if avail_bandwidth < _req_bandwidths[1]: if avail_bandwidth < _req_bandwidths[1]:
available = False available = False

View File

@ -13,10 +13,12 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
"""AggregateInstanceExtraSpecsFilter."""
import six import six
from valet.engine.optimizer.app_manager.app_topology_base import VM from valet.engine.optimizer.app_manager.app_topology_base import VM
import valet.engine.optimizer.ostro.openstack_utils from valet.engine.optimizer.ostro import openstack_utils
_SCOPE = 'aggregate_instance_extra_specs' _SCOPE = 'aggregate_instance_extra_specs'
@ -28,14 +30,13 @@ class AggregateInstanceExtraSpecsFilter(object):
run_filter_once_per_request = True run_filter_once_per_request = True
def __init__(self, _logger): def __init__(self, _logger):
"""Initialization."""
self.logger = _logger self.logger = _logger
def host_passes(self, _level, _host, _v): def host_passes(self, _level, _host, _v):
"""Return a list of hosts that can create instance_type """Return a list of hosts that can create instance_type."""
"""Check that the extra specs associated with the instance type match
Check that the extra specs associated with the instance type match the metadata provided by aggregates. If not present return False."""
the metadata provided by aggregates. If not present return False.
"""
# If 'extra_specs' is not present or extra_specs are empty then we # If 'extra_specs' is not present or extra_specs are empty then we
# need not proceed further # need not proceed further
@ -47,12 +48,14 @@ class AggregateInstanceExtraSpecsFilter(object):
if len(extra_specs_list) == 0: if len(extra_specs_list) == 0:
return True return True
metadatas = openstack_utils.aggregate_metadata_get_by_host(_level, _host) metadatas = openstack_utils.aggregate_metadata_get_by_host(_level,
_host)
matched_logical_group_list = [] matched_logical_group_list = []
for extra_specs in extra_specs_list: for extra_specs in extra_specs_list:
for lgk, metadata in metadatas.iteritems(): for lgk, metadata in metadatas.iteritems():
if self._match_metadata(_host.get_resource_name(_level), lgk, extra_specs, metadata) is True: if self._match_metadata(_host.get_resource_name(_level), lgk,
extra_specs, metadata) is True:
matched_logical_group_list.append(lgk) matched_logical_group_list.append(lgk)
break break
else: else:
@ -64,7 +67,8 @@ class AggregateInstanceExtraSpecsFilter(object):
break break
else: else:
host_aggregate_extra_specs = {} host_aggregate_extra_specs = {}
host_aggregate_extra_specs["host_aggregates"] = matched_logical_group_list host_aggregate_extra_specs["host_aggregates"] = \
matched_logical_group_list
_v.extra_specs_list.append(host_aggregate_extra_specs) _v.extra_specs_list.append(host_aggregate_extra_specs)
return True return True
@ -85,13 +89,17 @@ class AggregateInstanceExtraSpecsFilter(object):
aggregate_vals = _metadata.get(key, None) aggregate_vals = _metadata.get(key, None)
if not aggregate_vals: if not aggregate_vals:
self.logger.debug("key (" + key + ") not exists in logical_group (" + _lg_name + ") " + " of host (" + _h_name + ")") self.logger.debug("key (" + key + ") not exists in logical_"
"group (" + _lg_name + ") " +
" of host (" + _h_name + ")")
return False return False
for aggregate_val in aggregate_vals: for aggregate_val in aggregate_vals:
if openstack_utils.match(aggregate_val, req): if openstack_utils.match(aggregate_val, req):
break break
else: else:
self.logger.debug("key (" + key + ")'s value (" + req + ") not exists in logical_group " + "(" + _lg_name + ") " + " of host (" + _h_name + ")") self.logger.debug("key (" + key + ")'s value (" + req + ") not "
"exists in logical_group " + "(" + _lg_name +
") " + " of host (" + _h_name + ")")
return False return False
return True return True
@ -99,9 +107,9 @@ class AggregateInstanceExtraSpecsFilter(object):
# NOTE: originally, OpenStack used the metadata of host_aggregate # NOTE: originally, OpenStack used the metadata of host_aggregate
class AvailabilityZoneFilter(object): class AvailabilityZoneFilter(object):
""" Filters Hosts by availability zone. """AvailabilityZoneFilter filters Hosts by availability zone."""
Works with aggregate metadata availability zones, using the key """Work with aggregate metadata availability zones, using the key
'availability_zone' 'availability_zone'
Note: in theory a compute node can be part of multiple availability_zones Note: in theory a compute node can be part of multiple availability_zones
""" """
@ -110,9 +118,11 @@ class AvailabilityZoneFilter(object):
run_filter_once_per_request = True run_filter_once_per_request = True
def __init__(self, _logger): def __init__(self, _logger):
"""Initialization."""
self.logger = _logger self.logger = _logger
def host_passes(self, _level, _host, _v): def host_passes(self, _level, _host, _v):
"""Return True if all availalibility zones in _v exist in the host."""
az_request_list = [] az_request_list = []
if isinstance(_v, VM): if isinstance(_v, VM):
az_request_list.append(_v.availability_zone) az_request_list.append(_v.availability_zone)
@ -123,43 +133,54 @@ class AvailabilityZoneFilter(object):
if len(az_request_list) == 0: if len(az_request_list) == 0:
return True return True
availability_zone_list = openstack_utils.availability_zone_get_by_host(_level, _host) availability_zone_list = \
openstack_utils.availability_zone_get_by_host(_level, _host)
for azr in az_request_list: for azr in az_request_list:
if azr not in availability_zone_list: if azr not in availability_zone_list:
self.logger.debug("AZ (" + azr + ") not exists in host " + "(" + _host.get_resource_name(_level) + ")") self.logger.debug("AZ (" + azr + ") not exists in host " + "(" +
_host.get_resource_name(_level) + ")")
return False return False
return True return True
class RamFilter(object): class RamFilter(object):
"""RamFilter."""
def __init__(self, _logger): def __init__(self, _logger):
"""Initialization."""
self.logger = _logger self.logger = _logger
def host_passes(self, _level, _host, _v): def host_passes(self, _level, _host, _v):
"""Only return hosts with sufficient available RAM.""" """Return True if host has sufficient available RAM."""
requested_ram = _v.mem # MB requested_ram = _v.mem # MB
(total_ram, usable_ram) = _host.get_mem(_level) (total_ram, usable_ram) = _host.get_mem(_level)
# Do not allow an instance to overcommit against itself, only against other instances. # Do not allow an instance to overcommit against itself, only against
# other instances.
if not total_ram >= requested_ram: if not total_ram >= requested_ram:
self.logger.debug("requested mem (" + str(requested_ram) + ") more than total mem (" + self.logger.debug("requested mem (" + str(requested_ram) +
str(total_ram) + ") in host (" + _host.get_resource_name(_level) + ")") ") more than total mem (" +
str(total_ram) + ") in host (" +
_host.get_resource_name(_level) + ")")
return False return False
if not usable_ram >= requested_ram: if not usable_ram >= requested_ram:
self.logger.debug("requested mem (" + str(requested_ram) + ") more than avail mem (" + self.logger.debug("requested mem (" + str(requested_ram) +
str(usable_ram) + ") in host (" + _host.get_resource_name(_level) + ")") ") more than avail mem (" +
str(usable_ram) + ") in host (" +
_host.get_resource_name(_level) + ")")
return False return False
return True return True
class CoreFilter(object): class CoreFilter(object):
"""CoreFilter."""
def __init__(self, _logger): def __init__(self, _logger):
"""Initialization."""
self.logger = _logger self.logger = _logger
def host_passes(self, _level, _host, _v): def host_passes(self, _level, _host, _v):
@ -168,33 +189,42 @@ class CoreFilter(object):
instance_vCPUs = _v.vCPUs instance_vCPUs = _v.vCPUs
# Do not allow an instance to overcommit against itself, only against other instances. # Do not allow an instance to overcommit against itself, only against
# other instances.
if instance_vCPUs > vCPUs: if instance_vCPUs > vCPUs:
self.logger.debug("requested vCPUs (" + str(instance_vCPUs) + ") more than total vCPUs (" + self.logger.debug("requested vCPUs (" + str(instance_vCPUs) +
str(vCPUs) + ") in host (" + _host.get_resource_name(_level) + ")") ") more than total vCPUs (" +
str(vCPUs) + ") in host (" +
_host.get_resource_name(_level) + ")")
return False return False
if avail_vCPUs < instance_vCPUs: if avail_vCPUs < instance_vCPUs:
self.logger.debug("requested vCPUs (" + str(instance_vCPUs) + ") more than avail vCPUs (" + self.logger.debug("requested vCPUs (" + str(instance_vCPUs) +
str(avail_vCPUs) + ") in host (" + _host.get_resource_name(_level) + ")") ") more than avail vCPUs (" +
str(avail_vCPUs) + ") in host (" +
_host.get_resource_name(_level) + ")")
return False return False
return True return True
class DiskFilter(object): class DiskFilter(object):
"""DiskFilter."""
def __init__(self, _logger): def __init__(self, _logger):
"""Initialization."""
self.logger = _logger self.logger = _logger
def host_passes(self, _level, _host, _v): def host_passes(self, _level, _host, _v):
"""Filter based on disk usage.""" """Return True if the requested disk is less than the available disk."""
requested_disk = _v.local_volume_size requested_disk = _v.local_volume_size
(_, usable_disk) = _host.get_local_disk(_level) (_, usable_disk) = _host.get_local_disk(_level)
if not usable_disk >= requested_disk: if not usable_disk >= requested_disk:
self.logger.debug("requested disk (" + str(requested_disk) + ") more than avail disk (" + self.logger.debug("requested disk (" + str(requested_disk) +
str(usable_disk) + ") in host (" + _host.get_resource_name(_level) + ")") ") more than avail disk (" +
str(usable_disk) + ") in host (" +
_host.get_resource_name(_level) + ")")
return False return False
return True return True

View File

@ -13,6 +13,8 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
"""Openstack utlity functions."""
import collections import collections
import operator import operator
@ -38,6 +40,7 @@ op_methods = {'=': lambda x, y: float(x) >= float(y),
def match(value, req): def match(value, req):
"""Return True if value matches request."""
words = req.split() words = req.split()
op = method = None op = method = None
@ -70,7 +73,10 @@ def match(value, req):
def aggregate_metadata_get_by_host(_level, _host, _key=None): def aggregate_metadata_get_by_host(_level, _host, _key=None):
"""Returns a dict of all metadata based on a metadata key for a specific host. If the key is not provided, returns a dict of all metadata.""" """Return a dict of metadata for a specific host."""
"""Base dict on a metadata key. If the key is not provided,
return a dict of all metadata.
"""
metadatas = {} metadatas = {}
@ -90,6 +96,7 @@ def aggregate_metadata_get_by_host(_level, _host, _key=None):
# NOTE: this function not exist in OpenStack # NOTE: this function not exist in OpenStack
def availability_zone_get_by_host(_level, _host): def availability_zone_get_by_host(_level, _host):
"""Return a list of availability zones for a specific host."""
availability_zone_list = [] availability_zone_list = []
logical_groups = _host.get_memberships(_level) logical_groups = _host.get_memberships(_level)

View File

@ -13,15 +13,20 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
"""Optimizer."""
import time import time
from valet.engine.optimizer.app_manager.app_topology_base import VGroup, VM, Volume from valet.engine.optimizer.app_manager.app_topology_base \
import VGroup, VM, Volume
from valet.engine.optimizer.ostro.search import Search from valet.engine.optimizer.ostro.search import Search
class Optimizer(object): class Optimizer(object):
"""Optimizer."""
def __init__(self, _resource, _logger): def __init__(self, _resource, _logger):
"""Initialization."""
self.resource = _resource self.resource = _resource
self.logger = _logger self.logger = _logger
@ -30,6 +35,8 @@ class Optimizer(object):
self.status = "success" self.status = "success"
def place(self, _app_topology): def place(self, _app_topology):
"""Perform a replan, migration, or create operation."""
"""Return a placement map for VMs, Volumes, and VGroups."""
success = False success = False
uuid_map = None uuid_map = None
@ -59,7 +66,8 @@ class Optimizer(object):
uuid_map = self._delete_old_vms(_app_topology.old_vm_map) uuid_map = self._delete_old_vms(_app_topology.old_vm_map)
self.resource.update_topology(store=False) self.resource.update_topology(store=False)
self.logger.debug("Optimizer: remove old placements for replan") self.logger.debug("Optimizer: remove old placements for "
"replan")
else: else:
success = self.search.place_nodes(_app_topology, self.resource) success = self.search.place_nodes(_app_topology, self.resource)
@ -67,26 +75,35 @@ class Optimizer(object):
if success is True: if success is True:
self.logger.debug("Optimizer: search running time = " + str(end_ts - start_ts) + " sec") self.logger.debug("Optimizer: search running time = " +
self.logger.debug("Optimizer: total bandwidth = " + str(self.search.bandwidth_usage)) str(end_ts - start_ts) + " sec")
self.logger.debug("Optimizer: total number of hosts = " + str(self.search.num_of_hosts)) self.logger.debug("Optimizer: total bandwidth = " +
str(self.search.bandwidth_usage))
self.logger.debug("Optimizer: total number of hosts = " +
str(self.search.num_of_hosts))
placement_map = {} placement_map = {}
for v in self.search.node_placements.keys(): for v in self.search.node_placements.keys():
if isinstance(v, VM): if isinstance(v, VM):
placement_map[v] = self.search.node_placements[v].host_name placement_map[v] = self.search.node_placements[v].host_name
elif isinstance(v, Volume): elif isinstance(v, Volume):
placement_map[v] = self.search.node_placements[v].host_name + "@" placement_map[v] = \
placement_map[v] += self.search.node_placements[v].storage.storage_name self.search.node_placements[v].host_name + "@"
placement_map[v] += \
self.search.node_placements[v].storage.storage_name
elif isinstance(v, VGroup): elif isinstance(v, VGroup):
if v.level == "host": if v.level == "host":
placement_map[v] = self.search.node_placements[v].host_name placement_map[v] = \
self.search.node_placements[v].host_name
elif v.level == "rack": elif v.level == "rack":
placement_map[v] = self.search.node_placements[v].rack_name placement_map[v] = \
self.search.node_placements[v].rack_name
elif v.level == "cluster": elif v.level == "cluster":
placement_map[v] = self.search.node_placements[v].cluster_name placement_map[v] = \
self.search.node_placements[v].cluster_name
self.logger.debug(" " + v.name + " placed in " + placement_map[v]) self.logger.debug(" " + v.name + " placed in " +
placement_map[v])
self._update_resource_status(uuid_map) self._update_resource_status(uuid_map)
@ -104,7 +121,8 @@ class Optimizer(object):
if uuid is not None: if uuid is not None:
uuid_map[h_uuid] = uuid uuid_map[h_uuid] = uuid
self.resource.remove_vm_by_h_uuid_from_host(info[0], h_uuid, info[1], info[2], info[3]) self.resource.remove_vm_by_h_uuid_from_host(
info[0], h_uuid, info[1], info[2], info[3])
self.resource.update_host_time(info[0]) self.resource.update_host_time(info[0])
host = self.resource.hosts[info[0]] host = self.resource.hosts[info[0]]
@ -123,58 +141,75 @@ class Optimizer(object):
self.resource.add_vm_to_host(np.host_name, self.resource.add_vm_to_host(np.host_name,
(v.uuid, v.name, uuid), (v.uuid, v.name, uuid),
v.vCPUs, v.mem, v.local_volume_size) v.vCPUs, v.mem,
v.local_volume_size)
for vl in v.vm_list: for vl in v.vm_list:
tnp = self.search.node_placements[vl.node] tnp = self.search.node_placements[vl.node]
placement_level = np.get_common_placement(tnp) placement_level = np.get_common_placement(tnp)
self.resource.deduct_bandwidth(np.host_name, placement_level, vl.nw_bandwidth) self.resource.deduct_bandwidth(np.host_name,
placement_level,
vl.nw_bandwidth)
for voll in v.volume_list: for voll in v.volume_list:
tnp = self.search.node_placements[voll.node] tnp = self.search.node_placements[voll.node]
placement_level = np.get_common_placement(tnp) placement_level = np.get_common_placement(tnp)
self.resource.deduct_bandwidth(np.host_name, placement_level, voll.io_bandwidth) self.resource.deduct_bandwidth(np.host_name,
placement_level,
voll.io_bandwidth)
self._update_logical_grouping(v, self.search.avail_hosts[np.host_name], uuid) self._update_logical_grouping(
v, self.search.avail_hosts[np.host_name], uuid)
self.resource.update_host_time(np.host_name) self.resource.update_host_time(np.host_name)
elif isinstance(v, Volume): elif isinstance(v, Volume):
self.resource.add_vol_to_host(np.host_name, np.storage.storage_name, v.name, v.volume_size) self.resource.add_vol_to_host(np.host_name,
np.storage.storage_name, v.name,
v.volume_size)
for vl in v.vm_list: for vl in v.vm_list:
tnp = self.search.node_placements[vl.node] tnp = self.search.node_placements[vl.node]
placement_level = np.get_common_placement(tnp) placement_level = np.get_common_placement(tnp)
self.resource.deduct_bandwidth(np.host_name, placement_level, vl.io_bandwidth) self.resource.deduct_bandwidth(np.host_name,
placement_level,
vl.io_bandwidth)
self.resource.update_storage_time(np.storage.storage_name) self.resource.update_storage_time(np.storage.storage_name)
def _update_logical_grouping(self, _v, _avail_host, _uuid): def _update_logical_grouping(self, _v, _avail_host, _uuid):
for lgk, lg in _avail_host.host_memberships.iteritems(): for lgk, lg in _avail_host.host_memberships.iteritems():
if lg.group_type == "EX" or lg.group_type == "AFF" or lg.group_type == "DIV": if lg.group_type == "EX" or lg.group_type == "AFF" or \
lg.group_type == "DIV":
lg_name = lgk.split(":") lg_name = lgk.split(":")
if lg_name[0] == "host" and lg_name[1] != "any": if lg_name[0] == "host" and lg_name[1] != "any":
self.resource.add_logical_group(_avail_host.host_name, lgk, lg.group_type) self.resource.add_logical_group(_avail_host.host_name,
lgk, lg.group_type)
if _avail_host.rack_name != "any": if _avail_host.rack_name != "any":
for lgk, lg in _avail_host.rack_memberships.iteritems(): for lgk, lg in _avail_host.rack_memberships.iteritems():
if lg.group_type == "EX" or lg.group_type == "AFF" or lg.group_type == "DIV": if lg.group_type == "EX" or lg.group_type == "AFF" or \
lg.group_type == "DIV":
lg_name = lgk.split(":") lg_name = lgk.split(":")
if lg_name[0] == "rack" and lg_name[1] != "any": if lg_name[0] == "rack" and lg_name[1] != "any":
self.resource.add_logical_group(_avail_host.rack_name, lgk, lg.group_type) self.resource.add_logical_group(_avail_host.rack_name,
lgk, lg.group_type)
if _avail_host.cluster_name != "any": if _avail_host.cluster_name != "any":
for lgk, lg in _avail_host.cluster_memberships.iteritems(): for lgk, lg in _avail_host.cluster_memberships.iteritems():
if lg.group_type == "EX" or lg.group_type == "AFF" or lg.group_type == "DIV": if lg.group_type == "EX" or lg.group_type == "AFF" or \
lg.group_type == "DIV":
lg_name = lgk.split(":") lg_name = lgk.split(":")
if lg_name[0] == "cluster" and lg_name[1] != "any": if lg_name[0] == "cluster" and lg_name[1] != "any":
self.resource.add_logical_group(_avail_host.cluster_name, lgk, lg.group_type) self.resource.add_logical_group(
_avail_host.cluster_name, lgk, lg.group_type)
vm_logical_groups = [] vm_logical_groups = []
self._collect_logical_groups_of_vm(_v, vm_logical_groups) self._collect_logical_groups_of_vm(_v, vm_logical_groups)
host = self.resource.hosts[_avail_host.host_name] host = self.resource.hosts[_avail_host.host_name]
self.resource.add_vm_to_logical_groups(host, (_v.uuid, _v.name, _uuid), vm_logical_groups) self.resource.add_vm_to_logical_groups(host, (_v.uuid, _v.name, _uuid),
vm_logical_groups)
def _collect_logical_groups_of_vm(self, _v, _vm_logical_groups): def _collect_logical_groups_of_vm(self, _v, _vm_logical_groups):
if isinstance(_v, VM): if isinstance(_v, VM):

View File

@ -13,6 +13,8 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
"""Valet Engine."""
from oslo_config import cfg from oslo_config import cfg
import threading import threading
import time import time
@ -30,21 +32,25 @@ CONF = cfg.CONF
class Ostro(object): class Ostro(object):
"""Valet Engine."""
def __init__(self, _config, _logger): def __init__(self, _config, _logger):
"""Initialization."""
self.config = _config self.config = _config
self.logger = _logger self.logger = _logger
self.db = MusicHandler(self.config, self.logger) self.db = MusicHandler(self.config, self.logger)
if self.db.init_db() is False: if self.db.init_db() is False:
self.logger.error("Ostro.__init__: error while initializing MUSIC database") self.logger.error("Ostro.__init__: error while initializing MUSIC "
"database")
else: else:
self.logger.debug("Ostro.__init__: done init music") self.logger.debug("Ostro.__init__: done init music")
self.resource = Resource(self.db, self.config, self.logger) self.resource = Resource(self.db, self.config, self.logger)
self.logger.debug("done init resource") self.logger.debug("done init resource")
self.app_handler = AppHandler(self.resource, self.db, self.config, self.logger) self.app_handler = AppHandler(self.resource, self.db, self.config,
self.logger)
self.logger.debug("done init apphandler") self.logger.debug("done init apphandler")
self.optimizer = Optimizer(self.resource, self.logger) self.optimizer = Optimizer(self.resource, self.logger)
@ -53,10 +59,13 @@ class Ostro(object):
self.data_lock = threading.Lock() self.data_lock = threading.Lock()
self.thread_list = [] self.thread_list = []
self.topology = TopologyManager(1, "Topology", self.resource, self.data_lock, self.config, self.logger) self.topology = TopologyManager(1, "Topology", self.resource,
self.data_lock, self.config,
self.logger)
self.logger.debug("done init topology") self.logger.debug("done init topology")
self.compute = ComputeManager(2, "Compute", self.resource, self.data_lock, self.config, self.logger) self.compute = ComputeManager(2, "Compute", self.resource,
self.data_lock, self.config, self.logger)
self.logger.debug("done init compute") self.logger.debug("done init compute")
self.listener = ListenerManager(3, "Listener", CONF) self.listener = ListenerManager(3, "Listener", CONF)
@ -66,6 +75,10 @@ class Ostro(object):
self.end_of_process = False self.end_of_process = False
def run_ostro(self): def run_ostro(self):
"""Start main engine process."""
"""Start topology, compute, and listener processes. Start process of
retrieving and handling events and requests from the db every 1 second.
"""
self.logger.info("Ostro.run_ostro: start Ostro ......") self.logger.info("Ostro.run_ostro: start Ostro ......")
self.topology.start() self.topology.start()
@ -102,6 +115,10 @@ class Ostro(object):
self.logger.info("Ostro.run_ostro: exit Ostro") self.logger.info("Ostro.run_ostro: exit Ostro")
def stop_ostro(self): def stop_ostro(self):
"""Stop main engine process."""
"""Stop process of retrieving and handling events and requests from
the db. Stop topology and compute processes.
"""
self.end_of_process = True self.end_of_process = True
while len(self.thread_list) > 0: while len(self.thread_list) > 0:
@ -111,10 +128,12 @@ class Ostro(object):
self.thread_list.remove(t) self.thread_list.remove(t)
def bootstrap(self): def bootstrap(self):
"""Start bootstrap and update the engine's resource topology."""
self.logger.info("Ostro.bootstrap: start bootstrap") self.logger.info("Ostro.bootstrap: start bootstrap")
try: try:
resource_status = self.db.get_resource_status(self.resource.datacenter.name) resource_status = self.db.get_resource_status(
self.resource.datacenter.name)
if resource_status is None: if resource_status is None:
return False return False
@ -140,7 +159,8 @@ class Ostro(object):
self.resource.update_topology() self.resource.update_topology()
except Exception: except Exception:
self.logger.critical("Ostro.bootstrap failed: " + traceback.format_exc()) self.logger.critical("Ostro.bootstrap failed: " +
traceback.format_exc())
self.logger.info("Ostro.bootstrap: done bootstrap") self.logger.info("Ostro.bootstrap: done bootstrap")
@ -173,6 +193,7 @@ class Ostro(object):
return True return True
def place_app(self, _app_data): def place_app(self, _app_data):
"""Place results of query and placement requests in the db."""
self.data_lock.acquire() self.data_lock.acquire()
start_time = time.time() start_time = time.time()
@ -190,7 +211,8 @@ class Ostro(object):
query_results = self._query(query_request_list) query_results = self._query(query_request_list)
result = self._get_json_results("query", "ok", self.status, query_results) result = self._get_json_results("query", "ok", self.status,
query_results)
if self.db.put_result(result) is False: if self.db.put_result(result) is False:
self.data_lock.release() self.data_lock.release()
@ -207,9 +229,11 @@ class Ostro(object):
placement_map = self._place_app(placement_request_list) placement_map = self._place_app(placement_request_list)
if placement_map is None: if placement_map is None:
result = self._get_json_results("placement", "error", self.status, placement_map) result = self._get_json_results("placement", "error",
self.status, placement_map)
else: else:
result = self._get_json_results("placement", "ok", "success", placement_map) result = self._get_json_results("placement", "ok", "success",
placement_map)
if self.db.put_result(result) is False: if self.db.put_result(result) is False:
self.data_lock.release() self.data_lock.release()
@ -219,7 +243,8 @@ class Ostro(object):
end_time = time.time() end_time = time.time()
self.logger.info("Ostro.place_app: total decision delay of request = " + str(end_time - start_time) + " sec") self.logger.info("Ostro.place_app: total decision delay of request = " +
str(end_time - start_time) + " sec")
self.data_lock.release() self.data_lock.release()
return True return True
@ -233,7 +258,8 @@ class Ostro(object):
if "parameters" in q.keys(): if "parameters" in q.keys():
params = q["parameters"] params = q["parameters"]
if "group_name" in params.keys(): if "group_name" in params.keys():
vm_list = self._get_vms_from_logical_group(params["group_name"]) vm_list = self._get_vms_from_logical_group(
params["group_name"])
query_results[q["stack_id"]] = vm_list query_results[q["stack_id"]] = vm_list
else: else:
self.status = "unknown paramenter in query" self.status = "unknown paramenter in query"
@ -261,7 +287,8 @@ class Ostro(object):
vm_id_list = [] vm_id_list = []
for lgk, lg in self.resource.logical_groups.iteritems(): for lgk, lg in self.resource.logical_groups.iteritems():
if lg.group_type == "EX" or lg.group_type == "AFF" or lg.group_type == "DIV": if lg.group_type == "EX" or lg.group_type == "AFF" or \
lg.group_type == "DIV":
lg_id = lgk.split(":") lg_id = lgk.split(":")
if lg_id[1] == _group_name: if lg_id[1] == _group_name:
vm_id_list = lg.vm_list vm_id_list = lg.vm_list
@ -282,14 +309,15 @@ class Ostro(object):
return logical_groups return logical_groups
def _place_app(self, _app_data): def _place_app(self, _app_data):
''' set application topology ''' """Set application topology."""
app_topology = self.app_handler.add_app(_app_data) app_topology = self.app_handler.add_app(_app_data)
if app_topology is None: if app_topology is None:
self.status = self.app_handler.status self.status = self.app_handler.status
self.logger.debug("Ostro._place_app: error while register requested apps: " + self.status) self.logger.debug("Ostro._place_app: error while register "
"requested apps: " + self.status)
return None return None
''' check and set vm flavor information ''' """Check and set vm flavor information."""
for _, vm in app_topology.vms.iteritems(): for _, vm in app_topology.vms.iteritems():
if self._set_vm_flavor_information(vm) is False: if self._set_vm_flavor_information(vm) is False:
self.status = "fail to set flavor information" self.status = "fail to set flavor information"
@ -301,22 +329,25 @@ class Ostro(object):
self.logger.error("Ostro._place_app: " + self.status) self.logger.error("Ostro._place_app: " + self.status)
return None return None
''' set weights for optimization ''' """Set weights for optimization."""
app_topology.set_weight() app_topology.set_weight()
app_topology.set_optimization_priority() app_topology.set_optimization_priority()
''' perform search for optimal placement of app topology ''' """Perform search for optimal placement of app topology."""
placement_map = self.optimizer.place(app_topology) placement_map = self.optimizer.place(app_topology)
if placement_map is None: if placement_map is None:
self.status = self.optimizer.status self.status = self.optimizer.status
self.logger.debug("Ostro._place_app: error while optimizing app placement: " + self.status) self.logger.debug("Ostro._place_app: error while optimizing app "
"placement: " + self.status)
return None return None
''' update resource and app information ''' """Update resource and app information."""
if len(placement_map) > 0: if len(placement_map) > 0:
self.resource.update_topology() self.resource.update_topology()
self.app_handler.add_placement(placement_map, self.resource.current_timestamp) self.app_handler.add_placement(placement_map,
if len(app_topology.exclusion_list_map) > 0 and len(app_topology.planned_vm_map) > 0: self.resource.current_timestamp)
if len(app_topology.exclusion_list_map) > 0 and \
len(app_topology.planned_vm_map) > 0:
for vk in app_topology.planned_vm_map.keys(): for vk in app_topology.planned_vm_map.keys():
if vk in placement_map.keys(): if vk in placement_map.keys():
del placement_map[vk] del placement_map[vk]
@ -336,9 +367,10 @@ class Ostro(object):
flavor = self.resource.get_flavor(_vm.flavor) flavor = self.resource.get_flavor(_vm.flavor)
if flavor is None: if flavor is None:
self.logger.warn("Ostro._set_vm_flavor_properties: does not exist flavor (" + _vm.flavor + ") and try to refetch") self.logger.warn("Ostro._set_vm_flavor_properties: does not exist "
"flavor (" + _vm.flavor + ") and try to refetch")
''' reset flavor resource and try again ''' """Reset flavor resource and try again."""
if self._set_flavors() is False: if self._set_flavors() is False:
return False return False
self.resource.update_topology() self.resource.update_topology()
@ -359,6 +391,10 @@ class Ostro(object):
return True return True
def handle_events(self, _event_list): def handle_events(self, _event_list):
"""Handle events in the event list."""
"""Update the engine's resource topology based on the properties of
each event in the event list.
"""
self.data_lock.acquire() self.data_lock.acquire()
resource_updated = False resource_updated = False
@ -366,101 +402,131 @@ class Ostro(object):
for e in _event_list: for e in _event_list:
if e.host is not None and e.host != "none": if e.host is not None and e.host != "none":
if self._check_host(e.host) is False: if self._check_host(e.host) is False:
self.logger.warn("Ostro.handle_events: host (" + e.host + ") related to this event not exists") self.logger.warn("Ostro.handle_events: host (" + e.host +
") related to this event not exists")
continue continue
if e.method == "build_and_run_instance": # VM is created (from stack) if e.method == "build_and_run_instance":
# VM is created (from stack)
self.logger.debug("Ostro.handle_events: got build_and_run event") self.logger.debug("Ostro.handle_events: got build_and_run event")
if self.db.put_uuid(e) is False: if self.db.put_uuid(e) is False:
self.data_lock.release() self.data_lock.release()
return False return False
elif e.method == "object_action": elif e.method == "object_action":
if e.object_name == 'Instance': # VM became active or deleted if e.object_name == 'Instance':
# VM became active or deleted
orch_id = self.db.get_uuid(e.uuid) orch_id = self.db.get_uuid(e.uuid)
if orch_id is None: if orch_id is None:
self.data_lock.release() self.data_lock.release()
return False return False
if e.vm_state == "active": if e.vm_state == "active":
self.logger.debug("Ostro.handle_events: got instance_active event") self.logger.debug("Ostro.handle_events: got instance_"
"active event")
vm_info = self.app_handler.get_vm_info(orch_id[1], orch_id[0], e.host) vm_info = self.app_handler.get_vm_info(orch_id[1], orch_id[0], e.host)
if vm_info is None: if vm_info is None:
self.logger.error("Ostro.handle_events: error while getting app info from MUSIC") self.logger.error("Ostro.handle_events: error "
"while getting app info from MUSIC")
self.data_lock.release() self.data_lock.release()
return False return False
if len(vm_info) == 0: if len(vm_info) == 0:
''' """
h_uuid is None or "none" because vm is not created by stack h_uuid is None or "none" because vm is not created
or, stack not found because vm is created by the other stack by stack or, stack not found because vm is created
''' by the other stack
self.logger.warn("Ostro.handle_events: no vm_info found in app placement record") """
self._add_vm_to_host(e.uuid, orch_id[0], e.host, e.vcpus, e.mem, e.local_disk) self.logger.warn("Ostro.handle_events: no vm_info "
"found in app placement record")
self._add_vm_to_host(e.uuid, orch_id[0], e.host,
e.vcpus, e.mem, e.local_disk)
else: else:
if "planned_host" in vm_info.keys() and vm_info["planned_host"] != e.host: if "planned_host" in vm_info.keys() and \
''' vm_info["planned_host"] != e.host:
"""
vm is activated in the different host vm is activated in the different host
''' """
self.logger.warn("Ostro.handle_events: vm activated in the different host") self.logger.warn("Ostro.handle_events: vm "
self._add_vm_to_host(e.uuid, orch_id[0], e.host, e.vcpus, e.mem, e.local_disk) "activated in the different "
"host")
self._add_vm_to_host(
e.uuid, orch_id[0], e.host, e.vcpus, e.mem,
e.local_disk)
self._remove_vm_from_host(e.uuid, orch_id[0], self._remove_vm_from_host(
vm_info["planned_host"], e.uuid, orch_id[0], vm_info["planned_host"],
float(vm_info["cpus"]), float(vm_info["cpus"]),
float(vm_info["mem"]), float(vm_info["mem"]),
float(vm_info["local_volume"])) float(vm_info["local_volume"]))
self._remove_vm_from_logical_groups(e.uuid, orch_id[0], vm_info["planned_host"]) self._remove_vm_from_logical_groups(
e.uuid, orch_id[0], vm_info["planned_host"])
else: else:
''' """
found vm in the planned host, found vm in the planned host,
possibly the vm deleted in the host while batch cleanup possibly the vm deleted in the host while batch cleanup
''' """
if self._check_h_uuid(orch_id[0], e.host) is False: if self._check_h_uuid(orch_id[0], e.host) \
self.logger.debug("Ostro.handle_events: planned vm was deleted") is False:
self.logger.debug("Ostro.handle_events: "
"planned vm was deleted")
if self._check_uuid(e.uuid, e.host) is True: if self._check_uuid(e.uuid, e.host) is True:
self._update_h_uuid_in_host(orch_id[0], e.uuid, e.host) self._update_h_uuid_in_host(orch_id[0],
self._update_h_uuid_in_logical_groups(orch_id[0], e.uuid, e.host) e.uuid,
e.host)
self._update_h_uuid_in_logical_groups(
orch_id[0], e.uuid, e.host)
else: else:
self.logger.debug("Ostro.handle_events: vm activated as planned") self.logger.debug("Ostro.handle_events: vm "
self._update_uuid_in_host(orch_id[0], e.uuid, e.host) "activated as planned")
self._update_uuid_in_logical_groups(orch_id[0], e.uuid, e.host) self._update_uuid_in_host(orch_id[0],
e.uuid, e.host)
self._update_uuid_in_logical_groups(
orch_id[0], e.uuid, e.host)
resource_updated = True resource_updated = True
elif e.vm_state == "deleted": elif e.vm_state == "deleted":
self.logger.debug("Ostro.handle_events: got instance_delete event") self.logger.debug("Ostro.handle_events: got instance_"
"delete event")
self._remove_vm_from_host(e.uuid, orch_id[0], e.host, e.vcpus, e.mem, e.local_disk) self._remove_vm_from_host(e.uuid, orch_id[0], e.host,
self._remove_vm_from_logical_groups(e.uuid, orch_id[0], e.host) e.vcpus, e.mem, e.local_disk)
self._remove_vm_from_logical_groups(e.uuid, orch_id[0],
e.host)
if self.app_handler.update_vm_info(orch_id[1], orch_id[0]) is False: if self.app_handler.update_vm_info(orch_id[1],
self.logger.error("Ostro.handle_events: error while updating app in MUSIC") orch_id[0]) is False:
self.logger.error("Ostro.handle_events: error "
"while updating app in MUSIC")
self.data_lock.release() self.data_lock.release()
return False return False
resource_updated = True resource_updated = True
else: else:
self.logger.warn("Ostro.handle_events: unknown vm_state = " + e.vm_state) self.logger.warn("Ostro.handle_events: unknown vm_"
"state = " + e.vm_state)
elif e.object_name == 'ComputeNode': # Host resource is updated elif e.object_name == 'ComputeNode':
# Host resource is updated
self.logger.debug("Ostro.handle_events: got compute event") self.logger.debug("Ostro.handle_events: got compute event")
# NOTE: what if host is disabled? # NOTE: what if host is disabled?
if self.resource.update_host_resources(e.host, e.status, if self.resource.update_host_resources(
e.vcpus, e.vcpus_used, e.host, e.status, e.vcpus, e.vcpus_used, e.mem,
e.mem, e.free_mem, e.free_mem, e.local_disk, e.free_local_disk,
e.local_disk, e.free_local_disk,
e.disk_available_least) is True: e.disk_available_least) is True:
self.resource.update_host_time(e.host) self.resource.update_host_time(e.host)
resource_updated = True resource_updated = True
else: else:
self.logger.warn("Ostro.handle_events: unknown object_name = " + e.object_name) self.logger.warn("Ostro.handle_events: unknown object_"
"name = " + e.object_name)
else: else:
self.logger.warn("Ostro.handle_events: unknown event method = " + e.method) self.logger.warn("Ostro.handle_events: unknown event "
"method = " + e.method)
if resource_updated is True: if resource_updated is True:
self.resource.update_topology() self.resource.update_topology()
@ -480,23 +546,30 @@ class Ostro(object):
return True return True
def _add_vm_to_host(self, _uuid, _h_uuid, _host_name, _vcpus, _mem, _local_disk): def _add_vm_to_host(self, _uuid, _h_uuid, _host_name, _vcpus, _mem,
_local_disk):
vm_id = None vm_id = None
if _h_uuid is None: if _h_uuid is None:
vm_id = ("none", "none", _uuid) vm_id = ("none", "none", _uuid)
else: else:
vm_id = (_h_uuid, "none", _uuid) vm_id = (_h_uuid, "none", _uuid)
self.resource.add_vm_to_host(_host_name, vm_id, _vcpus, _mem, _local_disk) self.resource.add_vm_to_host(_host_name, vm_id, _vcpus, _mem,
_local_disk)
self.resource.update_host_time(_host_name) self.resource.update_host_time(_host_name)
def _remove_vm_from_host(self, _uuid, _h_uuid, _host_name, _vcpus, _mem, _local_disk): def _remove_vm_from_host(self, _uuid, _h_uuid, _host_name, _vcpus, _mem,
_local_disk):
if self._check_h_uuid(_h_uuid, _host_name) is True: if self._check_h_uuid(_h_uuid, _host_name) is True:
self.resource.remove_vm_by_h_uuid_from_host(_host_name, _h_uuid, _vcpus, _mem, _local_disk) self.resource.remove_vm_by_h_uuid_from_host(_host_name, _h_uuid,
_vcpus, _mem,
_local_disk)
self.resource.update_host_time(_host_name) self.resource.update_host_time(_host_name)
else: else:
if self._check_uuid(_uuid, _host_name) is True: if self._check_uuid(_uuid, _host_name) is True:
self.resource.remove_vm_by_uuid_from_host(_host_name, _uuid, _vcpus, _mem, _local_disk) self.resource.remove_vm_by_uuid_from_host(_host_name, _uuid,
_vcpus, _mem,
_local_disk)
self.resource.update_host_time(_host_name) self.resource.update_host_time(_host_name)
def _remove_vm_from_logical_groups(self, _uuid, _h_uuid, _host_name): def _remove_vm_from_logical_groups(self, _uuid, _h_uuid, _host_name):
@ -537,7 +610,8 @@ class Ostro(object):
if host.update_uuid(_h_uuid, _uuid) is True: if host.update_uuid(_h_uuid, _uuid) is True:
self.resource.update_host_time(_host_name) self.resource.update_host_time(_host_name)
else: else:
self.logger.warn("Ostro._update_uuid_in_host: fail to update uuid in host = " + host.name) self.logger.warn("Ostro._update_uuid_in_host: fail to update uuid "
"in host = " + host.name)
def _update_h_uuid_in_host(self, _h_uuid, _uuid, _host_name): def _update_h_uuid_in_host(self, _h_uuid, _uuid, _host_name):
host = self.resource.hosts[_host_name] host = self.resource.hosts[_host_name]
@ -554,7 +628,8 @@ class Ostro(object):
self.resource.update_h_uuid_in_logical_groups(_h_uuid, _uuid, host) self.resource.update_h_uuid_in_logical_groups(_h_uuid, _uuid, host)
def _get_json_results(self, _request_type, _status_type, _status_message, _map): def _get_json_results(self, _request_type, _status_type, _status_message,
_map):
result = {} result = {}
if _request_type == "query": if _request_type == "query":

File diff suppressed because it is too large Load Diff

View File

@ -13,27 +13,55 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
from valet.engine.optimizer.app_manager.app_topology_base import VGroup, VM, Volume, LEVELS """Resources utlized by search engine."""
from valet.engine.optimizer.app_manager.app_topology_base \
import VGroup, VM, Volume, LEVELS
class Resource(object): class Resource(object):
"""Resource."""
def __init__(self): def __init__(self):
self.level = None # level of placement """Initialization."""
# level of placement
self.level = None
self.host_name = None self.host_name = None
self.host_memberships = {} # all mapped logical groups to host
self.host_vCPUs = 0 # original total vCPUs before overcommit
self.host_avail_vCPUs = 0 # remaining vCPUs after overcommit
self.host_mem = 0 # original total mem cap before overcommit
self.host_avail_mem = 0 # remaining mem cap after
self.host_local_disk = 0 # original total local disk cap before overcommit
self.host_avail_local_disk = 0 # remaining local disk cap after overcommit
self.host_avail_switches = {} # all mapped switches to host
self.host_avail_storages = {} # all mapped storage_resources to host
self.host_num_of_placed_vms = 0 # the number of vms currently placed in this host
self.rack_name = None # where this host is located # all mapped logical groups to host
self.host_memberships = {}
# original total vCPUs before overcommit
self.host_vCPUs = 0
# remaining vCPUs after overcommit
self.host_avail_vCPUs = 0
# original total mem cap before overcommit
self.host_mem = 0
# remaining mem cap after
self.host_avail_mem = 0
# original total local disk cap before overcommit
self.host_local_disk = 0
# remaining local disk cap after overcommit
self.host_avail_local_disk = 0
# all mapped switches to host
self.host_avail_switches = {}
# all mapped storage_resources to host
self.host_avail_storages = {}
# the number of vms currently placed in this host
self.host_num_of_placed_vms = 0
# where this host is located
self.rack_name = None
self.rack_memberships = {} self.rack_memberships = {}
self.rack_vCPUs = 0 self.rack_vCPUs = 0
self.rack_avail_vCPUs = 0 self.rack_avail_vCPUs = 0
@ -41,11 +69,18 @@ class Resource(object):
self.rack_avail_mem = 0 self.rack_avail_mem = 0
self.rack_local_disk = 0 self.rack_local_disk = 0
self.rack_avail_local_disk = 0 self.rack_avail_local_disk = 0
self.rack_avail_switches = {} # all mapped switches to rack
self.rack_avail_storages = {} # all mapped storage_resources to rack # all mapped switches to rack
self.rack_avail_switches = {}
# all mapped storage_resources to rack
self.rack_avail_storages = {}
self.rack_num_of_placed_vms = 0 self.rack_num_of_placed_vms = 0
self.cluster_name = None # where this host and rack are located # where this host and rack are located
self.cluster_name = None
self.cluster_memberships = {} self.cluster_memberships = {}
self.cluster_vCPUs = 0 self.cluster_vCPUs = 0
self.cluster_avail_vCPUs = 0 self.cluster_avail_vCPUs = 0
@ -53,15 +88,24 @@ class Resource(object):
self.cluster_avail_mem = 0 self.cluster_avail_mem = 0
self.cluster_local_disk = 0 self.cluster_local_disk = 0
self.cluster_avail_local_disk = 0 self.cluster_avail_local_disk = 0
self.cluster_avail_switches = {} # all mapped switches to cluster
self.cluster_avail_storages = {} # all mapped storage_resources to cluster # all mapped switches to cluster
self.cluster_avail_switches = {}
# all mapped storage_resources to cluster
self.cluster_avail_storages = {}
self.cluster_num_of_placed_vms = 0 self.cluster_num_of_placed_vms = 0
self.storage = None # selected best storage for volume among host_avail_storages # selected best storage for volume among host_avail_storages
self.storage = None
self.sort_base = 0 # order to place # order to place
self.sort_base = 0
def get_common_placement(self, _resource): def get_common_placement(self, _resource):
"""Get common placement level."""
"""Get the common level between this resource and the one
provided."""
level = None level = None
if self.cluster_name != _resource.cluster_name: if self.cluster_name != _resource.cluster_name:
@ -78,6 +122,7 @@ class Resource(object):
return level return level
def get_resource_name(self, _level): def get_resource_name(self, _level):
"""Get the name of this resource at the specified level."""
name = "unknown" name = "unknown"
if _level == "cluster": if _level == "cluster":
@ -90,6 +135,7 @@ class Resource(object):
return name return name
def get_memberships(self, _level): def get_memberships(self, _level):
"""Get the memberships of this resource at the specified level."""
memberships = None memberships = None
if _level == "cluster": if _level == "cluster":
@ -102,6 +148,7 @@ class Resource(object):
return memberships return memberships
def get_num_of_placed_vms(self, _level): def get_num_of_placed_vms(self, _level):
"""Get the number of placed vms of this resource at the specified level."""
num_of_vms = 0 num_of_vms = 0
if _level == "cluster": if _level == "cluster":
@ -114,6 +161,7 @@ class Resource(object):
return num_of_vms return num_of_vms
def get_avail_resources(self, _level): def get_avail_resources(self, _level):
"""Get the available vCPUs, memory, local disk of this resource at the specified level."""
avail_vCPUs = 0 avail_vCPUs = 0
avail_mem = 0 avail_mem = 0
avail_local_disk = 0 avail_local_disk = 0
@ -134,6 +182,7 @@ class Resource(object):
return (avail_vCPUs, avail_mem, avail_local_disk) return (avail_vCPUs, avail_mem, avail_local_disk)
def get_local_disk(self, _level): def get_local_disk(self, _level):
"""Get the local disk and available local disk of this resource at the specified level."""
local_disk = 0 local_disk = 0
avail_local_disk = 0 avail_local_disk = 0
@ -150,6 +199,7 @@ class Resource(object):
return (local_disk, avail_local_disk) return (local_disk, avail_local_disk)
def get_vCPUs(self, _level): def get_vCPUs(self, _level):
"""Get the vCPUs and available vCPUs of this resource at the specified level."""
vCPUs = 0 vCPUs = 0
avail_vCPUs = 0 avail_vCPUs = 0
@ -166,6 +216,7 @@ class Resource(object):
return (vCPUs, avail_vCPUs) return (vCPUs, avail_vCPUs)
def get_mem(self, _level): def get_mem(self, _level):
"""Get the memory and available memory of this resource at the specified level."""
mem = 0 mem = 0
avail_mem = 0 avail_mem = 0
@ -182,6 +233,7 @@ class Resource(object):
return (mem, avail_mem) return (mem, avail_mem)
def get_avail_storages(self, _level): def get_avail_storages(self, _level):
"""Get the available storages of this resource at the specified level."""
avail_storages = None avail_storages = None
if _level == "cluster": if _level == "cluster":
@ -194,6 +246,7 @@ class Resource(object):
return avail_storages return avail_storages
def get_avail_switches(self, _level): def get_avail_switches(self, _level):
"""Get the available switches of this resource at the specified level."""
avail_switches = None avail_switches = None
if _level == "cluster": if _level == "cluster":
@ -207,20 +260,26 @@ class Resource(object):
class LogicalGroupResource(object): class LogicalGroupResource(object):
"""LogicalGroupResource."""
def __init__(self): def __init__(self):
"""Initialization."""
self.name = None self.name = None
self.group_type = "AGGR" self.group_type = "AGGR"
self.metadata = {} self.metadata = {}
self.num_of_placed_vms = 0 self.num_of_placed_vms = 0
self.num_of_placed_vms_per_host = {} # key = host (i.e., id of host or rack), value = num_of_placed_vms
# key = host (i.e., id of host or rack), value = num_of_placed_vms
self.num_of_placed_vms_per_host = {}
class StorageResource(object): class StorageResource(object):
"""StorageResource."""
def __init__(self): def __init__(self):
"""Initialization."""
self.storage_name = None self.storage_name = None
self.storage_class = None self.storage_class = None
self.storage_avail_disk = 0 self.storage_avail_disk = 0
@ -229,8 +288,10 @@ class StorageResource(object):
class SwitchResource(object): class SwitchResource(object):
"""SwitchResource."""
def __init__(self): def __init__(self):
"""Initialization."""
self.switch_name = None self.switch_name = None
self.switch_type = None self.switch_type = None
self.avail_bandwidths = [] # out-bound bandwidths self.avail_bandwidths = [] # out-bound bandwidths
@ -239,13 +300,16 @@ class SwitchResource(object):
class Node(object): class Node(object):
"""Node."""
def __init__(self): def __init__(self):
"""Initialization."""
self.node = None # VM, Volume, or VGroup self.node = None # VM, Volume, or VGroup
self.sort_base = -1 self.sort_base = -1
def get_all_links(self): def get_all_links(self):
"""Return a list of links for vms, volumes, and/or vgroups."""
link_list = [] link_list = []
if isinstance(self.node, VM): if isinstance(self.node, VM):
@ -263,6 +327,7 @@ class Node(object):
return link_list return link_list
def get_bandwidth_of_link(self, _link): def get_bandwidth_of_link(self, _link):
"""Return bandwidth of link."""
bandwidth = 0 bandwidth = 0
if isinstance(self.node, VGroup) or isinstance(self.node, VM): if isinstance(self.node, VGroup) or isinstance(self.node, VM):
@ -276,6 +341,7 @@ class Node(object):
return bandwidth return bandwidth
def get_common_diversity(self, _diversity_groups): def get_common_diversity(self, _diversity_groups):
"""Return the common level of the given diversity groups."""
common_level = "ANY" common_level = "ANY"
for dk in self.node.diversity_groups.keys(): for dk in self.node.diversity_groups.keys():
@ -290,9 +356,11 @@ class Node(object):
return common_level return common_level
def get_affinity_id(self): def get_affinity_id(self):
"""Return the affinity id."""
aff_id = None aff_id = None
if isinstance(self.node, VGroup) and self.node.vgroup_type == "AFF" and \ if isinstance(self.node, VGroup) and \
self.node.vgroup_type == "AFF" and \
self.node.name != "any": self.node.name != "any":
aff_id = self.node.level + ":" + self.node.name aff_id = self.node.level + ":" + self.node.name
@ -300,6 +368,7 @@ class Node(object):
def compute_reservation(_level, _placement_level, _bandwidth): def compute_reservation(_level, _placement_level, _bandwidth):
"""Compute and return the reservation."""
reservation = 0 reservation = 0
if _placement_level != "ANY": if _placement_level != "ANY":

View File

@ -15,6 +15,8 @@
# - Set all configurations to run Ostro # - Set all configurations to run Ostro
"""Valet Engine Server Configuration."""
import os import os
from oslo_config import cfg from oslo_config import cfg
from valet.engine.conf import register_conf from valet.engine.conf import register_conf
@ -24,9 +26,10 @@ CONF = cfg.CONF
class Config(object): class Config(object):
"""Valet Engine Server Configuration."""
def __init__(self, *default_config_files): def __init__(self, *default_config_files):
"""Initialization."""
register_conf() register_conf()
if default_config_files: if default_config_files:
CONF(default_config_files=default_config_files) CONF(default_config_files=default_config_files)
@ -126,7 +129,7 @@ class Config(object):
self.base_flavor_disk = 0 self.base_flavor_disk = 0
def configure(self): def configure(self):
"""Store config info extracted from oslo."""
status = self._init_system() status = self._init_system()
if status != "success": if status != "success":
return status return status
@ -181,17 +184,21 @@ class Config(object):
self.network_control_url = CONF.engine.network_control_url self.network_control_url = CONF.engine.network_control_url
self.default_cpu_allocation_ratio = CONF.engine.default_cpu_allocation_ratio self.default_cpu_allocation_ratio = \
CONF.engine.default_cpu_allocation_ratio
self.default_ram_allocation_ratio = CONF.engine.default_ram_allocation_ratio self.default_ram_allocation_ratio = \
CONF.engine.default_ram_allocation_ratio
self.default_disk_allocation_ratio = CONF.engine.default_disk_allocation_ratio self.default_disk_allocation_ratio = \
CONF.engine.default_disk_allocation_ratio
self.static_cpu_standby_ratio = CONF.engine.static_cpu_standby_ratio self.static_cpu_standby_ratio = CONF.engine.static_cpu_standby_ratio
self.static_mem_standby_ratio = CONF.engine.static_mem_standby_ratio self.static_mem_standby_ratio = CONF.engine.static_mem_standby_ratio
self.static_local_disk_standby_ratio = CONF.engine.static_local_disk_standby_ratio self.static_local_disk_standby_ratio = \
CONF.engine.static_local_disk_standby_ratio
self.topology_trigger_time = CONF.engine.topology_trigger_time self.topology_trigger_time = CONF.engine.topology_trigger_time

View File

@ -13,6 +13,8 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
"""Generic Daemon."""
import atexit import atexit
import os import os
from signal import SIGTERM from signal import SIGTERM
@ -21,12 +23,14 @@ import time
class Daemon(object): class Daemon(object):
""" A generic daemon class. """A generic daemon class."""
Usage: subclass the Daemon class and override the run() method """Usage: subclass the Daemon class and override the run() method
""" """
def __init__(self, priority, pidfile, logger, stdin='/dev/null', stdout='/dev/null', stderr='/dev/null'): def __init__(self, priority, pidfile, logger, stdin='/dev/null',
stdout='/dev/null', stderr='/dev/null'):
"""Initialization."""
self.stdin = stdin self.stdin = stdin
self.stdout = stdout self.stdout = stdout
self.stderr = stderr self.stderr = stderr
@ -35,9 +39,9 @@ class Daemon(object):
self.logger = logger self.logger = logger
def daemonize(self): def daemonize(self):
""" Do the UNIX double-fork magic, see Stevens' "Advanced """Do the UNIX double-fork magic."""
"""See Stevens' "Advanced Programming in the UNIX Environment"
Programming in the UNIX Environment" for details (ISBN 0201563177) for details. (ISBN 0201563177).
http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16 http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
""" """
try: try:
@ -47,7 +51,8 @@ class Daemon(object):
sys.exit(0) sys.exit(0)
except OSError as e: except OSError as e:
self.logger.error("Daemon error at step1: " + e.strerror) self.logger.error("Daemon error at step1: " + e.strerror)
sys.stderr.write("fork #1 failed: %d (%s)\n" % (e.errno, e.strerror)) sys.stderr.write("fork #1 failed: %d (%s)\n" %
(e.errno, e.strerror))
sys.exit(1) sys.exit(1)
# decouple from parent environment # decouple from parent environment
@ -63,7 +68,8 @@ class Daemon(object):
sys.exit(0) sys.exit(0)
except OSError as e: except OSError as e:
self.logger.error("Daemon error at step2: " + e.strerror) self.logger.error("Daemon error at step2: " + e.strerror)
sys.stderr.write("fork #2 failed: %d (%s)\n" % (e.errno, e.strerror)) sys.stderr.write("fork #2 failed: %d (%s)\n" %
(e.errno, e.strerror))
sys.exit(1) sys.exit(1)
# redirect standard file descriptors # redirect standard file descriptors
@ -82,10 +88,11 @@ class Daemon(object):
file(self.pidfile, 'w+').write("%s\n" % pid) file(self.pidfile, 'w+').write("%s\n" % pid)
def delpid(self): def delpid(self):
"""Remove pidfile."""
os.remove(self.pidfile) os.remove(self.pidfile)
def getpid(self): def getpid(self):
"""returns the content of pidfile or None.""" """Return the content of pidfile or None."""
try: try:
pf = file(self.pidfile, 'r') pf = file(self.pidfile, 'r')
pid = int(pf.read().strip()) pid = int(pf.read().strip())
@ -95,7 +102,7 @@ class Daemon(object):
return pid return pid
def checkpid(self, pid): def checkpid(self, pid):
""" Check For the existence of a unix pid. """ """Check for the existence of a UNIX pid."""
if pid is None: if pid is None:
return False return False
@ -108,7 +115,7 @@ class Daemon(object):
return True return True
def start(self): def start(self):
"""Start the daemon""" """Start thedaemon."""
# Check for a pidfile to see if the daemon already runs # Check for a pidfile to see if the daemon already runs
pid = self.getpid() pid = self.getpid()
@ -122,7 +129,7 @@ class Daemon(object):
self.run() self.run()
def stop(self): def stop(self):
"""Stop the daemon""" """Stop the daemon."""
# Get the pid from the pidfile # Get the pid from the pidfile
pid = self.getpid() pid = self.getpid()
@ -146,12 +153,12 @@ class Daemon(object):
sys.exit(1) sys.exit(1)
def restart(self): def restart(self):
"""Restart the daemon""" """Restart the daemon."""
self.stop() self.stop()
self.start() self.start()
def status(self): def status(self):
""" returns instance's priority """ """Return instance's priority."""
# Check for a pidfile to see if the daemon already runs # Check for a pidfile to see if the daemon already runs
pid = self.getpid() pid = self.getpid()
@ -161,13 +168,14 @@ class Daemon(object):
message = "status: pidfile %s exist. Daemon is running\n" message = "status: pidfile %s exist. Daemon is running\n"
status = self.priority status = self.priority
else: else:
message = "status: pidfile %s does not exist. Daemon is not running\n" message = "status: pidfile %s does not exist. Daemon is not " \
"running\n"
sys.stderr.write(message % self.pidfile) sys.stderr.write(message % self.pidfile)
return status return status
def run(self): def run(self):
""" You should override this method when you subclass Daemon. """You should override this method when you subclass Daemon."""
"""It will be called after the process has been daemonized by
It will be called after the process has been daemonized by start() or restart(). start() or restart().
""" """

View File

@ -15,28 +15,38 @@
# - Handle user requests # - Handle user requests
import sys """Database Cleaner."""
from configuration import Config from configuration import Config
import sys
from valet.api.db.models.music import Music from valet.api.db.models.music import Music
class DBCleaner(object): class DBCleaner(object):
"""Database Cleaner."""
def __init__(self, _config): def __init__(self, _config):
"""Initialization."""
self.config = _config self.config = _config
self.music = Music() self.music = Music()
def clean_db_tables(self): def clean_db_tables(self):
results = self.music.read_all_rows(self.config.db_keyspace, self.config.db_resource_table) """Clean tables in Music."""
"""Clean resource, resource_index, request, response, event,
app, app_index, and uuid tables.
"""
results = self.music.read_all_rows(self.config.db_keyspace,
self.config.db_resource_table)
if len(results) > 0: if len(results) > 0:
print("resource table result = ", len(results)) print("resource table result = ", len(results))
for _, row in results.iteritems(): for _, row in results.iteritems():
self.music.delete_row_eventually(self.config.db_keyspace, self.config.db_resource_table, 'site_name', row['site_name']) self.music.delete_row_eventually(self.config.db_keyspace,
self.config.db_resource_table,
'site_name', row['site_name'])
results = self.music.read_all_rows(self.config.db_keyspace, self.config.db_request_table) results = self.music.read_all_rows(self.config.db_keyspace,
self.config.db_request_table)
if len(results) > 0: if len(results) > 0:
print("request table result = ", len(results)) print("request table result = ", len(results))
for _, row in results.iteritems(): for _, row in results.iteritems():
@ -44,7 +54,8 @@ class DBCleaner(object):
self.config.db_request_table, self.config.db_request_table,
'stack_id', row['stack_id']) 'stack_id', row['stack_id'])
results = self.music.read_all_rows(self.config.db_keyspace, self.config.db_response_table) results = self.music.read_all_rows(self.config.db_keyspace,
self.config.db_response_table)
if len(results) > 0: if len(results) > 0:
print("response table result = ", len(results)) print("response table result = ", len(results))
for _, row in results.iteritems(): for _, row in results.iteritems():
@ -52,7 +63,8 @@ class DBCleaner(object):
self.config.db_response_table, self.config.db_response_table,
'stack_id', row['stack_id']) 'stack_id', row['stack_id'])
results = self.music.read_all_rows(self.config.db_keyspace, self.config.db_event_table) results = self.music.read_all_rows(self.config.db_keyspace,
self.config.db_event_table)
if len(results) > 0: if len(results) > 0:
print("event table result = ", len(results)) print("event table result = ", len(results))
for _, row in results.iteritems(): for _, row in results.iteritems():
@ -60,15 +72,18 @@ class DBCleaner(object):
self.config.db_event_table, self.config.db_event_table,
'timestamp', row['timestamp']) 'timestamp', row['timestamp'])
results = self.music.read_all_rows(self.config.db_keyspace, self.config.db_resource_index_table) results = self.music.read_all_rows(self.config.db_keyspace,
self.config.db_resource_index_table)
if len(results) > 0: if len(results) > 0:
print("resource_index table result = ", len(results)) print("resource_index table result = ", len(results))
for _, row in results.iteritems(): for _, row in results.iteritems():
self.music.delete_row_eventually(self.config.db_keyspace, self.music.delete_row_eventually(
self.config.db_keyspace,
self.config.db_resource_index_table, self.config.db_resource_index_table,
'site_name', row['site_name']) 'site_name', row['site_name'])
results = self.music.read_all_rows(self.config.db_keyspace, self.config.db_app_index_table) results = self.music.read_all_rows(self.config.db_keyspace,
self.config.db_app_index_table)
if len(results) > 0: if len(results) > 0:
print("app_index table result = ", len(results)) print("app_index table result = ", len(results))
for _, row in results.iteritems(): for _, row in results.iteritems():
@ -76,7 +91,8 @@ class DBCleaner(object):
self.config.db_app_index_table, self.config.db_app_index_table,
'site_name', row['site_name']) 'site_name', row['site_name'])
results = self.music.read_all_rows(self.config.db_keyspace, self.config.db_app_table) results = self.music.read_all_rows(self.config.db_keyspace,
self.config.db_app_table)
if len(results) > 0: if len(results) > 0:
print("app table result = ", len(results)) print("app table result = ", len(results))
for _, row in results.iteritems(): for _, row in results.iteritems():
@ -84,7 +100,8 @@ class DBCleaner(object):
self.config.db_app_table, self.config.db_app_table,
'stack_id', row['stack_id']) 'stack_id', row['stack_id'])
results = self.music.read_all_rows(self.config.db_keyspace, self.config.db_uuid_table) results = self.music.read_all_rows(self.config.db_keyspace,
self.config.db_uuid_table)
if len(results) > 0: if len(results) > 0:
print("uuid table result = ", len(results)) print("uuid table result = ", len(results))
for _, row in results.iteritems(): for _, row in results.iteritems():
@ -93,49 +110,61 @@ class DBCleaner(object):
'uuid', row['uuid']) 'uuid', row['uuid'])
def check_db_tables(self): def check_db_tables(self):
results = self.music.read_all_rows(self.config.db_keyspace, self.config.db_resource_table) """Log whether tables in Music have been cleaned."""
"""Check resource, resource_index, request, response, event,
app, app_index, and uuid tables.
"""
results = self.music.read_all_rows(self.config.db_keyspace,
self.config.db_resource_table)
if len(results) > 0: if len(results) > 0:
print("resource table not cleaned ") print("resource table not cleaned ")
else: else:
print("resource table cleaned") print("resource table cleaned")
results = self.music.read_all_rows(self.config.db_keyspace, self.config.db_request_table) results = self.music.read_all_rows(self.config.db_keyspace,
self.config.db_request_table)
if len(results) > 0: if len(results) > 0:
print("request table not cleaned ") print("request table not cleaned ")
else: else:
print("request table cleaned") print("request table cleaned")
results = self.music.read_all_rows(self.config.db_keyspace, self.config.db_response_table) results = self.music.read_all_rows(self.config.db_keyspace,
self.config.db_response_table)
if len(results) > 0: if len(results) > 0:
print("response table not cleaned ") print("response table not cleaned ")
else: else:
print("response table cleaned") print("response table cleaned")
results = self.music.read_all_rows(self.config.db_keyspace, self.config.db_event_table) results = self.music.read_all_rows(self.config.db_keyspace,
self.config.db_event_table)
if len(results) > 0: if len(results) > 0:
print("event table not cleaned ") print("event table not cleaned ")
else: else:
print("event table cleaned") print("event table cleaned")
results = self.music.read_all_rows(self.config.db_keyspace, self.config.db_resource_index_table) results = self.music.read_all_rows(self.config.db_keyspace,
self.config.db_resource_index_table)
if len(results) > 0: if len(results) > 0:
print("resource log index table not cleaned ") print("resource log index table not cleaned ")
else: else:
print("resource log index table cleaned") print("resource log index table cleaned")
results = self.music.read_all_rows(self.config.db_keyspace, self.config.db_app_index_table) results = self.music.read_all_rows(self.config.db_keyspace,
self.config.db_app_index_table)
if len(results) > 0: if len(results) > 0:
print("app log index table not cleaned ") print("app log index table not cleaned ")
else: else:
print("app log index table cleaned") print("app log index table cleaned")
results = self.music.read_all_rows(self.config.db_keyspace, self.config.db_app_table) results = self.music.read_all_rows(self.config.db_keyspace,
self.config.db_app_table)
if len(results) > 0: if len(results) > 0:
print("app log table not cleaned ") print("app log table not cleaned ")
else: else:
print("app log table cleaned") print("app log table cleaned")
results = self.music.read_all_rows(self.config.db_keyspace, self.config.db_uuid_table) results = self.music.read_all_rows(self.config.db_keyspace,
self.config.db_uuid_table)
if len(results) > 0: if len(results) > 0:
print("uuid table not cleaned ") print("uuid table not cleaned ")
else: else:

View File

@ -13,19 +13,22 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
"""Daemon foe Valet Engine."""
import os import os
import sys import sys
import traceback import traceback
from valet.engine.optimizer.ostro.ostro import Ostro from valet.engine.optimizer.ostro.ostro import Ostro
from valet.engine.optimizer.ostro_server.configuration import Config from valet.engine.optimizer.ostro_server.configuration import Config
from valet.engine.optimizer.ostro_server.daemon import Daemon # implemented for Python v2.7 from valet.engine.optimizer.ostro_server.daemon import Daemon
from valet.engine.optimizer.util.util import init_logger from valet.engine.optimizer.util.util import init_logger
class OstroDaemon(Daemon): class OstroDaemon(Daemon):
"""Daemon foe Valet Engine."""
def run(self): def run(self):
"""Run the daemon."""
self.logger.info("##### Valet Engine is launched #####") self.logger.info("##### Valet Engine is launched #####")
try: try:
ostro = Ostro(config, self.logger) ostro = Ostro(config, self.logger)
@ -40,6 +43,7 @@ class OstroDaemon(Daemon):
def verify_dirs(list_of_dirs): def verify_dirs(list_of_dirs):
"""If a directory in the list does not exist, create it."""
for d in list_of_dirs: for d in list_of_dirs:
try: try:
if not os.path.exists(d): if not os.path.exists(d):
@ -50,7 +54,7 @@ def verify_dirs(list_of_dirs):
if __name__ == "__main__": if __name__ == "__main__":
''' configuration ''' """ configuration """
# Configuration # Configuration
try: try:
config = Config() config = Config()
@ -59,11 +63,12 @@ if __name__ == "__main__":
print(config_status) print(config_status)
sys.exit(2) sys.exit(2)
''' verify directories ''' """ verify directories """
dirs_list = [config.logging_loc, config.resource_log_loc, config.app_log_loc, os.path.dirname(config.process)] dirs_list = [config.logging_loc, config.resource_log_loc,
config.app_log_loc, os.path.dirname(config.process)]
verify_dirs(dirs_list) verify_dirs(dirs_list)
''' logger ''' """ logger """
logger = init_logger(config) logger = init_logger(config)
# Start daemon process # Start daemon process

View File

@ -1,4 +1,4 @@
# Version 2.0.2: Feb. 9, 2016 # Version 2.0.2
# Set simulation parameters # Set simulation parameters
num_of_spine_switches=0 num_of_spine_switches=0

View File

@ -13,6 +13,8 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
"""Util."""
from os import listdir, stat from os import listdir, stat
from os.path import isfile, join from os.path import isfile, join
import logging import logging
@ -20,6 +22,7 @@ from logging.handlers import RotatingFileHandler
def get_logfile(_loc, _max_log_size, _name): def get_logfile(_loc, _max_log_size, _name):
"""Get logfile from location and return with file mode."""
files = [f for f in listdir(_loc) if isfile(join(_loc, f))] files = [f for f in listdir(_loc) if isfile(join(_loc, f))]
logfile_index = 0 logfile_index = 0
@ -50,7 +53,9 @@ def get_logfile(_loc, _max_log_size, _name):
return (last_logfile, mode) return (last_logfile, mode)
def get_last_logfile(_loc, _max_log_size, _max_num_of_logs, _name, _last_index): def get_last_logfile(_loc, _max_log_size, _max_num_of_logs,
_name, _last_index):
"""Return last logfile from location with index and mode."""
last_logfile = _name + "_" + str(_last_index) + ".log" last_logfile = _name + "_" + str(_last_index) + ".log"
mode = None mode = None
@ -74,6 +79,7 @@ def get_last_logfile(_loc, _max_log_size, _max_num_of_logs, _name, _last_index):
def adjust_json_string(_data): def adjust_json_string(_data):
"""Adjust data value formatting to be consistent and return."""
_data = _data.replace("None", '"none"') _data = _data.replace("None", '"none"')
_data = _data.replace("False", '"false"') _data = _data.replace("False", '"false"')
_data = _data.replace("True", '"true"') _data = _data.replace("True", '"true"')
@ -85,7 +91,9 @@ def adjust_json_string(_data):
def init_logger(config): def init_logger(config):
log_formatter = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s") """Return an initialized logger."""
log_formatter = logging.Formatter("%(asctime)s - %(levelname)s - "
"%(message)s")
log_handler = RotatingFileHandler(config.logging_loc + config.logger_name, log_handler = RotatingFileHandler(config.logging_loc + config.logger_name,
mode='a', mode='a',
maxBytes=config.max_main_log_size, maxBytes=config.max_main_log_size,
@ -94,7 +102,8 @@ def init_logger(config):
delay=0) delay=0)
log_handler.setFormatter(log_formatter) log_handler.setFormatter(log_formatter)
logger = logging.getLogger(config.logger_name) logger = logging.getLogger(config.logger_name)
logger.setLevel(logging.DEBUG if config.logging_level == "debug" else logging.INFO) logger.setLevel(logging.DEBUG if config.logging_level == "debug"
else logging.INFO)
logger.addHandler(log_handler) logger.addHandler(log_handler)
return logger return logger

View File

@ -13,6 +13,8 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
"""Compute."""
from novaclient import client as nova_client from novaclient import client as nova_client
from oslo_config import cfg from oslo_config import cfg
from resource_base import Host, LogicalGroup, Flavor from resource_base import Host, LogicalGroup, Flavor
@ -25,12 +27,21 @@ CONF = cfg.CONF
class Compute(object): class Compute(object):
"""Compute Class.
This class performs functions of setting hosts, availability zones,
aggregates, placed vms, resources, flavors, etc.
Interacts with nova client to perform these actions.
"""
def __init__(self, _logger): def __init__(self, _logger):
"""Compute init."""
self.logger = _logger self.logger = _logger
self.nova = None self.nova = None
def set_hosts(self, _hosts, _logical_groups): def set_hosts(self, _hosts, _logical_groups):
"""Return success if az's, aggregates, vms, resources, all set."""
self._get_nova_client() self._get_nova_client()
status = self._set_availability_zones(_hosts, _logical_groups) status = self._set_availability_zones(_hosts, _logical_groups)
@ -56,7 +67,7 @@ class Compute(object):
return "success" return "success"
def _get_nova_client(self): def _get_nova_client(self):
'''Returns a nova client''' """Return a nova client."""
self.nova = nova_client.Client(VERSION, self.nova = nova_client.Client(VERSION,
CONF.identity.username, CONF.identity.username,
CONF.identity.password, CONF.identity.password,
@ -86,7 +97,8 @@ class Compute(object):
if host.name not in logical_group.vms_per_host.keys(): if host.name not in logical_group.vms_per_host.keys():
logical_group.vms_per_host[host.name] = [] logical_group.vms_per_host[host.name] = []
self.logger.info("adding Host LogicalGroup: " + str(host.__dict__)) self.logger.info("adding Host LogicalGroup: " +
str(host.__dict__))
_hosts[host.name] = host _hosts[host.name] = host
@ -114,7 +126,8 @@ class Compute(object):
metadata[mk] = a.metadata.get(mk) metadata[mk] = a.metadata.get(mk)
aggregate.metadata = metadata aggregate.metadata = metadata
self.logger.info("adding aggregate LogicalGroup: " + str(aggregate.__dict__)) self.logger.info("adding aggregate LogicalGroup: " +
str(aggregate.__dict__))
_logical_groups[aggregate.name] = aggregate _logical_groups[aggregate.name] = aggregate
@ -141,7 +154,8 @@ class Compute(object):
if result_status == "success": if result_status == "success":
for vm_uuid in vm_uuid_list: for vm_uuid in vm_uuid_list:
vm_detail = [] # (vm_name, az, metadata, status) vm_detail = [] # (vm_name, az, metadata, status)
result_status_detail = self._get_vm_detail(vm_uuid, vm_detail) result_status_detail = self._get_vm_detail(vm_uuid,
vm_detail)
if result_status_detail == "success": if result_status_detail == "success":
vm_id = ("none", vm_detail[0], vm_uuid) vm_id = ("none", vm_detail[0], vm_uuid)
@ -162,7 +176,8 @@ class Compute(object):
return error_status return error_status
def _get_vms_of_host(self, _hk, _vm_list): def _get_vms_of_host(self, _hk, _vm_list):
hypervisor_list = self.nova.hypervisors.search(hypervisor_match=_hk, servers=True) hypervisor_list = self.nova.hypervisors.search(hypervisor_match=_hk,
servers=True)
try: try:
for hv in hypervisor_list: for hv in hypervisor_list:
@ -221,6 +236,7 @@ class Compute(object):
return "success" return "success"
def set_flavors(self, _flavors): def set_flavors(self, _flavors):
"""Set flavors."""
error_status = None error_status = None
self._get_nova_client() self._get_nova_client()
@ -260,7 +276,8 @@ class Compute(object):
ephemeral_gb = 0.0 ephemeral_gb = 0.0
if hasattr(f, "OS-FLV-EXT-DATA:ephemeral"): if hasattr(f, "OS-FLV-EXT-DATA:ephemeral"):
ephemeral_gb = float(getattr(f, "OS-FLV-EXT-DATA:ephemeral")) ephemeral_gb = float(getattr(f,
"OS-FLV-EXT-DATA:ephemeral"))
swap_mb = 0.0 swap_mb = 0.0
if hasattr(f, "swap"): if hasattr(f, "swap"):

View File

@ -13,6 +13,8 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
"""Compute Manager."""
import threading import threading
import time import time
@ -23,8 +25,14 @@ from valet.engine.resource_manager.resource_base import Host
class ComputeManager(threading.Thread): class ComputeManager(threading.Thread):
"""Compute Manager Class.
Threaded class to setup and manage compute for resources, hosts,
flavors, etc. Calls many functions from Resource.
"""
def __init__(self, _t_id, _t_name, _rsc, _data_lock, _config, _logger): def __init__(self, _t_id, _t_name, _rsc, _data_lock, _config, _logger):
"""Init Compute Manager."""
threading.Thread.__init__(self) threading.Thread.__init__(self)
self.thread_id = _t_id self.thread_id = _t_id
@ -42,7 +50,9 @@ class ComputeManager(threading.Thread):
self.project_token = None self.project_token = None
def run(self): def run(self):
self.logger.info("ComputeManager: start " + self.thread_name + " ......") """Start Compute Manager thread to run setup."""
self.logger.info("ComputeManager: start " + self.thread_name +
" ......")
if self.config.compute_trigger_freq > 0: if self.config.compute_trigger_freq > 0:
period_end = time.time() + self.config.compute_trigger_freq period_end = time.time() + self.config.compute_trigger_freq
@ -67,7 +77,10 @@ class ComputeManager(threading.Thread):
time.sleep(60) time.sleep(60)
now = time.localtime() now = time.localtime()
if now.tm_year > last_trigger_year or now.tm_mon > last_trigger_mon or now.tm_mday > last_trigger_mday: if now.tm_year > last_trigger_year or \
now.tm_mon > last_trigger_mon or \
now.tm_mday > last_trigger_mday:
timeout = False timeout = False
if timeout is False and \ if timeout is False and \
@ -82,28 +95,33 @@ class ComputeManager(threading.Thread):
self.logger.info("ComputeManager: exit " + self.thread_name) self.logger.info("ComputeManager: exit " + self.thread_name)
def _run(self): def _run(self):
self.logger.info("ComputeManager: --- start compute_nodes status update ---") self.logger.info("ComputeManager: --- start compute_nodes "
"status update ---")
self.data_lock.acquire() self.data_lock.acquire()
try: try:
triggered_host_updates = self.set_hosts() triggered_host_updates = self.set_hosts()
triggered_flavor_updates = self.set_flavors() triggered_flavor_updates = self.set_flavors()
if triggered_host_updates is True and triggered_flavor_updates is True: if triggered_host_updates is True and \
triggered_flavor_updates is True:
if self.resource.update_topology() is False: if self.resource.update_topology() is False:
# TODO: error in MUSIC. ignore? # TODO(UNKNOWN): error in MUSIC. ignore?
pass pass
else: else:
# TODO: error handling, e.g., 3 times failure then stop Ostro? # TODO(UNKNOWN): error handling, e.g.,
# 3 times failure then stop Ostro?
pass pass
finally: finally:
self.data_lock.release() self.data_lock.release()
self.logger.info("ComputeManager: --- done compute_nodes status update ---") self.logger.info("ComputeManager: --- done compute_nodes "
"status update ---")
return True return True
def set_hosts(self): def set_hosts(self):
"""Return True if hosts set, compute avail resources, checks update."""
hosts = {} hosts = {}
logical_groups = {} logical_groups = {}
@ -136,25 +154,30 @@ class ComputeManager(threading.Thread):
self.resource.logical_groups[lk] = deepcopy(_logical_groups[lk]) self.resource.logical_groups[lk] = deepcopy(_logical_groups[lk])
self.resource.logical_groups[lk].last_update = time.time() self.resource.logical_groups[lk].last_update = time.time()
self.logger.warn("ComputeManager: new logical group (" + lk + ") added") self.logger.warn("ComputeManager: new logical group (" +
lk + ") added")
for rlk in self.resource.logical_groups.keys(): for rlk in self.resource.logical_groups.keys():
rl = self.resource.logical_groups[rlk] rl = self.resource.logical_groups[rlk]
if rl.group_type != "EX" and rl.group_type != "AFF" and rl.group_type != "DIV": if rl.group_type != "EX" and rl.group_type != "AFF" and \
rl.group_type != "DIV":
if rlk not in _logical_groups.keys(): if rlk not in _logical_groups.keys():
self.resource.logical_groups[rlk].status = "disabled" self.resource.logical_groups[rlk].status = "disabled"
self.resource.logical_groups[rlk].last_update = time.time() self.resource.logical_groups[rlk].last_update = time.time()
self.logger.warn("ComputeManager: logical group (" + rlk + ") removed") self.logger.warn("ComputeManager: logical group (" +
rlk + ") removed")
for lk in _logical_groups.keys(): for lk in _logical_groups.keys():
lg = _logical_groups[lk] lg = _logical_groups[lk]
rlg = self.resource.logical_groups[lk] rlg = self.resource.logical_groups[lk]
if lg.group_type != "EX" and lg.group_type != "AFF" and lg.group_type != "DIV": if lg.group_type != "EX" and lg.group_type != "AFF" and \
lg.group_type != "DIV":
if self._check_logical_group_metadata_update(lg, rlg) is True: if self._check_logical_group_metadata_update(lg, rlg) is True:
rlg.last_update = time.time() rlg.last_update = time.time()
self.logger.warn("ComputeManager: logical group (" + lk + ") updated") self.logger.warn("ComputeManager: logical group (" +
lk + ") updated")
def _check_logical_group_metadata_update(self, _lg, _rlg): def _check_logical_group_metadata_update(self, _lg, _rlg):
if _lg.status != _rlg.status: if _lg.status != _rlg.status:
@ -183,7 +206,8 @@ class ComputeManager(threading.Thread):
self.resource.hosts[new_host.name] = new_host self.resource.hosts[new_host.name] = new_host
new_host.last_update = time.time() new_host.last_update = time.time()
self.logger.warn("ComputeManager: new host (" + new_host.name + ") added") self.logger.warn("ComputeManager: new host (" +
new_host.name + ") added")
for rhk, rhost in self.resource.hosts.iteritems(): for rhk, rhost in self.resource.hosts.iteritems():
if rhk not in _hosts.keys(): if rhk not in _hosts.keys():
@ -191,7 +215,8 @@ class ComputeManager(threading.Thread):
rhost.tag.remove("nova") rhost.tag.remove("nova")
rhost.last_update = time.time() rhost.last_update = time.time()
self.logger.warn("ComputeManager: host (" + rhost.name + ") disabled") self.logger.warn("ComputeManager: host (" +
rhost.name + ") disabled")
for hk in _hosts.keys(): for hk in _hosts.keys():
host = _hosts[hk] host = _hosts[hk]
@ -202,7 +227,8 @@ class ComputeManager(threading.Thread):
for hk, h in self.resource.hosts.iteritems(): for hk, h in self.resource.hosts.iteritems():
if h.clean_memberships() is True: if h.clean_memberships() is True:
h.last_update = time.time() h.last_update = time.time()
self.logger.warn("ComputeManager: host (" + h.name + ") updated (delete EX/AFF/DIV membership)") self.logger.warn("ComputeManager: host (" + h.name +
") updated (delete EX/AFF/DIV membership)")
for hk, host in self.resource.hosts.iteritems(): for hk, host in self.resource.hosts.iteritems():
if host.last_update > self.resource.current_timestamp: if host.last_update > self.resource.current_timestamp:
@ -224,17 +250,20 @@ class ComputeManager(threading.Thread):
if "nova" not in _rhost.tag: if "nova" not in _rhost.tag:
_rhost.tag.append("nova") _rhost.tag.append("nova")
topology_updated = True topology_updated = True
self.logger.warn("ComputeManager: host (" + _rhost.name + ") updated (tag added)") self.logger.warn("ComputeManager: host (" + _rhost.name +
") updated (tag added)")
if _host.status != _rhost.status: if _host.status != _rhost.status:
_rhost.status = _host.status _rhost.status = _host.status
topology_updated = True topology_updated = True
self.logger.warn("ComputeManager: host (" + _rhost.name + ") updated (status changed)") self.logger.warn("ComputeManager: host (" + _rhost.name +
") updated (status changed)")
if _host.state != _rhost.state: if _host.state != _rhost.state:
_rhost.state = _host.state _rhost.state = _host.state
topology_updated = True topology_updated = True
self.logger.warn("ComputeManager: host (" + _rhost.name + ") updated (state changed)") self.logger.warn("ComputeManager: host (" + _rhost.name +
") updated (state changed)")
return topology_updated return topology_updated
@ -248,7 +277,8 @@ class ComputeManager(threading.Thread):
_rhost.original_vCPUs = _host.original_vCPUs _rhost.original_vCPUs = _host.original_vCPUs
_rhost.avail_vCPUs = _host.avail_vCPUs _rhost.avail_vCPUs = _host.avail_vCPUs
topology_updated = True topology_updated = True
self.logger.warn("ComputeManager: host (" + _rhost.name + ") updated (CPU updated)") self.logger.warn("ComputeManager: host (" + _rhost.name +
") updated (CPU updated)")
if _host.mem_cap != _rhost.mem_cap or \ if _host.mem_cap != _rhost.mem_cap or \
_host.original_mem_cap != _rhost.original_mem_cap or \ _host.original_mem_cap != _rhost.original_mem_cap or \
@ -257,7 +287,8 @@ class ComputeManager(threading.Thread):
_rhost.original_mem_cap = _host.original_mem_cap _rhost.original_mem_cap = _host.original_mem_cap
_rhost.avail_mem_cap = _host.avail_mem_cap _rhost.avail_mem_cap = _host.avail_mem_cap
topology_updated = True topology_updated = True
self.logger.warn("ComputeManager: host (" + _rhost.name + ") updated (mem updated)") self.logger.warn("ComputeManager: host (" + _rhost.name +
") updated (mem updated)")
if _host.local_disk_cap != _rhost.local_disk_cap or \ if _host.local_disk_cap != _rhost.local_disk_cap or \
_host.original_local_disk_cap != _rhost.original_local_disk_cap or \ _host.original_local_disk_cap != _rhost.original_local_disk_cap or \
@ -266,7 +297,8 @@ class ComputeManager(threading.Thread):
_rhost.original_local_disk_cap = _host.original_local_disk_cap _rhost.original_local_disk_cap = _host.original_local_disk_cap
_rhost.avail_local_disk_cap = _host.avail_local_disk_cap _rhost.avail_local_disk_cap = _host.avail_local_disk_cap
topology_updated = True topology_updated = True
self.logger.warn("ComputeManager: host (" + _rhost.name + ") updated (local disk space updated)") self.logger.warn("ComputeManager: host (" + _rhost.name +
") updated (local disk space updated)")
if _host.vCPUs_used != _rhost.vCPUs_used or \ if _host.vCPUs_used != _rhost.vCPUs_used or \
_host.free_mem_mb != _rhost.free_mem_mb or \ _host.free_mem_mb != _rhost.free_mem_mb or \
@ -277,7 +309,8 @@ class ComputeManager(threading.Thread):
_rhost.free_disk_gb = _host.free_disk_gb _rhost.free_disk_gb = _host.free_disk_gb
_rhost.disk_available_least = _host.disk_available_least _rhost.disk_available_least = _host.disk_available_least
topology_updated = True topology_updated = True
self.logger.warn("ComputeManager: host (" + _rhost.name + ") updated (other resource numbers)") self.logger.warn("ComputeManager: host (" + _rhost.name +
") updated (other resource numbers)")
return topology_updated return topology_updated
@ -288,15 +321,18 @@ class ComputeManager(threading.Thread):
if mk not in _rhost.memberships.keys(): if mk not in _rhost.memberships.keys():
_rhost.memberships[mk] = self.resource.logical_groups[mk] _rhost.memberships[mk] = self.resource.logical_groups[mk]
topology_updated = True topology_updated = True
self.logger.warn("ComputeManager: host (" + _rhost.name + ") updated (new membership)") self.logger.warn("ComputeManager: host (" + _rhost.name +
") updated (new membership)")
for mk in _rhost.memberships.keys(): for mk in _rhost.memberships.keys():
m = _rhost.memberships[mk] m = _rhost.memberships[mk]
if m.group_type != "EX" and m.group_type != "AFF" and m.group_type != "DIV": if m.group_type != "EX" and m.group_type != "AFF" and \
m.group_type != "DIV":
if mk not in _host.memberships.keys(): if mk not in _host.memberships.keys():
del _rhost.memberships[mk] del _rhost.memberships[mk]
topology_updated = True topology_updated = True
self.logger.warn("ComputeManager: host (" + _rhost.name + ") updated (delete membership)") self.logger.warn("ComputeManager: host (" + _rhost.name +
") updated (delete membership)")
return topology_updated return topology_updated
@ -309,7 +345,8 @@ class ComputeManager(threading.Thread):
_rhost.vm_list.remove(rvm_id) _rhost.vm_list.remove(rvm_id)
topology_updated = True topology_updated = True
self.logger.warn("ComputeManager: host (" + _rhost.name + ") updated (none vm removed)") self.logger.warn("ComputeManager: host (" + _rhost.name +
") updated (none vm removed)")
self.resource.clean_none_vms_from_logical_groups(_rhost) self.resource.clean_none_vms_from_logical_groups(_rhost)
@ -318,20 +355,24 @@ class ComputeManager(threading.Thread):
_rhost.vm_list.append(vm_id) _rhost.vm_list.append(vm_id)
topology_updated = True topology_updated = True
self.logger.warn("ComputeManager: host (" + _rhost.name + ") updated (new vm placed)") self.logger.warn("ComputeManager: host (" + _rhost.name +
") updated (new vm placed)")
for rvm_id in _rhost.vm_list: for rvm_id in _rhost.vm_list:
if _host.exist_vm_by_uuid(rvm_id[2]) is False: if _host.exist_vm_by_uuid(rvm_id[2]) is False:
_rhost.vm_list.remove(rvm_id) _rhost.vm_list.remove(rvm_id)
self.resource.remove_vm_by_uuid_from_logical_groups(_rhost, rvm_id[2]) self.resource.remove_vm_by_uuid_from_logical_groups(_rhost,
rvm_id[2])
topology_updated = True topology_updated = True
self.logger.warn("ComputeManager: host (" + _rhost.name + ") updated (vm removed)") self.logger.warn("ComputeManager: host (" + _rhost.name +
") updated (vm removed)")
return topology_updated return topology_updated
def set_flavors(self): def set_flavors(self):
"""Return True if compute set flavors returns success."""
flavors = {} flavors = {}
compute = None compute = None
@ -356,14 +397,16 @@ class ComputeManager(threading.Thread):
self.resource.flavors[fk] = deepcopy(_flavors[fk]) self.resource.flavors[fk] = deepcopy(_flavors[fk])
self.resource.flavors[fk].last_update = time.time() self.resource.flavors[fk].last_update = time.time()
self.logger.warn("ComputeManager: new flavor (" + fk + ") added") self.logger.warn("ComputeManager: new flavor (" +
fk + ") added")
for rfk in self.resource.flavors.keys(): for rfk in self.resource.flavors.keys():
if rfk not in _flavors.keys(): if rfk not in _flavors.keys():
self.resource.flavors[rfk].status = "disabled" self.resource.flavors[rfk].status = "disabled"
self.resource.flavors[rfk].last_update = time.time() self.resource.flavors[rfk].last_update = time.time()
self.logger.warn("ComputeManager: flavor (" + rfk + ") removed") self.logger.warn("ComputeManager: flavor (" +
rfk + ") removed")
for fk in _flavors.keys(): for fk in _flavors.keys():
f = _flavors[fk] f = _flavors[fk]
@ -371,7 +414,8 @@ class ComputeManager(threading.Thread):
if self._check_flavor_spec_update(f, rf) is True: if self._check_flavor_spec_update(f, rf) is True:
rf.last_update = time.time() rf.last_update = time.time()
self.logger.warn("ComputeManager: flavor (" + fk + ") spec updated") self.logger.warn("ComputeManager: flavor (" +
fk + ") spec updated")
def _check_flavor_spec_update(self, _f, _rf): def _check_flavor_spec_update(self, _f, _rf):
spec_updated = False spec_updated = False
@ -380,7 +424,8 @@ class ComputeManager(threading.Thread):
_rf.status = _f.status _rf.status = _f.status
spec_updated = True spec_updated = True
if _f.vCPUs != _rf.vCPUs or _f.mem_cap != _rf.mem_cap or _f.disk_cap != _rf.disk_cap: if _f.vCPUs != _rf.vCPUs or _f.mem_cap != _rf.mem_cap or \
_f.disk_cap != _rf.disk_cap:
_rf.vCPUs = _f.vCPUs _rf.vCPUs = _f.vCPUs
_rf.mem_cap = _f.mem_cap _rf.mem_cap = _f.mem_cap
_rf.disk_cap = _f.disk_cap _rf.disk_cap = _f.disk_cap

View File

@ -13,16 +13,25 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
from valet.engine.resource_manager.resource_base import Host, LogicalGroup, Flavor """Compute Simulator."""
from valet.engine.resource_manager.resource_base \
import Host, LogicalGroup, Flavor
class SimCompute(object): class SimCompute(object):
"""Sim Compute Class.
This class simulates a compute datacenter using classes from resource_base.
"""
def __init__(self, _config): def __init__(self, _config):
"""Init Sim Compute class (object)."""
self.config = _config self.config = _config
self.datacenter_name = "sim" self.datacenter_name = "sim"
def set_hosts(self, _hosts, _logical_groups): def set_hosts(self, _hosts, _logical_groups):
"""Set hosts and logical groups using resource_base, return success."""
self._set_availability_zones(_hosts, _logical_groups) self._set_availability_zones(_hosts, _logical_groups)
self._set_aggregates(_hosts, _logical_groups) self._set_aggregates(_hosts, _logical_groups)
@ -40,7 +49,8 @@ class SimCompute(object):
for r_num in range(0, self.config.num_of_racks): for r_num in range(0, self.config.num_of_racks):
for h_num in range(0, self.config.num_of_hosts_per_rack): for h_num in range(0, self.config.num_of_hosts_per_rack):
host = Host(self.datacenter_name + "0r" + str(r_num) + "c" + str(h_num)) host = Host(self.datacenter_name + "0r" + str(r_num) +
"c" + str(h_num))
host.tag.append("nova") host.tag.append("nova")
host.memberships["nova"] = logical_group host.memberships["nova"] = logical_group
@ -63,9 +73,11 @@ class SimCompute(object):
aggregate = _logical_groups["aggregate" + str(a_num)] aggregate = _logical_groups["aggregate" + str(a_num)]
for r_num in range(0, self.config.num_of_racks): for r_num in range(0, self.config.num_of_racks):
for h_num in range(0, self.config.num_of_hosts_per_rack): for h_num in range(0, self.config.num_of_hosts_per_rack):
host_name = self.datacenter_name + "0r" + str(r_num) + "c" + str(h_num) host_name = self.datacenter_name + "0r" + str(r_num) +\
"c" + str(h_num)
if host_name in _hosts.keys(): if host_name in _hosts.keys():
if (h_num % (self.config.aggregated_ratio + a_num)) == 0: if (h_num %
(self.config.aggregated_ratio + a_num)) == 0:
host = _hosts[host_name] host = _hosts[host_name]
host.memberships[aggregate.name] = aggregate host.memberships[aggregate.name] = aggregate
@ -77,23 +89,28 @@ class SimCompute(object):
def _set_resources(self, _hosts): def _set_resources(self, _hosts):
for r_num in range(0, self.config.num_of_racks): for r_num in range(0, self.config.num_of_racks):
for h_num in range(0, self.config.num_of_hosts_per_rack): for h_num in range(0, self.config.num_of_hosts_per_rack):
host_name = self.datacenter_name + "0r" + str(r_num) + "c" + str(h_num) host_name = self.datacenter_name + "0r" + str(r_num) +\
"c" + str(h_num)
if host_name in _hosts.keys(): if host_name in _hosts.keys():
host = _hosts[host_name] host = _hosts[host_name]
host.original_vCPUs = float(self.config.cpus_per_host) host.original_vCPUs = float(self.config.cpus_per_host)
host.vCPUs_used = 0.0 host.vCPUs_used = 0.0
host.original_mem_cap = float(self.config.mem_per_host) host.original_mem_cap = float(self.config.mem_per_host)
host.free_mem_mb = host.original_mem_cap host.free_mem_mb = host.original_mem_cap
host.original_local_disk_cap = float(self.config.disk_per_host) host.original_local_disk_cap = \
float(self.config.disk_per_host)
host.free_disk_gb = host.original_local_disk_cap host.free_disk_gb = host.original_local_disk_cap
host.disk_available_least = host.original_local_disk_cap host.disk_available_least = host.original_local_disk_cap
def set_flavors(self, _flavors): def set_flavors(self, _flavors):
"""Set flavors in compute sim, return success."""
for f_num in range(0, self.config.num_of_basic_flavors): for f_num in range(0, self.config.num_of_basic_flavors):
flavor = Flavor("bflavor" + str(f_num)) flavor = Flavor("bflavor" + str(f_num))
flavor.vCPUs = float(self.config.base_flavor_cpus * (f_num + 1)) flavor.vCPUs = float(self.config.base_flavor_cpus * (f_num + 1))
flavor.mem_cap = float(self.config.base_flavor_mem * (f_num + 1)) flavor.mem_cap = float(self.config.base_flavor_mem * (f_num + 1))
flavor.disk_cap = float(self.config.base_flavor_disk * (f_num + 1)) + 10.0 + 20.0 / 1024.0 flavor.disk_cap = \
float(self.config.base_flavor_disk * (f_num + 1)) + \
10.0 + 20.0 / 1024.0
_flavors[flavor.name] = flavor _flavors[flavor.name] = flavor

View File

@ -13,6 +13,8 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
"""Resource - Handles data, metadata, status of resources."""
import json import json
import sys import sys
import time import time
@ -20,33 +22,42 @@ import traceback
from valet.engine.optimizer.app_manager.app_topology_base import LEVELS from valet.engine.optimizer.app_manager.app_topology_base import LEVELS
from valet.engine.optimizer.util import util as util from valet.engine.optimizer.util import util as util
from valet.engine.resource_manager.resource_base import Datacenter, HostGroup, Host, LogicalGroup from valet.engine.resource_manager.resource_base \
import Datacenter, HostGroup, Host, LogicalGroup
from valet.engine.resource_manager.resource_base import Flavor, Switch, Link from valet.engine.resource_manager.resource_base import Flavor, Switch, Link
class Resource(object): class Resource(object):
"""Resource Class.
This class bootsraps the resources from the database and initializes
them using base resources (datacenter, host group, host, logical group).
Also manages aggregate status of resources and metadata and handles
updates to base resource types.
"""
def __init__(self, _db, _config, _logger): def __init__(self, _db, _config, _logger):
"""Init Resource Class."""
self.db = _db self.db = _db
self.config = _config self.config = _config
self.logger = _logger self.logger = _logger
''' resource data ''' """ resource data """
self.datacenter = Datacenter(self.config.datacenter_name) self.datacenter = Datacenter(self.config.datacenter_name)
self.host_groups = {} self.host_groups = {}
self.hosts = {} self.hosts = {}
self.switches = {} self.switches = {}
self.storage_hosts = {} self.storage_hosts = {}
''' metadata ''' """ metadata """
self.logical_groups = {} self.logical_groups = {}
self.flavors = {} self.flavors = {}
self.current_timestamp = 0 self.current_timestamp = 0
self.last_log_index = 0 self.last_log_index = 0
''' resource status aggregation ''' """ resource status aggregation """
self.CPU_avail = 0 self.CPU_avail = 0
self.mem_avail = 0 self.mem_avail = 0
self.local_disk_avail = 0 self.local_disk_avail = 0
@ -54,6 +65,7 @@ class Resource(object):
self.nw_bandwidth_avail = 0 self.nw_bandwidth_avail = 0
def bootstrap_from_db(self, _resource_status): def bootstrap_from_db(self, _resource_status):
"""Return True if bootsrap resource from database successful."""
try: try:
logical_groups = _resource_status.get("logical_groups") logical_groups = _resource_status.get("logical_groups")
if logical_groups: if logical_groups:
@ -69,9 +81,11 @@ class Resource(object):
self.logical_groups[lgk] = logical_group self.logical_groups[lgk] = logical_group
if len(self.logical_groups) > 0: if len(self.logical_groups) > 0:
self.logger.debug("Resource.bootstrap_from_db: logical_groups loaded") self.logger.debug("Resource.bootstrap_from_db: logical_groups "
"loaded")
else: else:
self.logger.warn("Resource.bootstrap_from_db: no logical_groups") self.logger.warn("Resource.bootstrap_from_db: no "
"logical_groups")
flavors = _resource_status.get("flavors") flavors = _resource_status.get("flavors")
if flavors: if flavors:
@ -89,7 +103,8 @@ class Resource(object):
if len(self.flavors) > 0: if len(self.flavors) > 0:
self.logger.debug("Resource.bootstrap_from_db: flavors loaded") self.logger.debug("Resource.bootstrap_from_db: flavors loaded")
else: else:
self.logger.error("Resource.bootstrap_from_db: fail loading flavors") self.logger.error("Resource.bootstrap_from_db: fail loading "
"flavors")
switches = _resource_status.get("switches") switches = _resource_status.get("switches")
if switches: if switches:
@ -129,9 +144,11 @@ class Resource(object):
switch.peer_links = peer_links switch.peer_links = peer_links
self.logger.debug("Resource.bootstrap_from_db: switch links loaded") self.logger.debug("Resource.bootstrap_from_db: switch links "
"loaded")
else: else:
self.logger.error("Resource.bootstrap_from_db: fail loading switches") self.logger.error("Resource.bootstrap_from_db: fail loading "
"switches")
# storage_hosts # storage_hosts
hosts = _resource_status.get("hosts") hosts = _resource_status.get("hosts")
@ -168,9 +185,11 @@ class Resource(object):
self.hosts[hk] = host self.hosts[hk] = host
if len(self.hosts) > 0: if len(self.hosts) > 0:
self.logger.debug("Resource.bootstrap_from_db: hosts loaded") self.logger.debug("Resource.bootstrap_from_db: hosts "
"loaded")
else: else:
self.logger.error("Resource.bootstrap_from_db: fail loading hosts") self.logger.error("Resource.bootstrap_from_db: fail "
"loading hosts")
host_groups = _resource_status.get("host_groups") host_groups = _resource_status.get("host_groups")
if host_groups: if host_groups:
@ -185,7 +204,8 @@ class Resource(object):
host_group.original_mem_cap = hg.get("original_mem") host_group.original_mem_cap = hg.get("original_mem")
host_group.avail_mem_cap = hg.get("avail_mem") host_group.avail_mem_cap = hg.get("avail_mem")
host_group.local_disk_cap = hg.get("local_disk") host_group.local_disk_cap = hg.get("local_disk")
host_group.original_local_disk_cap = hg.get("original_local_disk") host_group.original_local_disk_cap = \
hg.get("original_local_disk")
host_group.avail_local_disk_cap = hg.get("avail_local_disk") host_group.avail_local_disk_cap = hg.get("avail_local_disk")
host_group.vm_list = hg.get("vm_list") host_group.vm_list = hg.get("vm_list")
host_group.volume_list = hg.get("volume_list", []) host_group.volume_list = hg.get("volume_list", [])
@ -201,9 +221,11 @@ class Resource(object):
self.host_groups[hgk] = host_group self.host_groups[hgk] = host_group
if len(self.host_groups) > 0: if len(self.host_groups) > 0:
self.logger.debug("Resource.bootstrap_from_db: host_groups loaded") self.logger.debug("Resource.bootstrap_from_db: host_groups "
"loaded")
else: else:
self.logger.error("Resource.bootstrap_from_db: fail loading host_groups") self.logger.error("Resource.bootstrap_from_db: fail "
"loading host_groups")
dc = _resource_status.get("datacenter") dc = _resource_status.get("datacenter")
if dc: if dc:
@ -217,7 +239,8 @@ class Resource(object):
self.datacenter.original_mem_cap = dc.get("original_mem") self.datacenter.original_mem_cap = dc.get("original_mem")
self.datacenter.avail_mem_cap = dc.get("avail_mem") self.datacenter.avail_mem_cap = dc.get("avail_mem")
self.datacenter.local_disk_cap = dc.get("local_disk") self.datacenter.local_disk_cap = dc.get("local_disk")
self.datacenter.original_local_disk_cap = dc.get("original_local_disk") self.datacenter.original_local_disk_cap = \
dc.get("original_local_disk")
self.datacenter.avail_local_disk_cap = dc.get("avail_local_disk") self.datacenter.avail_local_disk_cap = dc.get("avail_local_disk")
self.datacenter.vm_list = dc.get("vm_list") self.datacenter.vm_list = dc.get("vm_list")
self.datacenter.volume_list = dc.get("volume_list", []) self.datacenter.volume_list = dc.get("volume_list", [])
@ -237,9 +260,11 @@ class Resource(object):
self.datacenter.resources[ck] = self.hosts[ck] self.datacenter.resources[ck] = self.hosts[ck]
if len(self.datacenter.resources) > 0: if len(self.datacenter.resources) > 0:
self.logger.debug("Resource.bootstrap_from_db: datacenter loaded") self.logger.debug("Resource.bootstrap_from_db: datacenter "
"loaded")
else: else:
self.logger.error("Resource.bootstrap_from_db: fail loading datacenter") self.logger.error("Resource.bootstrap_from_db: fail "
"loading datacenter")
hgs = _resource_status.get("host_groups") hgs = _resource_status.get("host_groups")
if hgs: if hgs:
@ -258,7 +283,8 @@ class Resource(object):
elif ck in self.host_groups.keys(): elif ck in self.host_groups.keys():
host_group.child_resources[ck] = self.host_groups[ck] host_group.child_resources[ck] = self.host_groups[ck]
self.logger.debug("Resource.bootstrap_from_db: host_groups'layout loaded") self.logger.debug("Resource.bootstrap_from_db: "
"host_groups'layout loaded")
hs = _resource_status.get("hosts") hs = _resource_status.get("hosts")
if hs: if hs:
@ -271,20 +297,24 @@ class Resource(object):
elif pk in self.host_groups.keys(): elif pk in self.host_groups.keys():
host.host_group = self.host_groups[pk] host.host_group = self.host_groups[pk]
self.logger.debug("Resource.bootstrap_from_db: hosts'layout loaded") self.logger.debug("Resource.bootstrap_from_db: "
"hosts'layout loaded")
self._update_compute_avail() self._update_compute_avail()
self._update_storage_avail() self._update_storage_avail()
self._update_nw_bandwidth_avail() self._update_nw_bandwidth_avail()
self.logger.debug("Resource.bootstrap_from_db: resource availability updated") self.logger.debug("Resource.bootstrap_from_db: "
"resource availability updated")
except Exception: except Exception:
self.logger.error("Resource.bootstrap_from_db - FAILED:" + traceback.format_exc()) self.logger.error("Resource.bootstrap_from_db - "
"FAILED:" + traceback.format_exc())
return True return True
def update_topology(self, store=True): def update_topology(self, store=True):
"""Update Topology and return True, if store True then store update."""
self._update_topology() self._update_topology()
self._update_compute_avail() self._update_compute_avail()
@ -304,7 +334,8 @@ class Resource(object):
def _update_topology(self): def _update_topology(self):
for level in LEVELS: for level in LEVELS:
for _, host_group in self.host_groups.iteritems(): for _, host_group in self.host_groups.iteritems():
if host_group.host_type == level and host_group.check_availability() is True: if host_group.host_type == level and \
host_group.check_availability() is True:
if host_group.last_update > self.current_timestamp: if host_group.last_update > self.current_timestamp:
self._update_host_group_topology(host_group) self._update_host_group_topology(host_group)
@ -326,7 +357,8 @@ class Resource(object):
_host_group.original_mem_cap += host.original_mem_cap _host_group.original_mem_cap += host.original_mem_cap
_host_group.avail_mem_cap += host.avail_mem_cap _host_group.avail_mem_cap += host.avail_mem_cap
_host_group.local_disk_cap += host.local_disk_cap _host_group.local_disk_cap += host.local_disk_cap
_host_group.original_local_disk_cap += host.original_local_disk_cap _host_group.original_local_disk_cap += \
host.original_local_disk_cap
_host_group.avail_local_disk_cap += host.avail_local_disk_cap _host_group.avail_local_disk_cap += host.avail_local_disk_cap
for shk, storage_host in host.storages.iteritems(): for shk, storage_host in host.storages.iteritems():
@ -362,8 +394,10 @@ class Resource(object):
self.datacenter.original_mem_cap += resource.original_mem_cap self.datacenter.original_mem_cap += resource.original_mem_cap
self.datacenter.avail_mem_cap += resource.avail_mem_cap self.datacenter.avail_mem_cap += resource.avail_mem_cap
self.datacenter.local_disk_cap += resource.local_disk_cap self.datacenter.local_disk_cap += resource.local_disk_cap
self.datacenter.original_local_disk_cap += resource.original_local_disk_cap self.datacenter.original_local_disk_cap += \
self.datacenter.avail_local_disk_cap += resource.avail_local_disk_cap resource.original_local_disk_cap
self.datacenter.avail_local_disk_cap += \
resource.avail_local_disk_cap
for shk, storage_host in resource.storages.iteritems(): for shk, storage_host in resource.storages.iteritems():
if storage_host.status == "enabled": if storage_host.status == "enabled":
@ -413,7 +447,8 @@ class Resource(object):
for sk, s in h.switches.iteritems(): for sk, s in h.switches.iteritems():
if s.status == "enabled": if s.status == "enabled":
for ulk, ul in s.up_links.iteritems(): for ulk, ul in s.up_links.iteritems():
avail_nw_bandwidth_list.append(ul.avail_nw_bandwidth) avail_nw_bandwidth_list.append(
ul.avail_nw_bandwidth)
self.nw_bandwidth_avail += min(avail_nw_bandwidth_list) self.nw_bandwidth_avail += min(avail_nw_bandwidth_list)
elif level == "spine": elif level == "spine":
for _, hg in self.host_groups.iteritems(): for _, hg in self.host_groups.iteritems():
@ -422,7 +457,8 @@ class Resource(object):
for _, s in hg.switches.iteritems(): for _, s in hg.switches.iteritems():
if s.status == "enabled": if s.status == "enabled":
for _, ul in s.up_links.iteritems(): for _, ul in s.up_links.iteritems():
avail_nw_bandwidth_list.append(ul.avail_nw_bandwidth) avail_nw_bandwidth_list.append(
ul.avail_nw_bandwidth)
# NOTE: peer links? # NOTE: peer links?
self.nw_bandwidth_avail += min(avail_nw_bandwidth_list) self.nw_bandwidth_avail += min(avail_nw_bandwidth_list)
@ -466,7 +502,8 @@ class Resource(object):
last_update_time = s.last_update last_update_time = s.last_update
for hk, host in self.hosts.iteritems(): for hk, host in self.hosts.iteritems():
if host.last_update > self.current_timestamp or host.last_link_update > self.current_timestamp: if host.last_update > self.current_timestamp or \
host.last_link_update > self.current_timestamp:
host_updates[hk] = host.get_json_info() host_updates[hk] = host.get_json_info()
if host.last_update > self.current_timestamp: if host.last_update > self.current_timestamp:
@ -493,10 +530,9 @@ class Resource(object):
if self.datacenter.last_link_update > self.current_timestamp: if self.datacenter.last_link_update > self.current_timestamp:
last_update_time = self.datacenter.last_link_update last_update_time = self.datacenter.last_link_update
(resource_logfile, last_index, mode) = util.get_last_logfile(self.config.resource_log_loc, (resource_logfile, last_index, mode) = util.get_last_logfile(
self.config.max_log_size, self.config.resource_log_loc, self.config.max_log_size,
self.config.max_num_of_logs, self.config.max_num_of_logs, self.datacenter.name,
self.datacenter.name,
self.last_log_index) self.last_log_index)
self.last_log_index = last_index self.last_log_index = last_index
@ -527,17 +563,21 @@ class Resource(object):
logging.close() logging.close()
self.logger.info("Resource._store_topology_updates: log resource status in " + resource_logfile) self.logger.info("Resource._store_topology_updates: log resource "
"status in " + resource_logfile)
if self.db is not None: if self.db is not None:
if self.db.update_resource_status(self.datacenter.name, json_logging) is False: if self.db.update_resource_status(self.datacenter.name,
json_logging) is False:
return None return None
if self.db.update_resource_log_index(self.datacenter.name, self.last_log_index) is False: if self.db.update_resource_log_index(self.datacenter.name,
self.last_log_index) is False:
return None return None
return last_update_time return last_update_time
def update_rack_resource(self, _host): def update_rack_resource(self, _host):
"""Update resources for rack (host), then update cluster."""
rack = _host.host_group rack = _host.host_group
if rack is not None: if rack is not None:
@ -547,6 +587,7 @@ class Resource(object):
self.update_cluster_resource(rack) self.update_cluster_resource(rack)
def update_cluster_resource(self, _rack): def update_cluster_resource(self, _rack):
"""Update cluster rack belonged to, then update datacenter."""
cluster = _rack.parent_resource cluster = _rack.parent_resource
if cluster is not None: if cluster is not None:
@ -556,11 +597,13 @@ class Resource(object):
self.datacenter.last_update = time.time() self.datacenter.last_update = time.time()
def get_uuid(self, _h_uuid, _host_name):
    """Return host uuid.

    Delegates the lookup of the orchestration id (_h_uuid) to the
    named host object.
    """
    return self.hosts[_host_name].get_uuid(_h_uuid)
def add_vm_to_host(self, _host_name, _vm_id, _vcpus, _mem, _ldisk): def add_vm_to_host(self, _host_name, _vm_id, _vcpus, _mem, _ldisk):
"""Add vm to host and adjust compute resources for host."""
host = self.hosts[_host_name] host = self.hosts[_host_name]
host.vm_list.append(_vm_id) host.vm_list.append(_vm_id)
@ -574,7 +617,9 @@ class Resource(object):
host.free_disk_gb -= _ldisk host.free_disk_gb -= _ldisk
host.disk_available_least -= _ldisk host.disk_available_least -= _ldisk
def remove_vm_by_h_uuid_from_host(self, _host_name, _h_uuid, _vcpus, _mem, _ldisk): def remove_vm_by_h_uuid_from_host(self, _host_name, _h_uuid, _vcpus, _mem,
_ldisk):
"""Remove vm from host by h_uuid, adjust compute resources for host."""
host = self.hosts[_host_name] host = self.hosts[_host_name]
host.remove_vm_by_h_uuid(_h_uuid) host.remove_vm_by_h_uuid(_h_uuid)
@ -588,7 +633,9 @@ class Resource(object):
host.free_disk_gb += _ldisk host.free_disk_gb += _ldisk
host.disk_available_least += _ldisk host.disk_available_least += _ldisk
def remove_vm_by_uuid_from_host(self, _host_name, _uuid, _vcpus, _mem, _ldisk): def remove_vm_by_uuid_from_host(self, _host_name, _uuid, _vcpus, _mem,
_ldisk):
"""Remove vm from host by uuid, adjust compute resources for host."""
host = self.hosts[_host_name] host = self.hosts[_host_name]
host.remove_vm_by_uuid(_uuid) host.remove_vm_by_uuid(_uuid)
@ -603,6 +650,7 @@ class Resource(object):
host.disk_available_least += _ldisk host.disk_available_least += _ldisk
def add_vol_to_host(self, _host_name, _storage_name, _v_id, _disk): def add_vol_to_host(self, _host_name, _storage_name, _v_id, _disk):
"""Add volume to host and adjust available disk on host."""
host = self.hosts[_host_name] host = self.hosts[_host_name]
host.volume_list.append(_v_id) host.volume_list.append(_v_id)
@ -612,9 +660,11 @@ class Resource(object):
storage_host.avail_disk_cap -= _disk storage_host.avail_disk_cap -= _disk
# NOTE: Assume the up-link of spine switch is not used except out-going from datacenter # NOTE: Assume the up-link of spine switch is not used except out-going
# from datacenter
# NOTE: What about peer-switches? # NOTE: What about peer-switches?
def deduct_bandwidth(self, _host_name, _placement_level, _bandwidth): def deduct_bandwidth(self, _host_name, _placement_level, _bandwidth):
"""Deduct bandwidth at appropriate placement level."""
host = self.hosts[_host_name] host = self.hosts[_host_name]
if _placement_level == "host": if _placement_level == "host":
@ -648,26 +698,31 @@ class Resource(object):
hs.last_update = time.time() hs.last_update = time.time()
def update_host_resources(self, _hn, _st, _vcpus, _vcpus_used, _mem, _fmem, _ldisk, _fldisk, _avail_least): def update_host_resources(self, _hn, _st, _vcpus, _vcpus_used, _mem, _fmem,
_ldisk, _fldisk, _avail_least):
"""Return True if status or compute resources avail on host changed."""
updated = False updated = False
host = self.hosts[_hn] host = self.hosts[_hn]
if host.status != _st: if host.status != _st:
host.status = _st host.status = _st
self.logger.debug("Resource.update_host_resources: host status changed") self.logger.debug("Resource.update_host_resources: host status "
"changed")
updated = True updated = True
if host.original_vCPUs != _vcpus or \ if host.original_vCPUs != _vcpus or \
host.vCPUs_used != _vcpus_used: host.vCPUs_used != _vcpus_used:
self.logger.debug("Resource.update_host_resources: host cpu changed") self.logger.debug("Resource.update_host_resources: host cpu "
"changed")
host.original_vCPUs = _vcpus host.original_vCPUs = _vcpus
host.vCPUs_used = _vcpus_used host.vCPUs_used = _vcpus_used
updated = True updated = True
if host.free_mem_mb != _fmem or \ if host.free_mem_mb != _fmem or \
host.original_mem_cap != _mem: host.original_mem_cap != _mem:
self.logger.debug("Resource.update_host_resources: host mem changed") self.logger.debug("Resource.update_host_resources: host mem "
"changed")
host.free_mem_mb = _fmem host.free_mem_mb = _fmem
host.original_mem_cap = _mem host.original_mem_cap = _mem
updated = True updated = True
@ -675,7 +730,8 @@ class Resource(object):
if host.free_disk_gb != _fldisk or \ if host.free_disk_gb != _fldisk or \
host.original_local_disk_cap != _ldisk or \ host.original_local_disk_cap != _ldisk or \
host.disk_available_least != _avail_least: host.disk_available_least != _avail_least:
self.logger.debug("Resource.update_host_resources: host disk changed") self.logger.debug("Resource.update_host_resources: host disk "
"changed")
host.free_disk_gb = _fldisk host.free_disk_gb = _fldisk
host.original_local_disk_cap = _ldisk host.original_local_disk_cap = _ldisk
host.disk_available_least = _avail_least host.disk_available_least = _avail_least
@ -687,17 +743,20 @@ class Resource(object):
return updated return updated
def update_host_time(self, _host_name):
    """Update last host update time.

    Stamps the host with the current wall-clock time and propagates the
    change up through its rack (and, transitively, the cluster and
    datacenter).
    """
    target = self.hosts[_host_name]
    target.last_update = time.time()
    self.update_rack_resource(target)
def update_storage_time(self, _storage_name):
    """Update last storage update time.

    Stamps the named storage host's capacity-update time with the
    current wall-clock time.
    """
    self.storage_hosts[_storage_name].last_cap_update = time.time()
def add_logical_group(self, _host_name, _lg_name, _lg_type): def add_logical_group(self, _host_name, _lg_name, _lg_type):
"""Add logical group to host memberships and update host resource."""
host = None host = None
if _host_name in self.hosts.keys(): if _host_name in self.hosts.keys():
host = self.hosts[_host_name] host = self.hosts[_host_name]
@ -720,6 +779,7 @@ class Resource(object):
self.update_cluster_resource(host) self.update_cluster_resource(host)
def add_vm_to_logical_groups(self, _host, _vm_id, _logical_groups_of_vm): def add_vm_to_logical_groups(self, _host, _vm_id, _logical_groups_of_vm):
"""Add vm to logical group and update corresponding lg."""
for lgk in _host.memberships.keys(): for lgk in _host.memberships.keys():
if lgk in _logical_groups_of_vm: if lgk in _logical_groups_of_vm:
lg = self.logical_groups[lgk] lg = self.logical_groups[lgk]
@ -728,17 +788,21 @@ class Resource(object):
if lg.add_vm_by_h_uuid(_vm_id, _host.name) is True: if lg.add_vm_by_h_uuid(_vm_id, _host.name) is True:
lg.last_update = time.time() lg.last_update = time.time()
elif isinstance(_host, HostGroup): elif isinstance(_host, HostGroup):
if lg.group_type == "EX" or lg.group_type == "AFF" or lg.group_type == "DIV": if lg.group_type == "EX" or lg.group_type == "AFF" or \
lg.group_type == "DIV":
if lgk.split(":")[0] == _host.host_type: if lgk.split(":")[0] == _host.host_type:
if lg.add_vm_by_h_uuid(_vm_id, _host.name) is True: if lg.add_vm_by_h_uuid(_vm_id, _host.name) is True:
lg.last_update = time.time() lg.last_update = time.time()
if isinstance(_host, Host) and _host.host_group is not None: if isinstance(_host, Host) and _host.host_group is not None:
self.add_vm_to_logical_groups(_host.host_group, _vm_id, _logical_groups_of_vm) self.add_vm_to_logical_groups(_host.host_group, _vm_id,
_logical_groups_of_vm)
elif isinstance(_host, HostGroup) and _host.parent_resource is not None: elif isinstance(_host, HostGroup) and _host.parent_resource is not None:
self.add_vm_to_logical_groups(_host.parent_resource, _vm_id, _logical_groups_of_vm) self.add_vm_to_logical_groups(_host.parent_resource, _vm_id,
_logical_groups_of_vm)
def remove_vm_by_h_uuid_from_logical_groups(self, _host, _h_uuid): def remove_vm_by_h_uuid_from_logical_groups(self, _host, _h_uuid):
"""Remove vm by orchestration id from lgs. Update host and lgs."""
for lgk in _host.memberships.keys(): for lgk in _host.memberships.keys():
if lgk not in self.logical_groups.keys(): if lgk not in self.logical_groups.keys():
continue continue
@ -752,7 +816,8 @@ class Resource(object):
_host.last_update = time.time() _host.last_update = time.time()
elif isinstance(_host, HostGroup): elif isinstance(_host, HostGroup):
if lg.group_type == "EX" or lg.group_type == "AFF" or lg.group_type == "DIV": if lg.group_type == "EX" or lg.group_type == "AFF" or \
lg.group_type == "DIV":
if lgk.split(":")[0] == _host.host_type: if lgk.split(":")[0] == _host.host_type:
if lg.remove_vm_by_h_uuid(_h_uuid, _host.name) is True: if lg.remove_vm_by_h_uuid(_h_uuid, _host.name) is True:
lg.last_update = time.time() lg.last_update = time.time()
@ -760,16 +825,20 @@ class Resource(object):
if _host.remove_membership(lg) is True: if _host.remove_membership(lg) is True:
_host.last_update = time.time() _host.last_update = time.time()
if lg.group_type == "EX" or lg.group_type == "AFF" or lg.group_type == "DIV": if lg.group_type == "EX" or lg.group_type == "AFF" or \
lg.group_type == "DIV":
if len(lg.vm_list) == 0: if len(lg.vm_list) == 0:
del self.logical_groups[lgk] del self.logical_groups[lgk]
if isinstance(_host, Host) and _host.host_group is not None: if isinstance(_host, Host) and _host.host_group is not None:
self.remove_vm_by_h_uuid_from_logical_groups(_host.host_group, _h_uuid) self.remove_vm_by_h_uuid_from_logical_groups(_host.host_group,
_h_uuid)
elif isinstance(_host, HostGroup) and _host.parent_resource is not None: elif isinstance(_host, HostGroup) and _host.parent_resource is not None:
self.remove_vm_by_h_uuid_from_logical_groups(_host.parent_resource, _h_uuid) self.remove_vm_by_h_uuid_from_logical_groups(_host.parent_resource,
_h_uuid)
def remove_vm_by_uuid_from_logical_groups(self, _host, _uuid): def remove_vm_by_uuid_from_logical_groups(self, _host, _uuid):
"""Remove vm by uuid from lgs and update proper host and lgs."""
for lgk in _host.memberships.keys(): for lgk in _host.memberships.keys():
if lgk not in self.logical_groups.keys(): if lgk not in self.logical_groups.keys():
continue continue
@ -783,7 +852,8 @@ class Resource(object):
_host.last_update = time.time() _host.last_update = time.time()
elif isinstance(_host, HostGroup): elif isinstance(_host, HostGroup):
if lg.group_type == "EX" or lg.group_type == "AFF" or lg.group_type == "DIV": if lg.group_type == "EX" or lg.group_type == "AFF" or \
lg.group_type == "DIV":
if lgk.split(":")[0] == _host.host_type: if lgk.split(":")[0] == _host.host_type:
if lg.remove_vm_by_uuid(_uuid, _host.name) is True: if lg.remove_vm_by_uuid(_uuid, _host.name) is True:
lg.last_update = time.time() lg.last_update = time.time()
@ -791,16 +861,19 @@ class Resource(object):
if _host.remove_membership(lg) is True: if _host.remove_membership(lg) is True:
_host.last_update = time.time() _host.last_update = time.time()
if lg.group_type == "EX" or lg.group_type == "AFF" or lg.group_type == "DIV": if lg.group_type == "EX" or lg.group_type == "AFF" or \
lg.group_type == "DIV":
if len(lg.vm_list) == 0: if len(lg.vm_list) == 0:
del self.logical_groups[lgk] del self.logical_groups[lgk]
if isinstance(_host, Host) and _host.host_group is not None: if isinstance(_host, Host) and _host.host_group is not None:
self.remove_vm_by_uuid_from_logical_groups(_host.host_group, _uuid) self.remove_vm_by_uuid_from_logical_groups(_host.host_group, _uuid)
elif isinstance(_host, HostGroup) and _host.parent_resource is not None: elif isinstance(_host, HostGroup) and _host.parent_resource is not None:
self.remove_vm_by_uuid_from_logical_groups(_host.parent_resource, _uuid) self.remove_vm_by_uuid_from_logical_groups(_host.parent_resource,
_uuid)
def clean_none_vms_from_logical_groups(self, _host): def clean_none_vms_from_logical_groups(self, _host):
"""Clean vms with status none from logical groups."""
for lgk in _host.memberships.keys(): for lgk in _host.memberships.keys():
if lgk not in self.logical_groups.keys(): if lgk not in self.logical_groups.keys():
continue continue
@ -814,7 +887,8 @@ class Resource(object):
_host.last_update = time.time() _host.last_update = time.time()
elif isinstance(_host, HostGroup): elif isinstance(_host, HostGroup):
if lg.group_type == "EX" or lg.group_type == "AFF" or lg.group_type == "DIV": if lg.group_type == "EX" or lg.group_type == "AFF" or \
lg.group_type == "DIV":
if lgk.split(":")[0] == _host.host_type: if lgk.split(":")[0] == _host.host_type:
if lg.clean_none_vms(_host.name) is True: if lg.clean_none_vms(_host.name) is True:
lg.last_update = time.time() lg.last_update = time.time()
@ -822,7 +896,8 @@ class Resource(object):
if _host.remove_membership(lg) is True: if _host.remove_membership(lg) is True:
_host.last_update = time.time() _host.last_update = time.time()
if lg.group_type == "EX" or lg.group_type == "AFF" or lg.group_type == "DIV": if lg.group_type == "EX" or lg.group_type == "AFF" or \
lg.group_type == "DIV":
if len(lg.vm_list) == 0: if len(lg.vm_list) == 0:
del self.logical_groups[lgk] del self.logical_groups[lgk]
@ -832,6 +907,7 @@ class Resource(object):
self.clean_none_vms_from_logical_groups(_host.parent_resource) self.clean_none_vms_from_logical_groups(_host.parent_resource)
def update_uuid_in_logical_groups(self, _h_uuid, _uuid, _host): def update_uuid_in_logical_groups(self, _h_uuid, _uuid, _host):
"""Update uuid in lgs and update lg last update time."""
for lgk in _host.memberships.keys(): for lgk in _host.memberships.keys():
lg = self.logical_groups[lgk] lg = self.logical_groups[lgk]
@ -839,7 +915,8 @@ class Resource(object):
if lg.update_uuid(_h_uuid, _uuid, _host.name) is True: if lg.update_uuid(_h_uuid, _uuid, _host.name) is True:
lg.last_update = time.time() lg.last_update = time.time()
elif isinstance(_host, HostGroup): elif isinstance(_host, HostGroup):
if lg.group_type == "EX" or lg.group_type == "AFF" or lg.group_type == "DIV": if lg.group_type == "EX" or lg.group_type == "AFF" or \
lg.group_type == "DIV":
if lgk.split(":")[0] == _host.host_type: if lgk.split(":")[0] == _host.host_type:
if lg.update_uuid(_h_uuid, _uuid, _host.name) is True: if lg.update_uuid(_h_uuid, _uuid, _host.name) is True:
lg.last_update = time.time() lg.last_update = time.time()
@ -847,9 +924,11 @@ class Resource(object):
if isinstance(_host, Host) and _host.host_group is not None: if isinstance(_host, Host) and _host.host_group is not None:
self.update_uuid_in_logical_groups(_h_uuid, _uuid, _host.host_group) self.update_uuid_in_logical_groups(_h_uuid, _uuid, _host.host_group)
elif isinstance(_host, HostGroup) and _host.parent_resource is not None: elif isinstance(_host, HostGroup) and _host.parent_resource is not None:
self.update_uuid_in_logical_groups(_h_uuid, _uuid, _host.parent_resource) self.update_uuid_in_logical_groups(_h_uuid, _uuid,
_host.parent_resource)
def update_h_uuid_in_logical_groups(self, _h_uuid, _uuid, _host): def update_h_uuid_in_logical_groups(self, _h_uuid, _uuid, _host):
"""Update orchestration id in lgs and update lg last update time."""
for lgk in _host.memberships.keys(): for lgk in _host.memberships.keys():
lg = self.logical_groups[lgk] lg = self.logical_groups[lgk]
@ -857,17 +936,26 @@ class Resource(object):
if lg.update_h_uuid(_h_uuid, _uuid, _host.name) is True: if lg.update_h_uuid(_h_uuid, _uuid, _host.name) is True:
lg.last_update = time.time() lg.last_update = time.time()
elif isinstance(_host, HostGroup): elif isinstance(_host, HostGroup):
if lg.group_type == "EX" or lg.group_type == "AFF" or lg.group_type == "DIV": if lg.group_type == "EX" or lg.group_type == "AFF" or \
lg.group_type == "DIV":
if lgk.split(":")[0] == _host.host_type: if lgk.split(":")[0] == _host.host_type:
if lg.update_h_uuid(_h_uuid, _uuid, _host.name) is True: if lg.update_h_uuid(_h_uuid, _uuid, _host.name) is True:
lg.last_update = time.time() lg.last_update = time.time()
if isinstance(_host, Host) and _host.host_group is not None: if isinstance(_host, Host) and _host.host_group is not None:
self.update_h_uuid_in_logical_groups(_h_uuid, _uuid, _host.host_group) self.update_h_uuid_in_logical_groups(_h_uuid, _uuid,
_host.host_group)
elif isinstance(_host, HostGroup) and _host.parent_resource is not None: elif isinstance(_host, HostGroup) and _host.parent_resource is not None:
self.update_h_uuid_in_logical_groups(_h_uuid, _uuid, _host.parent_resource) self.update_h_uuid_in_logical_groups(_h_uuid, _uuid,
_host.parent_resource)
def compute_avail_resources(self, hk, host): def compute_avail_resources(self, hk, host):
"""Compute avail resources for host.
This function computes ram, cpu and disk allocation ratios for
the passed in host. Then uses data to compute avail memory, disk
and vCPUs.
"""
ram_allocation_ratio_list = [] ram_allocation_ratio_list = []
cpu_allocation_ratio_list = [] cpu_allocation_ratio_list = []
disk_allocation_ratio_list = [] disk_allocation_ratio_list = []
@ -875,11 +963,14 @@ class Resource(object):
for _, lg in host.memberships.iteritems(): for _, lg in host.memberships.iteritems():
if lg.group_type == "AGGR": if lg.group_type == "AGGR":
if "ram_allocation_ratio" in lg.metadata.keys(): if "ram_allocation_ratio" in lg.metadata.keys():
ram_allocation_ratio_list.append(float(lg.metadata["ram_allocation_ratio"])) ram_allocation_ratio_list.append(
float(lg.metadata["ram_allocation_ratio"]))
if "cpu_allocation_ratio" in lg.metadata.keys(): if "cpu_allocation_ratio" in lg.metadata.keys():
cpu_allocation_ratio_list.append(float(lg.metadata["cpu_allocation_ratio"])) cpu_allocation_ratio_list.append(
float(lg.metadata["cpu_allocation_ratio"]))
if "disk_allocation_ratio" in lg.metadata.keys(): if "disk_allocation_ratio" in lg.metadata.keys():
disk_allocation_ratio_list.append(float(lg.metadata["disk_allocation_ratio"])) disk_allocation_ratio_list.append(
float(lg.metadata["disk_allocation_ratio"]))
ram_allocation_ratio = 1.0 ram_allocation_ratio = 1.0
if len(ram_allocation_ratio_list) > 0: if len(ram_allocation_ratio_list) > 0:
@ -890,12 +981,15 @@ class Resource(object):
static_ram_standby_ratio = 0 static_ram_standby_ratio = 0
if self.config.static_mem_standby_ratio > 0: if self.config.static_mem_standby_ratio > 0:
static_ram_standby_ratio = float(self.config.static_mem_standby_ratio) / float(100) static_ram_standby_ratio = \
float(self.config.static_mem_standby_ratio) / float(100)
host.compute_avail_mem(ram_allocation_ratio, static_ram_standby_ratio) host.compute_avail_mem(ram_allocation_ratio, static_ram_standby_ratio)
self.logger.debug("Resource.compute_avail_resources: host (" + hk + ")'s total_mem = " + self.logger.debug("Resource.compute_avail_resources: host (" +
str(host.mem_cap) + ", avail_mem = " + str(host.avail_mem_cap)) hk + ")'s total_mem = " +
str(host.mem_cap) + ", avail_mem = " +
str(host.avail_mem_cap))
cpu_allocation_ratio = 1.0 cpu_allocation_ratio = 1.0
if len(cpu_allocation_ratio_list) > 0: if len(cpu_allocation_ratio_list) > 0:
@ -906,30 +1000,39 @@ class Resource(object):
static_cpu_standby_ratio = 0 static_cpu_standby_ratio = 0
if self.config.static_cpu_standby_ratio > 0: if self.config.static_cpu_standby_ratio > 0:
static_cpu_standby_ratio = float(self.config.static_cpu_standby_ratio) / float(100) static_cpu_standby_ratio = \
float(self.config.static_cpu_standby_ratio) / float(100)
host.compute_avail_vCPUs(cpu_allocation_ratio, static_cpu_standby_ratio) host.compute_avail_vCPUs(cpu_allocation_ratio, static_cpu_standby_ratio)
self.logger.debug("Resource.compute_avail_resources: host (" + hk + ")'s total_vCPUs = " + self.logger.debug("Resource.compute_avail_resources: host (" +
str(host.vCPUs) + ", avail_vCPUs = " + str(host.avail_vCPUs)) hk + ")'s total_vCPUs = " +
str(host.vCPUs) + ", avail_vCPUs = " +
str(host.avail_vCPUs))
disk_allocation_ratio = 1.0 disk_allocation_ratio = 1.0
if len(disk_allocation_ratio_list) > 0: if len(disk_allocation_ratio_list) > 0:
disk_allocation_ratio = min(disk_allocation_ratio_list) disk_allocation_ratio = min(disk_allocation_ratio_list)
else: else:
if self.config.default_disk_allocation_ratio > 0: if self.config.default_disk_allocation_ratio > 0:
disk_allocation_ratio = self.config.default_disk_allocation_ratio disk_allocation_ratio = \
self.config.default_disk_allocation_ratio
static_disk_standby_ratio = 0 static_disk_standby_ratio = 0
if self.config.static_local_disk_standby_ratio > 0: if self.config.static_local_disk_standby_ratio > 0:
static_disk_standby_ratio = float(self.config.static_local_disk_standby_ratio) / float(100) static_disk_standby_ratio = \
float(self.config.static_local_disk_standby_ratio) / float(100)
host.compute_avail_disk(disk_allocation_ratio, static_disk_standby_ratio) host.compute_avail_disk(disk_allocation_ratio,
static_disk_standby_ratio)
self.logger.debug("Resource.compute_avail_resources: host (" + hk + ")'s total_local_disk = " + self.logger.debug("Resource.compute_avail_resources: host (" +
str(host.local_disk_cap) + ", avail_local_disk = " + str(host.avail_local_disk_cap)) hk + ")'s total_local_disk = " +
str(host.local_disk_cap) + ", avail_local_disk = " +
str(host.avail_local_disk_cap))
def get_flavor(self, _name): def get_flavor(self, _name):
"""Return flavor according to name passed in."""
flavor = None flavor = None
if _name in self.flavors.keys(): if _name in self.flavors.keys():

View File

@ -13,19 +13,33 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
"""Resource Base.
File contains resource datatype objects from base type of a flavor and
builds all the way up to a datacenter object.
"""
from valet.engine.optimizer.app_manager.app_topology_base import LEVELS from valet.engine.optimizer.app_manager.app_topology_base import LEVELS
class Datacenter(object): class Datacenter(object):
"""Datacenter Class.
This object represents a datacenter. It contains all memberships or
logical groups in the datacenter, all resources available, placed vms,
and more throughout the datacenter.
"""
def __init__(self, _name): def __init__(self, _name):
"""Init Datacenter object."""
self.name = _name self.name = _name
self.region_code_list = [] self.region_code_list = []
self.status = "enabled" self.status = "enabled"
self.memberships = {} # all available logical groups (e.g., aggregate) in the datacenter # all available logical groups (e.g., aggregate) in the datacenter
self.memberships = {}
self.vCPUs = 0 self.vCPUs = 0
self.original_vCPUs = 0 self.original_vCPUs = 0
@ -42,13 +56,17 @@ class Datacenter(object):
self.resources = {} self.resources = {}
self.vm_list = [] # a list of placed vms, (ochestration_uuid, vm_name, physical_uuid) # a list of placed vms, (ochestration_uuid, vm_name, physical_uuid)
self.volume_list = [] # a list of placed volumes self.vm_list = []
# a list of placed volumes
self.volume_list = []
self.last_update = 0 self.last_update = 0
self.last_link_update = 0 self.last_link_update = 0
def init_resources(self): def init_resources(self):
"""Init datacenter resources to 0."""
self.vCPUs = 0 self.vCPUs = 0
self.original_vCPUs = 0 self.original_vCPUs = 0
self.avail_vCPUs = 0 self.avail_vCPUs = 0
@ -60,6 +78,7 @@ class Datacenter(object):
self.avail_local_disk_cap = 0 self.avail_local_disk_cap = 0
def get_json_info(self): def get_json_info(self):
"""Return JSON info for datacenter object."""
membership_list = [] membership_list = []
for lgk in self.memberships.keys(): for lgk in self.memberships.keys():
membership_list.append(lgk) membership_list.append(lgk)
@ -100,14 +119,24 @@ class Datacenter(object):
# data container for rack or cluster # data container for rack or cluster
class HostGroup(object): class HostGroup(object):
"""Class for Host Group Object.
This Class represents a group of hosts. If a single host is a single server
then host group is a rack or cluster of servers. This class contains all
memberships and resources for the group of hosts.
"""
def __init__(self, _id): def __init__(self, _id):
"""Init for Host Group Class."""
self.name = _id self.name = _id
self.host_type = "rack" # rack or cluster(e.g., power domain, zone)
# rack or cluster(e.g., power domain, zone)
self.host_type = "rack"
self.status = "enabled" self.status = "enabled"
self.memberships = {} # all available logical groups (e.g., aggregate) in this group # all available logical groups (e.g., aggregate) in this group
self.memberships = {}
self.vCPUs = 0 self.vCPUs = 0
self.original_vCPUs = 0 self.original_vCPUs = 0
@ -125,13 +154,17 @@ class HostGroup(object):
self.parent_resource = None # e.g., datacenter self.parent_resource = None # e.g., datacenter
self.child_resources = {} # e.g., hosting servers self.child_resources = {} # e.g., hosting servers
self.vm_list = [] # a list of placed vms, (ochestration_uuid, vm_name, physical_uuid) # a list of placed vms, (ochestration_uuid, vm_name, physical_uuid)
self.volume_list = [] # a list of placed volumes self.vm_list = []
# a list of placed volumes
self.volume_list = []
self.last_update = 0 self.last_update = 0
self.last_link_update = 0 self.last_link_update = 0
def init_resources(self): def init_resources(self):
"""Init all host group resources to 0."""
self.vCPUs = 0 self.vCPUs = 0
self.original_vCPUs = 0 self.original_vCPUs = 0
self.avail_vCPUs = 0 self.avail_vCPUs = 0
@ -143,19 +176,24 @@ class HostGroup(object):
self.avail_local_disk_cap = 0 self.avail_local_disk_cap = 0
def init_memberships(self): def init_memberships(self):
"""Init Host Group memberships."""
for lgk in self.memberships.keys(): for lgk in self.memberships.keys():
lg = self.memberships[lgk] lg = self.memberships[lgk]
if lg.group_type == "EX" or lg.group_type == "AFF" or lg.group_type == "DIV": if lg.group_type == "EX" or lg.group_type == "AFF" or \
lg.group_type == "DIV":
level = lg.name.split(":")[0] level = lg.name.split(":")[0]
if LEVELS.index(level) < LEVELS.index(self.host_type) or self.name not in lg.vms_per_host.keys(): if LEVELS.index(level) < LEVELS.index(self.host_type) or \
self.name not in lg.vms_per_host.keys():
del self.memberships[lgk] del self.memberships[lgk]
else: else:
del self.memberships[lgk] del self.memberships[lgk]
def remove_membership(self, _lg): def remove_membership(self, _lg):
"""Return True if membership to group _lg removed."""
cleaned = False cleaned = False
if _lg.group_type == "EX" or _lg.group_type == "AFF" or _lg.group_type == "DIV": if _lg.group_type == "EX" or _lg.group_type == "AFF" or \
_lg.group_type == "DIV":
if self.name not in _lg.vms_per_host.keys(): if self.name not in _lg.vms_per_host.keys():
del self.memberships[_lg.name] del self.memberships[_lg.name]
cleaned = True cleaned = True
@ -163,12 +201,14 @@ class HostGroup(object):
return cleaned return cleaned
def check_availability(self): def check_availability(self):
"""Return True if Host Group status is 'enabled'."""
if self.status == "enabled": if self.status == "enabled":
return True return True
else: else:
return False return False
def get_json_info(self): def get_json_info(self):
"""Return JSON info for Host Group object."""
membership_list = [] membership_list = []
for lgk in self.memberships.keys(): for lgk in self.memberships.keys():
membership_list.append(lgk) membership_list.append(lgk)
@ -208,15 +248,25 @@ class HostGroup(object):
class Host(object): class Host(object):
"""Class for Host Object.
This class is for a Host Object, imagine a server. This means
information about the groups the host is a part of, all the hardware
parameters (vCPUs, local disk, memory) as well as the list of vms and
volumes placed on the host.
"""
def __init__(self, _name): def __init__(self, _name):
"""Init for Host object."""
self.name = _name self.name = _name
self.tag = [] # mark if this is synch'ed by multiple sources # mark if this is synch'ed by multiple sources
self.tag = []
self.status = "enabled" self.status = "enabled"
self.state = "up" self.state = "up"
self.memberships = {} # logical group (e.g., aggregate) this hosting server is involved in # logical group (e.g., aggregate) this hosting server is involved in
self.memberships = {}
self.vCPUs = 0 self.vCPUs = 0
self.original_vCPUs = 0 self.original_vCPUs = 0
@ -238,13 +288,17 @@ class Host(object):
self.host_group = None # e.g., rack self.host_group = None # e.g., rack
self.vm_list = [] # a list of placed vms, (ochestration_uuid, vm_name, physical_uuid) # a list of placed vms, (ochestration_uuid, vm_name, physical_uuid)
self.volume_list = [] # a list of placed volumes self.vm_list = []
# a list of placed volumes
self.volume_list = []
self.last_update = 0 self.last_update = 0
self.last_link_update = 0 self.last_link_update = 0
def clean_memberships(self): def clean_memberships(self):
"""Return True if host cleaned from logical group membership."""
cleaned = False cleaned = False
for lgk in self.memberships.keys(): for lgk in self.memberships.keys():
@ -256,9 +310,11 @@ class Host(object):
return cleaned return cleaned
def remove_membership(self, _lg): def remove_membership(self, _lg):
"""Return True if host removed from logical group _lg passed in."""
cleaned = False cleaned = False
if _lg.group_type == "EX" or _lg.group_type == "AFF" or _lg.group_type == "DIV": if _lg.group_type == "EX" or _lg.group_type == "AFF" or \
_lg.group_type == "DIV":
if self.name not in _lg.vms_per_host.keys(): if self.name not in _lg.vms_per_host.keys():
del self.memberships[_lg.name] del self.memberships[_lg.name]
cleaned = True cleaned = True
@ -266,12 +322,15 @@ class Host(object):
return cleaned return cleaned
def check_availability(self): def check_availability(self):
if self.status == "enabled" and self.state == "up" and ("nova" in self.tag) and ("infra" in self.tag): """Return True if host is up, enabled and tagged as nova infra."""
if self.status == "enabled" and self.state == "up" and \
("nova" in self.tag) and ("infra" in self.tag):
return True return True
else: else:
return False return False
def get_uuid(self, _h_uuid): def get_uuid(self, _h_uuid):
"""Return uuid of vm with matching orchestration id(_h_uuid)."""
uuid = None uuid = None
for vm_id in self.vm_list: for vm_id in self.vm_list:
@ -282,6 +341,7 @@ class Host(object):
return uuid return uuid
def exist_vm_by_h_uuid(self, _h_uuid): def exist_vm_by_h_uuid(self, _h_uuid):
"""Return True if vm with orchestration id(_h_uuid) exists on host."""
exist = False exist = False
for vm_id in self.vm_list: for vm_id in self.vm_list:
@ -292,6 +352,7 @@ class Host(object):
return exist return exist
def exist_vm_by_uuid(self, _uuid): def exist_vm_by_uuid(self, _uuid):
"""Return True if vm with physical id(_uuid) exists on host."""
exist = False exist = False
for vm_id in self.vm_list: for vm_id in self.vm_list:
@ -302,6 +363,7 @@ class Host(object):
return exist return exist
def remove_vm_by_h_uuid(self, _h_uuid): def remove_vm_by_h_uuid(self, _h_uuid):
"""Return True if vm removed with matching _h_uuid."""
success = False success = False
for vm_id in self.vm_list: for vm_id in self.vm_list:
@ -313,6 +375,7 @@ class Host(object):
return success return success
def remove_vm_by_uuid(self, _uuid): def remove_vm_by_uuid(self, _uuid):
"""Return True if vm removed with matching _uuid."""
success = False success = False
for vm_id in self.vm_list: for vm_id in self.vm_list:
@ -324,6 +387,7 @@ class Host(object):
return success return success
def update_uuid(self, _h_uuid, _uuid): def update_uuid(self, _h_uuid, _uuid):
"""Return True if vm physical id updated."""
success = False success = False
vm_name = "none" vm_name = "none"
@ -341,6 +405,7 @@ class Host(object):
return success return success
def update_h_uuid(self, _h_uuid, _uuid): def update_h_uuid(self, _h_uuid, _uuid):
"""Return True if vm orchestration id (_h_uuid) updated."""
success = False success = False
vm_name = "none" vm_name = "none"
@ -358,19 +423,27 @@ class Host(object):
return success return success
def compute_avail_vCPUs(self, _overcommit_ratio, _standby_ratio): def compute_avail_vCPUs(self, _overcommit_ratio, _standby_ratio):
self.vCPUs = self.original_vCPUs * _overcommit_ratio * (1.0 - _standby_ratio) """Calc avail_vCPUs by calculating vCPUs and subtracting in use."""
self.vCPUs = \
self.original_vCPUs * _overcommit_ratio * (1.0 - _standby_ratio)
self.avail_vCPUs = self.vCPUs - self.vCPUs_used self.avail_vCPUs = self.vCPUs - self.vCPUs_used
def compute_avail_mem(self, _overcommit_ratio, _standby_ratio): def compute_avail_mem(self, _overcommit_ratio, _standby_ratio):
self.mem_cap = self.original_mem_cap * _overcommit_ratio * (1.0 - _standby_ratio) """Calc avail_mem by calculating mem_cap and subtract used mem."""
self.mem_cap = \
self.original_mem_cap * _overcommit_ratio * (1.0 - _standby_ratio)
used_mem_mb = self.original_mem_cap - self.free_mem_mb used_mem_mb = self.original_mem_cap - self.free_mem_mb
self.avail_mem_cap = self.mem_cap - used_mem_mb self.avail_mem_cap = self.mem_cap - used_mem_mb
def compute_avail_disk(self, _overcommit_ratio, _standby_ratio): def compute_avail_disk(self, _overcommit_ratio, _standby_ratio):
self.local_disk_cap = self.original_local_disk_cap * _overcommit_ratio * (1.0 - _standby_ratio) """Calc avail_disk by calc local_disk_cap and subtract used disk."""
self.local_disk_cap = \
self.original_local_disk_cap * \
_overcommit_ratio * \
(1.0 - _standby_ratio)
free_disk_cap = self.free_disk_gb free_disk_cap = self.free_disk_gb
if self.disk_available_least > 0: if self.disk_available_least > 0:
@ -381,6 +454,7 @@ class Host(object):
self.avail_local_disk_cap = self.local_disk_cap - used_disk_cap self.avail_local_disk_cap = self.local_disk_cap - used_disk_cap
def get_json_info(self): def get_json_info(self):
"""Return JSON info for Host object."""
membership_list = [] membership_list = []
for lgk in self.memberships.keys(): for lgk in self.memberships.keys():
membership_list.append(lgk) membership_list.append(lgk)
@ -418,23 +492,37 @@ class Host(object):
class LogicalGroup(object): class LogicalGroup(object):
"""Logical Group class.
This class contains info about grouped vms, such as metadata when placing
nodes, list of placed vms, list of placed volumes and group type.
"""
def __init__(self, _name): def __init__(self, _name):
"""Init Logical Group object."""
self.name = _name self.name = _name
self.group_type = "AGGR" # AGGR, AZ, INTG, EX, DIV, or AFF
# AGGR, AZ, INTG, EX, DIV, or AFF
self.group_type = "AGGR"
self.status = "enabled" self.status = "enabled"
self.metadata = {} # any metadata to be matched when placing nodes # any metadata to be matched when placing nodes
self.metadata = {}
self.vm_list = [] # a list of placed vms, (ochestration_uuid, vm_name, physical_uuid) # a list of placed vms, (ochestration_uuid, vm_name, physical_uuid)
self.volume_list = [] # a list of placed volumes self.vm_list = []
self.vms_per_host = {} # key = host_id, value = a list of placed vms # a list of placed volumes
self.volume_list = []
# key = host_id, value = a list of placed vms
self.vms_per_host = {}
self.last_update = 0 self.last_update = 0
def exist_vm_by_h_uuid(self, _h_uuid): def exist_vm_by_h_uuid(self, _h_uuid):
"""Return True if h_uuid exist in vm_list as an orchestration_uuid."""
exist = False exist = False
for vm_id in self.vm_list: for vm_id in self.vm_list:
@ -445,6 +533,7 @@ class LogicalGroup(object):
return exist return exist
def exist_vm_by_uuid(self, _uuid): def exist_vm_by_uuid(self, _uuid):
"""Return True if uuid exist in vm_list as physical_uuid."""
exist = False exist = False
for vm_id in self.vm_list: for vm_id in self.vm_list:
@ -455,6 +544,7 @@ class LogicalGroup(object):
return exist return exist
def update_uuid(self, _h_uuid, _uuid, _host_id): def update_uuid(self, _h_uuid, _uuid, _host_id):
"""Return True if _uuid and/or _host_id successfully updated."""
success = False success = False
vm_name = "none" vm_name = "none"
@ -481,6 +571,7 @@ class LogicalGroup(object):
return success return success
def update_h_uuid(self, _h_uuid, _uuid, _host_id): def update_h_uuid(self, _h_uuid, _uuid, _host_id):
"""Return True physical_uuid and/or _host_id successfully updated."""
success = False success = False
vm_name = "none" vm_name = "none"
@ -507,12 +598,14 @@ class LogicalGroup(object):
return success return success
def add_vm_by_h_uuid(self, _vm_id, _host_id): def add_vm_by_h_uuid(self, _vm_id, _host_id):
"""Return True if vm added with id _vm_id(orchestration id)."""
success = False success = False
if self.exist_vm_by_h_uuid(_vm_id[0]) is False: if self.exist_vm_by_h_uuid(_vm_id[0]) is False:
self.vm_list.append(_vm_id) self.vm_list.append(_vm_id)
if self.group_type == "EX" or self.group_type == "AFF" or self.group_type == "DIV": if self.group_type == "EX" or self.group_type == "AFF" or \
self.group_type == "DIV":
if _host_id not in self.vms_per_host.keys(): if _host_id not in self.vms_per_host.keys():
self.vms_per_host[_host_id] = [] self.vms_per_host[_host_id] = []
self.vms_per_host[_host_id].append(_vm_id) self.vms_per_host[_host_id].append(_vm_id)
@ -522,6 +615,7 @@ class LogicalGroup(object):
return success return success
def remove_vm_by_h_uuid(self, _h_uuid, _host_id): def remove_vm_by_h_uuid(self, _h_uuid, _host_id):
"""Return True if vm removed with id _h_uuid(orchestration id)."""
success = False success = False
for vm_id in self.vm_list: for vm_id in self.vm_list:
@ -537,13 +631,16 @@ class LogicalGroup(object):
success = True success = True
break break
if self.group_type == "EX" or self.group_type == "AFF" or self.group_type == "DIV": if self.group_type == "EX" or self.group_type == "AFF" or \
if (_host_id in self.vms_per_host.keys()) and len(self.vms_per_host[_host_id]) == 0: self.group_type == "DIV":
if (_host_id in self.vms_per_host.keys()) and \
len(self.vms_per_host[_host_id]) == 0:
del self.vms_per_host[_host_id] del self.vms_per_host[_host_id]
return success return success
def remove_vm_by_uuid(self, _uuid, _host_id): def remove_vm_by_uuid(self, _uuid, _host_id):
"""Return True if vm with matching uuid found and removed."""
success = False success = False
for vm_id in self.vm_list: for vm_id in self.vm_list:
@ -559,13 +656,16 @@ class LogicalGroup(object):
success = True success = True
break break
if self.group_type == "EX" or self.group_type == "AFF" or self.group_type == "DIV": if self.group_type == "EX" or self.group_type == "AFF" or \
if (_host_id in self.vms_per_host.keys()) and len(self.vms_per_host[_host_id]) == 0: self.group_type == "DIV":
if (_host_id in self.vms_per_host.keys()) and \
len(self.vms_per_host[_host_id]) == 0:
del self.vms_per_host[_host_id] del self.vms_per_host[_host_id]
return success return success
def clean_none_vms(self, _host_id): def clean_none_vms(self, _host_id):
"""Return True if vm's or host vm's removed with physical id none."""
success = False success = False
for vm_id in self.vm_list: for vm_id in self.vm_list:
@ -579,13 +679,16 @@ class LogicalGroup(object):
self.vms_per_host[_host_id].remove(vm_id) self.vms_per_host[_host_id].remove(vm_id)
success = True success = True
if self.group_type == "EX" or self.group_type == "AFF" or self.group_type == "DIV": if self.group_type == "EX" or self.group_type == "AFF" or \
if (_host_id in self.vms_per_host.keys()) and len(self.vms_per_host[_host_id]) == 0: self.group_type == "DIV":
if (_host_id in self.vms_per_host.keys()) and \
len(self.vms_per_host[_host_id]) == 0:
del self.vms_per_host[_host_id] del self.vms_per_host[_host_id]
return success return success
def get_json_info(self): def get_json_info(self):
"""Return JSON info for Logical Group object."""
return {'status': self.status, return {'status': self.status,
'group_type': self.group_type, 'group_type': self.group_type,
'metadata': self.metadata, 'metadata': self.metadata,
@ -596,8 +699,10 @@ class LogicalGroup(object):
class Switch(object): class Switch(object):
"""Switch class."""
def __init__(self, _switch_id): def __init__(self, _switch_id):
"""Init Switch object."""
self.name = _switch_id self.name = _switch_id
self.switch_type = "ToR" # root, spine, ToR, or leaf self.switch_type = "ToR" # root, spine, ToR, or leaf
@ -610,6 +715,7 @@ class Switch(object):
self.last_update = 0 self.last_update = 0
def get_json_info(self): def get_json_info(self):
"""Return JSON info on Switch object."""
ulinks = {} ulinks = {}
for ulk, ul in self.up_links.iteritems(): for ulk, ul in self.up_links.iteritems():
ulinks[ulk] = ul.get_json_info() ulinks[ulk] = ul.get_json_info()
@ -626,8 +732,10 @@ class Switch(object):
class Link(object): class Link(object):
"""Link class."""
def __init__(self, _name): def __init__(self, _name):
"""Init Link object."""
self.name = _name # format: source + "-" + target self.name = _name # format: source + "-" + target
self.resource = None # switch beging connected to self.resource = None # switch beging connected to
@ -635,14 +743,17 @@ class Link(object):
self.avail_nw_bandwidth = 0 self.avail_nw_bandwidth = 0
def get_json_info(self): def get_json_info(self):
"""Return JSON info on Link object."""
return {'resource': self.resource.name, return {'resource': self.resource.name,
'bandwidth': self.nw_bandwidth, 'bandwidth': self.nw_bandwidth,
'avail_bandwidth': self.avail_nw_bandwidth} 'avail_bandwidth': self.avail_nw_bandwidth}
class StorageHost(object): class StorageHost(object):
"""Storage Host class."""
def __init__(self, _name): def __init__(self, _name):
"""Init Storage Host object."""
self.name = _name self.name = _name
self.storage_class = None # tiering, e.g., platinum, gold, silver self.storage_class = None # tiering, e.g., platinum, gold, silver
@ -658,6 +769,7 @@ class StorageHost(object):
self.last_cap_update = 0 self.last_cap_update = 0
def get_json_info(self): def get_json_info(self):
"""Return JSON info on Storage Host object."""
return {'status': self.status, return {'status': self.status,
'class': self.storage_class, 'class': self.storage_class,
'host_list': self.host_list, 'host_list': self.host_list,
@ -669,8 +781,10 @@ class StorageHost(object):
class Flavor(object): class Flavor(object):
"""Flavor class."""
def __init__(self, _name): def __init__(self, _name):
"""Init flavor object."""
self.name = _name self.name = _name
self.flavor_id = None self.flavor_id = None
@ -685,6 +799,7 @@ class Flavor(object):
self.last_update = 0 self.last_update = 0
def get_json_info(self): def get_json_info(self):
"""Return JSON info of Flavor Object."""
return {'status': self.status, return {'status': self.status,
'flavor_id': self.flavor_id, 'flavor_id': self.flavor_id,
'vCPUs': self.vCPUs, 'vCPUs': self.vCPUs,

View File

@ -1,36 +1,33 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain # Copyright 2014-2017 AT&T Intellectual Property
# a copy of the License at #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# #
# http://www.apache.org/licenses/LICENSE-2.0 # http://www.apache.org/licenses/LICENSE-2.0
# #
# Unless required by applicable law or agreed to in writing, software # Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # distributed under the License is distributed on an "AS IS" BASIS,
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# License for the specific language governing permissions and limitations # See the License for the specific language governing permissions and
# under the License. # limitations under the License.
"""Simulate hosts and flavors."""
################################################################################################################# from valet.engine.resource_manager.resource_base \
# Author: Gueyoung Jung import Host, LogicalGroup, Flavor
# Contact: gjung@research.att.com
# Version 2.0.2: Feb. 9, 2016
#
# Functions
# - Simulate hosts and flavors
#
#################################################################################################################
from valet.engine.resource_manager.resource_base import Host, LogicalGroup, Flavor
class SimCompute(object): class SimCompute(object):
"""Simulate Compute class."""
def __init__(self, _config): def __init__(self, _config):
"""Init."""
self.config = _config self.config = _config
def set_hosts(self, _hosts, _logical_groups): def set_hosts(self, _hosts, _logical_groups):
"""Return success after setting sim hosts and flavors."""
self._set_availability_zones(_hosts, _logical_groups) self._set_availability_zones(_hosts, _logical_groups)
self._set_aggregates(_hosts, _logical_groups) self._set_aggregates(_hosts, _logical_groups)
@ -47,18 +44,9 @@ class SimCompute(object):
_logical_groups[logical_group.name] = logical_group _logical_groups[logical_group.name] = logical_group
for r_num in range(0, self.config.num_of_racks): for r_num in range(0, self.config.num_of_racks):
# for test
'''
num_of_hosts = 0
if r_num == 1:
num_of_hosts = 1
else:
num_of_hosts = 2
for h_num in range(0, num_of_hosts):
'''
for h_num in range(0, self.config.num_of_hosts_per_rack): for h_num in range(0, self.config.num_of_hosts_per_rack):
host = Host(self.config.mode + "0r" + str(r_num) + "c" + str(h_num)) host = Host(self.config.mode + "0r" + str(r_num) + "c" +
str(h_num))
host.tag.append("nova") host.tag.append("nova")
host.memberships["nova"] = logical_group host.memberships["nova"] = logical_group
@ -81,9 +69,11 @@ class SimCompute(object):
aggregate = _logical_groups["aggregate" + str(a_num)] aggregate = _logical_groups["aggregate" + str(a_num)]
for r_num in range(0, self.config.num_of_racks): for r_num in range(0, self.config.num_of_racks):
for h_num in range(0, self.config.num_of_hosts_per_rack): for h_num in range(0, self.config.num_of_hosts_per_rack):
host_name = self.config.mode + "0r" + str(r_num) + "c" + str(h_num) host_name = self.config.mode + "0r" + str(r_num) + "c" + \
str(h_num)
if host_name in _hosts.keys(): if host_name in _hosts.keys():
if (h_num % (self.config.aggregated_ratio + a_num)) == 0: if (h_num %
(self.config.aggregated_ratio + a_num)) == 0:
host = _hosts[host_name] host = _hosts[host_name]
host.memberships[aggregate.name] = aggregate host.memberships[aggregate.name] = aggregate
@ -94,40 +84,29 @@ class SimCompute(object):
def _set_resources(self, _hosts): def _set_resources(self, _hosts):
for r_num in range(0, self.config.num_of_racks): for r_num in range(0, self.config.num_of_racks):
# for test
'''
num_of_hosts = 0
if r_num == 1:
num_of_hosts = 1
else:
num_of_hosts = 2
for h_num in range(0, num_of_hosts):
'''
for h_num in range(0, self.config.num_of_hosts_per_rack): for h_num in range(0, self.config.num_of_hosts_per_rack):
host_name = self.config.mode + "0r" + str(r_num) + "c" + str(h_num) host_name = self.config.mode + "0r" + str(r_num) + "c" + \
str(h_num)
if host_name in _hosts.keys(): if host_name in _hosts.keys():
host = _hosts[host_name] host = _hosts[host_name]
# for test
'''
if r_num == 1:
host.status = "disabled"
host.state = "down"
'''
host.original_vCPUs = float(self.config.cpus_per_host) host.original_vCPUs = float(self.config.cpus_per_host)
host.vCPUs_used = 0.0 host.vCPUs_used = 0.0
host.original_mem_cap = float(self.config.mem_per_host) host.original_mem_cap = float(self.config.mem_per_host)
host.free_mem_mb = host.original_mem_cap host.free_mem_mb = host.original_mem_cap
host.original_local_disk_cap = float(self.config.disk_per_host) host.original_local_disk_cap = \
float(self.config.disk_per_host)
host.free_disk_gb = host.original_local_disk_cap host.free_disk_gb = host.original_local_disk_cap
host.disk_available_least = host.original_local_disk_cap host.disk_available_least = host.original_local_disk_cap
def set_flavors(self, _flavors): def set_flavors(self, _flavors):
"""Return success after setting passed in flavors."""
for f_num in range(0, self.config.num_of_basic_flavors): for f_num in range(0, self.config.num_of_basic_flavors):
flavor = Flavor("bflavor" + str(f_num)) flavor = Flavor("bflavor" + str(f_num))
flavor.vCPUs = float(self.config.base_flavor_cpus * (f_num + 1)) flavor.vCPUs = float(self.config.base_flavor_cpus * (f_num + 1))
flavor.mem_cap = float(self.config.base_flavor_mem * (f_num + 1)) flavor.mem_cap = float(self.config.base_flavor_mem * (f_num + 1))
flavor.disk_cap = float(self.config.base_flavor_disk * (f_num + 1)) + 10.0 + 20.0 / 1024.0 flavor.disk_cap = \
float(self.config.base_flavor_disk * (f_num + 1)) + \
10.0 + 20.0 / 1024.0
_flavors[flavor.name] = flavor _flavors[flavor.name] = flavor
@ -137,7 +116,6 @@ class SimCompute(object):
flavor.mem_cap = self.config.base_flavor_mem * (a_num + 1) flavor.mem_cap = self.config.base_flavor_mem * (a_num + 1)
flavor.disk_cap = self.config.base_flavor_disk * (a_num + 1) flavor.disk_cap = self.config.base_flavor_disk * (a_num + 1)
# flavor.extra_specs["availability_zone"] = "nova"
flavor.extra_specs["cpu_allocation_ratio"] = "0.5" flavor.extra_specs["cpu_allocation_ratio"] = "0.5"
_flavors[flavor.name] = flavor _flavors[flavor.name] = flavor

View File

@ -1,36 +1,33 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain # Copyright 2014-2017 AT&T Intellectual Property
# a copy of the License at #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# #
# http://www.apache.org/licenses/LICENSE-2.0 # http://www.apache.org/licenses/LICENSE-2.0
# #
# Unless required by applicable law or agreed to in writing, software # Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # distributed under the License is distributed on an "AS IS" BASIS,
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# License for the specific language governing permissions and limitations # See the License for the specific language governing permissions and
# under the License. # limitations under the License.
"""Simulate datacenter configurations (i.e., layout, cabling)."""
################################################################################################################# from valet.engine.resource_manager.resource_base \
# Author: Gueyoung Jung import HostGroup, Host, Switch, Link
# Contact: gjung@research.att.com
# Version 2.0.2: Feb. 9, 2016
#
# Functions
# - Simulate datacenter configurations (i.e., layout, cabling)
#
#################################################################################################################
from valet.engine.resource_manager.resource_base import HostGroup, Host, Switch, Link
class SimTopology(object): class SimTopology(object):
"""Simulate Network and Host Topology class."""
def __init__(self, _config): def __init__(self, _config):
"""Init."""
self.config = _config self.config = _config
def set_topology(self, _datacenter, _host_groups, _hosts, _switches): def set_topology(self, _datacenter, _host_groups, _hosts, _switches):
"""Return success string after setting network and host topology."""
self._set_network_topology(_switches) self._set_network_topology(_switches)
self._set_host_topology(_datacenter, _host_groups, _hosts, _switches) self._set_host_topology(_datacenter, _host_groups, _hosts, _switches)
@ -71,7 +68,8 @@ class SimTopology(object):
ps = None ps = None
if (s_num % 2) == 0: if (s_num % 2) == 0:
if (s_num + 1) < self.config.num_of_spine_switches: if (s_num + 1) < self.config.num_of_spine_switches:
ps = _switches[root_switch.name + "s" + str(s_num + 1)] ps = _switches[root_switch.name + "s" +
str(s_num + 1)]
else: else:
ps = _switches[root_switch.name + "s" + str(s_num - 1)] ps = _switches[root_switch.name + "s" + str(s_num - 1)]
if ps is not None: if ps is not None:
@ -87,7 +85,8 @@ class SimTopology(object):
parent_switch_list = [] parent_switch_list = []
if self.config.num_of_spine_switches > 0: if self.config.num_of_spine_switches > 0:
for s_num in range(0, self.config.num_of_spine_switches): for s_num in range(0, self.config.num_of_spine_switches):
parent_switch_list.append(_switches[root_switch.name + "s" + str(s_num)]) parent_switch_list.append(_switches[root_switch.name +
"s" + str(s_num)])
else: else:
parent_switch_list.append(_switches[root_switch.name]) parent_switch_list.append(_switches[root_switch.name])

View File

@ -13,6 +13,8 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
"""Topology class - performs actual setting up of Topology object."""
import copy import copy
import sys import sys
@ -21,18 +23,24 @@ from valet.engine.resource_manager.resource_base import HostGroup, Switch, Link
class Topology(object): class Topology(object):
"""Topology class."""
def __init__(self, _config, _logger): def __init__(self, _config, _logger):
"""Init config and logger."""
self.config = _config self.config = _config
self.logger = _logger self.logger = _logger
# Triggered by rhosts change # Triggered by rhosts change
def set_topology(self, _datacenter, _host_groups, _hosts, _rhosts, _switches): def set_topology(self, _datacenter, _host_groups, _hosts, _rhosts,
result_status = self._set_host_topology(_datacenter, _host_groups, _hosts, _rhosts) _switches):
"""Return result status if setting host or network topology fails."""
result_status = self._set_host_topology(_datacenter, _host_groups,
_hosts, _rhosts)
if result_status != "success": if result_status != "success":
return result_status return result_status
result_status = self._set_network_topology(_datacenter, _host_groups, _hosts, _switches) result_status = self._set_network_topology(_datacenter, _host_groups,
_hosts, _switches)
if result_status != "success": if result_status != "success":
return result_status return result_status
@ -80,7 +88,8 @@ class Topology(object):
return "success" return "success"
# NOTE: this is just muck-ups # NOTE: this is just muck-ups
def _set_network_topology(self, _datacenter, _host_groups, _hosts, _switches): def _set_network_topology(self, _datacenter, _host_groups, _hosts,
_switches):
root_switch = Switch(_datacenter.name) root_switch = Switch(_datacenter.name)
root_switch.switch_type = "root" root_switch.switch_type = "root"
@ -134,7 +143,8 @@ class Topology(object):
if index >= self.config.num_of_region_chars: if index >= self.config.num_of_region_chars:
if not isdigit(c): if not isdigit(c):
if index == self.config.num_of_region_chars: if index == self.config.num_of_region_chars:
status = "invalid region name = " + _host_name[:index] + c status = "invalid region name = " + \
_host_name[:index] + c
validated_name = False validated_name = False
break break
@ -152,7 +162,9 @@ class Topology(object):
validated_name = False validated_name = False
break break
if end_of_rack_index == 0 and index > (end_of_region_index + 1): if end_of_rack_index == 0 and \
index > (end_of_region_index + 1):
end_of_rack_index = index end_of_rack_index = index
num_of_fields += 1 num_of_fields += 1
@ -179,7 +191,8 @@ class Topology(object):
validated_name = False validated_name = False
if num_of_fields != 3: if num_of_fields != 3:
status = "invalid number of identification fields = " + str(num_of_fields) status = "invalid number of identification fields = " + \
str(num_of_fields)
validated_name = False validated_name = False
if validated_name is False: if validated_name is False:

View File

@ -13,16 +13,26 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
"""Topology Manager.
Actions involved in setting up and managing topology. This includes setting
topology, checking updates, creating new switches( also hosts and links), as
well as updating them.
"""
import threading import threading
import time import time
from valet.engine.resource_manager.resource_base import Datacenter, HostGroup, Host, Switch, Link from valet.engine.resource_manager.resource_base \
import Datacenter, HostGroup, Host, Switch, Link
from valet.engine.resource_manager.topology import Topology from valet.engine.resource_manager.topology import Topology
class TopologyManager(threading.Thread): class TopologyManager(threading.Thread):
"""Topology Manager Class."""
def __init__(self, _t_id, _t_name, _resource, _data_lock, _config, _logger): def __init__(self, _t_id, _t_name, _resource, _data_lock, _config, _logger):
"""Init Topology Manager."""
threading.Thread.__init__(self) threading.Thread.__init__(self)
self.thread_id = _t_id self.thread_id = _t_id
@ -37,7 +47,9 @@ class TopologyManager(threading.Thread):
self.logger = _logger self.logger = _logger
def run(self): def run(self):
self.logger.info("TopologyManager: start " + self.thread_name + " ......") """Function starts and tracks Topology Manager Thread."""
self.logger.info("TopologyManager: start " +
self.thread_name + " ......")
if self.config.topology_trigger_freq > 0: if self.config.topology_trigger_freq > 0:
period_end = time.time() + self.config.topology_trigger_freq period_end = time.time() + self.config.topology_trigger_freq
@ -61,7 +73,10 @@ class TopologyManager(threading.Thread):
time.sleep(70) time.sleep(70)
now = time.localtime() now = time.localtime()
if now.tm_year > last_trigger_year or now.tm_mon > last_trigger_mon or now.tm_mday > last_trigger_mday: if now.tm_year > last_trigger_year or \
now.tm_mon > last_trigger_mon or \
now.tm_mday > last_trigger_mday:
timeout = False timeout = False
if timeout is False and \ if timeout is False and \
@ -77,13 +92,14 @@ class TopologyManager(threading.Thread):
def _run(self): def _run(self):
self.logger.info("TopologyManager: --- start topology status update ---") self.logger.info("TopologyManager: --- start topology "
"status update ---")
self.data_lock.acquire() self.data_lock.acquire()
try: try:
if self.set_topology() is True: if self.set_topology() is True:
if self.resource.update_topology() is False: if self.resource.update_topology() is False:
# TODO: ignore? # TODO(UNKOWN): ignore?
pass pass
finally: finally:
self.data_lock.release() self.data_lock.release()
@ -91,6 +107,7 @@ class TopologyManager(threading.Thread):
self.logger.info("TopologyManager: --- done topology status update ---") self.logger.info("TopologyManager: --- done topology status update ---")
def set_topology(self): def set_topology(self):
"""Return True if datacenter topology successfully setup."""
datacenter = None datacenter = None
host_groups = {} host_groups = {}
hosts = {} hosts = {}
@ -105,7 +122,8 @@ class TopologyManager(threading.Thread):
topology = Topology(self.config, self.logger) topology = Topology(self.config, self.logger)
status = topology.set_topology(datacenter, host_groups, hosts, self.resource.hosts, switches) status = topology.set_topology(datacenter, host_groups, hosts,
self.resource.hosts, switches)
if status != "success": if status != "success":
self.logger.error("TopologyManager: " + status) self.logger.error("TopologyManager: " + status)
return False return False
@ -122,7 +140,8 @@ class TopologyManager(threading.Thread):
new_switch.last_update = time.time() new_switch.last_update = time.time()
self.logger.warn("TopologyManager: new switch (" + new_switch.name + ") added") self.logger.warn("TopologyManager: new switch (" +
new_switch.name + ") added")
for rsk in self.resource.switches.keys(): for rsk in self.resource.switches.keys():
if rsk not in _switches.keys(): if rsk not in _switches.keys():
@ -131,7 +150,8 @@ class TopologyManager(threading.Thread):
switch.last_update = time.time() switch.last_update = time.time()
self.logger.warn("TopologyManager: switch (" + switch.name + ") disabled") self.logger.warn("TopologyManager: switch (" +
switch.name + ") disabled")
for hk in _hosts.keys(): for hk in _hosts.keys():
if hk not in self.resource.hosts.keys(): if hk not in self.resource.hosts.keys():
@ -140,7 +160,8 @@ class TopologyManager(threading.Thread):
new_host.last_update = time.time() new_host.last_update = time.time()
self.logger.warn("TopologyManager: new host (" + new_host.name + ") added from configuration") self.logger.warn("TopologyManager: new host (" +
new_host.name + ") added from configuration")
for rhk in self.resource.hosts.keys(): for rhk in self.resource.hosts.keys():
if rhk not in _hosts.keys(): if rhk not in _hosts.keys():
@ -150,7 +171,8 @@ class TopologyManager(threading.Thread):
host.last_update = time.time() host.last_update = time.time()
self.logger.warn("TopologyManager: host (" + host.name + ") removed from configuration") self.logger.warn("TopologyManager: host (" +
host.name + ") removed from configuration")
for hgk in _host_groups.keys(): for hgk in _host_groups.keys():
if hgk not in self.resource.host_groups.keys(): if hgk not in self.resource.host_groups.keys():
@ -159,7 +181,8 @@ class TopologyManager(threading.Thread):
new_host_group.last_update = time.time() new_host_group.last_update = time.time()
self.logger.warn("TopologyManager: new host_group (" + new_host_group.name + ") added") self.logger.warn("TopologyManager: new host_group (" +
new_host_group.name + ") added")
for rhgk in self.resource.host_groups.keys(): for rhgk in self.resource.host_groups.keys():
if rhgk not in _host_groups.keys(): if rhgk not in _host_groups.keys():
@ -168,7 +191,8 @@ class TopologyManager(threading.Thread):
host_group.last_update = time.time() host_group.last_update = time.time()
self.logger.warn("TopologyManager: host_group (" + host_group.name + ") disabled") self.logger.warn("TopologyManager: host_group (" +
host_group.name + ") disabled")
for sk in _switches.keys(): for sk in _switches.keys():
switch = _switches[sk] switch = _switches[sk]
@ -180,7 +204,8 @@ class TopologyManager(threading.Thread):
for hk in _hosts.keys(): for hk in _hosts.keys():
host = _hosts[hk] host = _hosts[hk]
rhost = self.resource.hosts[hk] rhost = self.resource.hosts[hk]
(topology_updated, link_updated) = self._check_host_update(host, rhost) (topology_updated, link_updated) = \
self._check_host_update(host, rhost)
if topology_updated is True: if topology_updated is True:
rhost.last_update = time.time() rhost.last_update = time.time()
if link_updated is True: if link_updated is True:
@ -189,13 +214,15 @@ class TopologyManager(threading.Thread):
for hgk in _host_groups.keys(): for hgk in _host_groups.keys():
hg = _host_groups[hgk] hg = _host_groups[hgk]
rhg = self.resource.host_groups[hgk] rhg = self.resource.host_groups[hgk]
(topology_updated, link_updated) = self._check_host_group_update(hg, rhg) (topology_updated, link_updated) = \
self._check_host_group_update(hg, rhg)
if topology_updated is True: if topology_updated is True:
rhg.last_update = time.time() rhg.last_update = time.time()
if link_updated is True: if link_updated is True:
rhg.last_link_update = time.time() rhg.last_link_update = time.time()
(topology_updated, link_updated) = self._check_datacenter_update(_datacenter) (topology_updated, link_updated) = \
self._check_datacenter_update(_datacenter)
if topology_updated is True: if topology_updated is True:
self.resource.datacenter.last_update = time.time() self.resource.datacenter.last_update = time.time()
if link_updated is True: if link_updated is True:
@ -242,12 +269,14 @@ class TopologyManager(threading.Thread):
if _switch.switch_type != _rswitch.switch_type: if _switch.switch_type != _rswitch.switch_type:
_rswitch.switch_type = _switch.switch_type _rswitch.switch_type = _switch.switch_type
updated = True updated = True
self.logger.warn("TopologyManager: switch (" + _rswitch.name + ") updated (switch type)") self.logger.warn("TopologyManager: switch (" + _rswitch.name +
") updated (switch type)")
if _rswitch.status == "disabled": if _rswitch.status == "disabled":
_rswitch.status = "enabled" _rswitch.status = "enabled"
updated = True updated = True
self.logger.warn("TopologyManager: switch (" + _rswitch.name + ") updated (enabled)") self.logger.warn("TopologyManager: switch (" + _rswitch.name +
") updated (enabled)")
for ulk in _switch.up_links.keys(): for ulk in _switch.up_links.keys():
exist = False exist = False
@ -259,7 +288,8 @@ class TopologyManager(threading.Thread):
new_link = self._create_new_link(_switch.up_links[ulk]) new_link = self._create_new_link(_switch.up_links[ulk])
_rswitch.up_links[new_link.name] = new_link _rswitch.up_links[new_link.name] = new_link
updated = True updated = True
self.logger.warn("TopologyManager: switch (" + _rswitch.name + ") updated (new link)") self.logger.warn("TopologyManager: switch (" + _rswitch.name +
") updated (new link)")
for rulk in _rswitch.up_links.keys(): for rulk in _rswitch.up_links.keys():
exist = False exist = False
@ -270,14 +300,16 @@ class TopologyManager(threading.Thread):
if exist is False: if exist is False:
del _rswitch.up_links[rulk] del _rswitch.up_links[rulk]
updated = True updated = True
self.logger.warn("TopologyManager: switch (" + _rswitch.name + ") updated (link removed)") self.logger.warn("TopologyManager: switch (" + _rswitch.name +
") updated (link removed)")
for ulk in _rswitch.up_links.keys(): for ulk in _rswitch.up_links.keys():
link = _switch.up_links[ulk] link = _switch.up_links[ulk]
rlink = _rswitch.up_links[ulk] rlink = _rswitch.up_links[ulk]
if self._check_link_update(link, rlink) is True: if self._check_link_update(link, rlink) is True:
updated = True updated = True
self.logger.warn("TopologyManager: switch (" + _rswitch.name + ") updated (bandwidth)") self.logger.warn("TopologyManager: switch (" + _rswitch.name +
") updated (bandwidth)")
for plk in _switch.peer_links.keys(): for plk in _switch.peer_links.keys():
exist = False exist = False
@ -289,7 +321,8 @@ class TopologyManager(threading.Thread):
new_link = self._create_new_link(_switch.peer_links[plk]) new_link = self._create_new_link(_switch.peer_links[plk])
_rswitch.peer_links[new_link.name] = new_link _rswitch.peer_links[new_link.name] = new_link
updated = True updated = True
self.logger.warn("TopologyManager: switch (" + _rswitch.name + ") updated (new link)") self.logger.warn("TopologyManager: switch (" + _rswitch.name +
") updated (new link)")
for rplk in _rswitch.peer_links.keys(): for rplk in _rswitch.peer_links.keys():
exist = False exist = False
@ -300,14 +333,16 @@ class TopologyManager(threading.Thread):
if exist is False: if exist is False:
del _rswitch.peer_links[rplk] del _rswitch.peer_links[rplk]
updated = True updated = True
self.logger.warn("TopologyManager: switch (" + _rswitch.name + ") updated (link removed)") self.logger.warn("TopologyManager: switch (" + _rswitch.name +
") updated (link removed)")
for plk in _rswitch.peer_links.keys(): for plk in _rswitch.peer_links.keys():
link = _switch.peer_links[plk] link = _switch.peer_links[plk]
rlink = _rswitch.peer_links[plk] rlink = _rswitch.peer_links[plk]
if self._check_link_update(link, rlink) is True: if self._check_link_update(link, rlink) is True:
updated = True updated = True
self.logger.warn("TopologyManager: switch (" + _rswitch.name + ") updated (bandwidth)") self.logger.warn("TopologyManager: switch (" + _rswitch.name +
") updated (bandwidth)")
return updated return updated
@ -327,15 +362,20 @@ class TopologyManager(threading.Thread):
if "infra" not in _rhost.tag: if "infra" not in _rhost.tag:
_rhost.tag.append("infra") _rhost.tag.append("infra")
updated = True updated = True
self.logger.warn("TopologyManager: host (" + _rhost.name + ") updated (tag)") self.logger.warn("TopologyManager: host (" + _rhost.name +
") updated (tag)")
if _rhost.host_group is None or \
_host.host_group.name != _rhost.host_group.name:
if _rhost.host_group is None or _host.host_group.name != _rhost.host_group.name:
if _host.host_group.name in self.resource.host_groups.keys(): if _host.host_group.name in self.resource.host_groups.keys():
_rhost.host_group = self.resource.host_groups[_host.host_group.name] _rhost.host_group = \
self.resource.host_groups[_host.host_group.name]
else: else:
_rhost.host_group = self.resource.datacenter _rhost.host_group = self.resource.datacenter
updated = True updated = True
self.logger.warn("TopologyManager: host (" + _rhost.name + ") updated (host_group)") self.logger.warn("TopologyManager: host (" + _rhost.name +
") updated (host_group)")
for sk in _host.switches.keys(): for sk in _host.switches.keys():
exist = False exist = False
@ -346,7 +386,8 @@ class TopologyManager(threading.Thread):
if exist is False: if exist is False:
_rhost.switches[sk] = self.resource.switches[sk] _rhost.switches[sk] = self.resource.switches[sk]
link_updated = True link_updated = True
self.logger.warn("TopologyManager: host (" + _rhost.name + ") updated (new switch)") self.logger.warn("TopologyManager: host (" + _rhost.name +
") updated (new switch)")
for rsk in _rhost.switches.keys(): for rsk in _rhost.switches.keys():
exist = False exist = False
@ -357,7 +398,8 @@ class TopologyManager(threading.Thread):
if exist is False: if exist is False:
del _rhost.switches[rsk] del _rhost.switches[rsk]
link_updated = True link_updated = True
self.logger.warn("TopologyManager: host (" + _rhost.name + ") updated (switch removed)") self.logger.warn("TopologyManager: host (" + _rhost.name +
") updated (switch removed)")
return (updated, link_updated) return (updated, link_updated)
@ -368,20 +410,26 @@ class TopologyManager(threading.Thread):
if _hg.host_type != _rhg.host_type: if _hg.host_type != _rhg.host_type:
_rhg.host_type = _hg.host_type _rhg.host_type = _hg.host_type
updated = True updated = True
self.logger.warn("TopologyManager: host_group (" + _rhg.name + ") updated (hosting type)") self.logger.warn("TopologyManager: host_group (" + _rhg.name +
") updated (hosting type)")
if _rhg.status == "disabled": if _rhg.status == "disabled":
_rhg.status = "enabled" _rhg.status = "enabled"
updated = True updated = True
self.logger.warn("TopologyManager: host_group (" + _rhg.name + ") updated (enabled)") self.logger.warn("TopologyManager: host_group (" + _rhg.name +
") updated (enabled)")
if _rhg.parent_resource is None or \
_hg.parent_resource.name != _rhg.parent_resource.name:
if _rhg.parent_resource is None or _hg.parent_resource.name != _rhg.parent_resource.name:
if _hg.parent_resource.name in self.resource.host_groups.keys(): if _hg.parent_resource.name in self.resource.host_groups.keys():
_rhg.parent_resource = self.resource.host_groups[_hg.parent_resource.name] _rhg.parent_resource = \
self.resource.host_groups[_hg.parent_resource.name]
else: else:
_rhg.parent_resource = self.resource.datacenter _rhg.parent_resource = self.resource.datacenter
updated = True updated = True
self.logger.warn("TopologyManager: host_group (" + _rhg.name + ") updated (parent host_group)") self.logger.warn("TopologyManager: host_group (" + _rhg.name +
") updated (parent host_group)")
for rk in _hg.child_resources.keys(): for rk in _hg.child_resources.keys():
exist = False exist = False
@ -395,7 +443,8 @@ class TopologyManager(threading.Thread):
elif _rhg.host_type == "cluster": elif _rhg.host_type == "cluster":
_rhg.child_resources[rk] = self.resource.host_groups[rk] _rhg.child_resources[rk] = self.resource.host_groups[rk]
updated = True updated = True
self.logger.warn("TopologyManager: host_group (" + _rhg.name + ") updated (new child host)") self.logger.warn("TopologyManager: host_group (" + _rhg.name +
") updated (new child host)")
for rrk in _rhg.child_resources.keys(): for rrk in _rhg.child_resources.keys():
exist = False exist = False
@ -406,7 +455,8 @@ class TopologyManager(threading.Thread):
if exist is False: if exist is False:
del _rhg.child_resources[rrk] del _rhg.child_resources[rrk]
updated = True updated = True
self.logger.warn("TopologyManager: host_group (" + _rhg.name + ") updated (child host removed)") self.logger.warn("TopologyManager: host_group (" + _rhg.name +
") updated (child host removed)")
for sk in _hg.switches.keys(): for sk in _hg.switches.keys():
exist = False exist = False
@ -417,7 +467,8 @@ class TopologyManager(threading.Thread):
if exist is False: if exist is False:
_rhg.switches[sk] = self.resource.switches[sk] _rhg.switches[sk] = self.resource.switches[sk]
link_updated = True link_updated = True
self.logger.warn("TopologyManager: host_group (" + _rhg.name + ") updated (new switch)") self.logger.warn("TopologyManager: host_group (" + _rhg.name +
") updated (new switch)")
for rsk in _rhg.switches.keys(): for rsk in _rhg.switches.keys():
exist = False exist = False
@ -428,7 +479,8 @@ class TopologyManager(threading.Thread):
if exist is False: if exist is False:
del _rhg.switches[rsk] del _rhg.switches[rsk]
link_updated = True link_updated = True
self.logger.warn("TopologyManager: host_group (" + _rhg.name + ") updated (switch removed)") self.logger.warn("TopologyManager: host_group (" + _rhg.name +
") updated (switch removed)")
return (updated, link_updated) return (updated, link_updated)
@ -440,13 +492,15 @@ class TopologyManager(threading.Thread):
if rc not in self.resource.datacenter.region_code_list: if rc not in self.resource.datacenter.region_code_list:
self.resource.datacenter.region_code_list.append(rc) self.resource.datacenter.region_code_list.append(rc)
updated = True updated = True
self.logger.warn("TopologyManager: datacenter updated (new region code, " + rc + ")") self.logger.warn("TopologyManager: datacenter updated "
"(new region code, " + rc + ")")
for rrc in self.resource.datacenter.region_code_list: for rrc in self.resource.datacenter.region_code_list:
if rrc not in _datacenter.region_code_list: if rrc not in _datacenter.region_code_list:
self.resource.datacenter.region_code_list.remove(rrc) self.resource.datacenter.region_code_list.remove(rrc)
updated = True updated = True
self.logger.warn("TopologyManager: datacenter updated (region code, " + rrc + ", removed)") self.logger.warn("TopologyManager: datacenter updated "
"(region code, " + rrc + ", removed)")
for rk in _datacenter.resources.keys(): for rk in _datacenter.resources.keys():
exist = False exist = False
@ -457,11 +511,14 @@ class TopologyManager(threading.Thread):
if exist is False: if exist is False:
r = _datacenter.resources[rk] r = _datacenter.resources[rk]
if isinstance(r, HostGroup): if isinstance(r, HostGroup):
self.resource.datacenter.resources[rk] = self.resource.host_groups[rk] self.resource.datacenter.resources[rk] = \
self.resource.host_groups[rk]
elif isinstance(r, Host): elif isinstance(r, Host):
self.resource.datacenter.resources[rk] = self.resource.hosts[rk] self.resource.datacenter.resources[rk] = \
self.resource.hosts[rk]
updated = True updated = True
self.logger.warn("TopologyManager: datacenter updated (new resource)") self.logger.warn("TopologyManager: datacenter updated "
"(new resource)")
for rrk in self.resource.datacenter.resources.keys(): for rrk in self.resource.datacenter.resources.keys():
exist = False exist = False
@ -472,7 +529,8 @@ class TopologyManager(threading.Thread):
if exist is False: if exist is False:
del self.resource.datacenter.resources[rrk] del self.resource.datacenter.resources[rrk]
updated = True updated = True
self.logger.warn("TopologyManager: datacenter updated (resource removed)") self.logger.warn("TopologyManager: datacenter updated "
"(resource removed)")
for sk in _datacenter.root_switches.keys(): for sk in _datacenter.root_switches.keys():
exist = False exist = False
@ -481,9 +539,11 @@ class TopologyManager(threading.Thread):
exist = True exist = True
break break
if exist is False: if exist is False:
self.resource.datacenter.root_switches[sk] = self.resource.switches[sk] self.resource.datacenter.root_switches[sk] = \
self.resource.switches[sk]
link_updated = True link_updated = True
self.logger.warn("TopologyManager: datacenter updated (new switch)") self.logger.warn("TopologyManager: datacenter updated "
"(new switch)")
for rsk in self.resource.datacenter.root_switches.keys(): for rsk in self.resource.datacenter.root_switches.keys():
exist = False exist = False
@ -494,6 +554,7 @@ class TopologyManager(threading.Thread):
if exist is False: if exist is False:
del self.resource.datacenter.root_switches[rsk] del self.resource.datacenter.root_switches[rsk]
link_updated = True link_updated = True
self.logger.warn("TopologyManager: datacenter updated (switch removed)") self.logger.warn("TopologyManager: datacenter updated "
"(switch removed)")
return (updated, link_updated) return (updated, link_updated)

View File

@ -13,17 +13,24 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
# - Simulate datacenter configurations (i.e., layout, cabling) """Simulate datacenter configurations (i.e., layout, cabling)."""
from valet.engine.resource_manager.resource_base import HostGroup, Host, Switch, Link from valet.engine.resource_manager.resource_base \
import HostGroup, Host, Switch, Link
class SimTopology(object): class SimTopology(object):
"""Simulate Topology class.
Sim network and host topology for datacenters.
"""
def __init__(self, _config): def __init__(self, _config):
"""Init."""
self.config = _config self.config = _config
def set_topology(self, _datacenter, _host_groups, _hosts, _switches): def set_topology(self, _datacenter, _host_groups, _hosts, _switches):
"""Return success after setting network and host topology."""
self._set_network_topology(_switches) self._set_network_topology(_switches)
self._set_host_topology(_datacenter, _host_groups, _hosts, _switches) self._set_host_topology(_datacenter, _host_groups, _hosts, _switches)
@ -64,7 +71,8 @@ class SimTopology(object):
ps = None ps = None
if (s_num % 2) == 0: if (s_num % 2) == 0:
if (s_num + 1) < self.config.num_of_spine_switches: if (s_num + 1) < self.config.num_of_spine_switches:
ps = _switches[root_switch.name + "s" + str(s_num + 1)] ps = _switches[root_switch.name + "s" +
str(s_num + 1)]
else: else:
ps = _switches[root_switch.name + "s" + str(s_num - 1)] ps = _switches[root_switch.name + "s" + str(s_num - 1)]
if ps is not None: if ps is not None:
@ -80,7 +88,8 @@ class SimTopology(object):
parent_switch_list = [] parent_switch_list = []
if self.config.num_of_spine_switches > 0: if self.config.num_of_spine_switches > 0:
for s_num in range(0, self.config.num_of_spine_switches): for s_num in range(0, self.config.num_of_spine_switches):
parent_switch_list.append(_switches[root_switch.name + "s" + str(s_num)]) parent_switch_list.append(_switches[root_switch.name +
"s" + str(s_num)])
else: else:
parent_switch_list.append(_switches[root_switch.name]) parent_switch_list.append(_switches[root_switch.name])

View File

@ -1,5 +1,3 @@
#!/usr/bin/env python
# vi: sw=4 ts=4:
# #
# Copyright 2014-2017 AT&T Intellectual Property # Copyright 2014-2017 AT&T Intellectual Property
# #
@ -15,25 +13,25 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
""" """HA Valet.
Mnemonic: ha_valet.py Mnemonic: ha_valet.py
Abstract: High availability script for valet processes. Abstract: High availability script for valet processes. Starts it's
starts it's configured processes, and pings for their availability. configured processes, and pings for their availability. If local
If local instances are not running, then makes the instances are not running, then makes the current instances
current instances start. If it finds multiple instances running, then start. If it finds multiple instances running, then determines
determines which instance should be shut down based on priorities. which instance should be shut down based on priorities.
Author: Amnon Sagiv based on ha_tegu by Kaustubh Joshi Author: Amnon Sagiv based on ha_tegu by Kaustubh Joshi
------------------------------------------------------------------------------ ------------------------------------------------------------------------------
Algorithm Algorithm
----------- -----------
The ha_valet script runs on each valet node in a continuous loop checking for The ha_valet script runs on each valet node in a continuous loop checking for
heartbeats from all the valet nodes found in the "stand_by_list" conf property once heartbeats from all the valet nodes found in the "stand_by_list" conf property
every 5 secs (default). A heartbeat is obtained by invoking the "test_command" once every 5 secs (default). A heartbeat is obtained by invoking the
conf property. "test_command" conf property.
If exactly one monitored process instance is running, the script does If exactly one monitored process instance is running, the script does
nothing. If no instance is running, then the local instance is activated after nothing. If no instance is running, then the local instance is activated after
waiting for 5*priority seconds to let a higher priority valet take over waiting for 5*priority seconds to let a higher priority valet take over
@ -72,7 +70,7 @@ max_num_of_logs = 10
PRIMARY_SETUP = 1 PRIMARY_SETUP = 1
RETRY_COUNT = 3 # How many times to retry ping command RETRY_COUNT = 3 # How many times to retry ping command
CONNECT_TIMEOUT = 3 # Ping timeout CONNECT_TIMEOUT = 3 # Ping timeout
MAX_QUICK_STARTS = 10 # we stop if there are > 10 restarts in quick succession MAX_QUICK_STARTS = 10 # we stop if there are > 10 restart in quick succession
QUICK_RESTART_SEC = 150 # we consider it a quick restart if less than this QUICK_RESTART_SEC = 150 # we consider it a quick restart if less than this
# HA Configuration # HA Configuration
@ -111,7 +109,7 @@ CONF.register_opts(havalet_opts, ostro_group)
def read_conf(): def read_conf():
"""returns dictionary of configured processes""" """Return dictionary of configured processes."""
return dict([ return dict([
('Ostro', { ('Ostro', {
NAME: 'Ostro', NAME: 'Ostro',
@ -143,7 +141,8 @@ def prepare_log(obj, name):
obj.log.setLevel(logging.DEBUG) obj.log.setLevel(logging.DEBUG)
# logging.register_options(CONF) # logging.register_options(CONF)
# logging.setup(CONF, 'valet') # logging.setup(CONF, 'valet')
handler = logging.handlers.RotatingFileHandler(LOG_DIR + name + '.log', maxBytes=max_log_size, handler = logging.handlers.RotatingFileHandler(LOG_DIR + name + '.log',
maxBytes=max_log_size,
backupCount=max_num_of_logs) backupCount=max_num_of_logs)
fmt = logging.Formatter('%(asctime)s %(levelname)s %(message)s') fmt = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
handler.setFormatter(fmt) handler.setFormatter(fmt)
@ -153,14 +152,16 @@ def prepare_log(obj, name):
class HaValetThread (threading.Thread): class HaValetThread (threading.Thread):
def __init__(self, data, exit_event): def __init__(self, data, exit_event):
"""Initialize HAValetThread."""
threading.Thread.__init__(self) threading.Thread.__init__(self)
self.data = data self.data = data
self.log = None self.log = None
def run(self): def run(self):
"""Main function""" """Main function."""
prepare_log(self, self.data[NAME]) prepare_log(self, self.data[NAME])
self.log.info('HA Valet - ' + self.data[NAME] + ' Watcher Thread - starting') self.log.info('HA Valet - ' + self.data[NAME] +
' Watcher Thread - starting')
fqdn_list = [] fqdn_list = []
this_node = socket.getfqdn() this_node = socket.getfqdn()
@ -181,7 +182,8 @@ class HaValetThread (threading.Thread):
self.data[STAND_BY_LIST] = standby_list self.data[STAND_BY_LIST] = standby_list
self.log.debug("modified stand by list: " + str(standby_list)) self.log.debug("modified stand by list: " + str(standby_list))
except ValueError: except ValueError:
self.log.debug("host " + this_node + " is not in standby list: %s - continue" self.log.debug("host " + this_node +
" is not in standby list: %s - continue"
% str(standby_list)) % str(standby_list))
break break
@ -193,7 +195,7 @@ class HaValetThread (threading.Thread):
pass pass
def _main_loop(self, this_node): def _main_loop(self, this_node):
""" Main heartbeat and liveness check loop """Main heartbeat and liveness check loop.
:param this_node: host name :param this_node: host name
:type this_node: string :type this_node: string
@ -225,16 +227,19 @@ class HaValetThread (threading.Thread):
# No valet running. Wait for higher priority valet to activate. # No valet running. Wait for higher priority valet to activate.
time.sleep(HEARTBEAT_SEC * my_priority) time.sleep(HEARTBEAT_SEC * my_priority)
self.log.info('checking status here - ' + host + ', my priority: ' + str(my_priority)) self.log.info('checking status here - ' + host +
', my priority: ' + str(my_priority))
i_am_active, priority = self._is_active(eval(test_command)) i_am_active, priority = self._is_active(eval(test_command))
self.log.info(host + ': host_active = ' + str(i_am_active) + ', ' + str(priority)) self.log.info(host + ': host_active = ' + str(i_am_active) +
', ' + str(priority))
any_active = i_am_active any_active = i_am_active
self.log.info('any active = ' + str(any_active)) self.log.info('any active = ' + str(any_active))
# Check for active valets # Check for active valets
standby_list_is_empty = not standby_list standby_list_is_empty = not standby_list
if not standby_list_is_empty: if not standby_list_is_empty:
self.log.debug('main loop: standby_list is not empty ' + str(standby_list)) self.log.debug('main loop: standby_list is not empty ' +
str(standby_list))
for host_in_list in standby_list: for host_in_list in standby_list:
if host_in_list == this_node: if host_in_list == this_node:
self.log.info('host_in_list is this_node - skipping') self.log.info('host_in_list is this_node - skipping')
@ -242,39 +247,51 @@ class HaValetThread (threading.Thread):
self.log.info('checking status on - ' + host_in_list) self.log.info('checking status on - ' + host_in_list)
host = host_in_list host = host_in_list
host_active, host_priority = self._is_active(eval(test_command)) host_active, host_priority = \
self._is_active(eval(test_command))
host = self.data.get(HOST, 'localhost') host = self.data.get(HOST, 'localhost')
self.log.info(host_in_list + ' - host_active = ' + str(host_active) + ', ' + str(host_priority)) self.log.info(host_in_list + ' - host_active = ' +
str(host_active) + ', ' + str(host_priority))
# Check for split brain: 2 valets active # Check for split brain: 2 valets active
if i_am_active and host_active: if i_am_active and host_active:
self.log.info('found two live instances, checking priorities') self.log.info('found two live instances, '
'checking priorities')
should_be_active = self._should_be_active(host_priority, my_priority) should_be_active = self._should_be_active(host_priority, my_priority)
if should_be_active: if should_be_active:
self.log.info('deactivate myself, ' + host_in_list + ' already running') self.log.info('deactivate myself, ' + host_in_list +
self._deactivate_process(eval(stop_command)) # Deactivate myself ' already running')
# Deactivate myself
self._deactivate_process(eval(stop_command))
i_am_active = False i_am_active = False
else: else:
self.log.info('deactivate ' + self.data[NAME] + ' on ' + host_in_list + self.log.info('deactivate ' + self.data[NAME] +
' on ' + host_in_list +
', already running here') ', already running here')
host = host_in_list host = host_in_list
self._deactivate_process(eval(stop_command)) # Deactivate other valet # Deactivate other valet
self._deactivate_process(eval(stop_command))
host = self.data.get(HOST, 'localhost') host = self.data.get(HOST, 'localhost')
# Track that at-least one valet is active # Track that at-least one valet is active
any_active = any_active or host_active any_active = any_active or host_active
# If no active process or I'm primary, then we must try to start one # If no active process or I'm primary, then we must try to start one
if not any_active or (not i_am_active and my_priority == PRIMARY_SETUP): if not any_active or \
(not i_am_active and my_priority == PRIMARY_SETUP):
self.log.warn('there is no instance up') self.log.warn('there is no instance up')
self.log.info('Im primary instance: ' + str(my_priority is PRIMARY_SETUP)) self.log.info('Im primary instance: ' +
str(my_priority is PRIMARY_SETUP))
if priority_wait or my_priority == PRIMARY_SETUP: if priority_wait or my_priority == PRIMARY_SETUP:
now = int(time.time()) now = int(time.time())
if now - last_start < QUICK_RESTART_SEC: # quick restart (crash?) # quick restart (crash?)
if now - last_start < QUICK_RESTART_SEC:
quick_start += 1 quick_start += 1
if quick_start > MAX_QUICK_STARTS: if quick_start > MAX_QUICK_STARTS:
self.log.critical("too many restarts in quick succession.") self.log.critical("too many restarts "
"in quick succession.")
else: else:
quick_start = 0 # reset if it's been a while since last restart # reset if it's been a while since last restart
quick_start = 0
if last_start == 0: if last_start == 0:
diff = "never by this instance" diff = "never by this instance"
@ -283,12 +300,16 @@ class HaValetThread (threading.Thread):
last_start = now last_start = now
priority_wait = False priority_wait = False
if (not i_am_active and my_priority == PRIMARY_SETUP) or (standby_list is not None): if (not i_am_active and my_priority == PRIMARY_SETUP) or \
self.log.info('no running instance found, starting here; last start %s' % diff) (standby_list is not None):
self.log.info('no running instance found, '
'starting here; last start %s' % diff)
self._activate_process(start_command, my_priority) self._activate_process(start_command, my_priority)
else: else:
host = standby_list[0] # LIMITATION - supporting only 1 stand by host # LIMITATION - supporting only 1 stand by host
self.log.info('no running instances found, starting on %s; last start %s' % (host, diff)) host = standby_list[0]
self.log.info('no running instances found, starting '
'on %s; last start %s' % (host, diff))
self._activate_process(start_command, my_priority) self._activate_process(start_command, my_priority)
host = self.data.get(HOST, 'localhost') host = self.data.get(HOST, 'localhost')
else: else:
@ -298,7 +319,10 @@ class HaValetThread (threading.Thread):
# end loop # end loop
def _should_be_active(self, host_priority, my_priority): def _should_be_active(self, host_priority, my_priority):
""" Returns True if host should be active as opposed to current node, based on the hosts priorities. """Should Be Active.
Returns True if host should be active as opposed to current node,
based on the hosts priorities.
Lower value means higher Priority, Lower value means higher Priority,
0 (zero) - invalid priority (e.g. process is down) 0 (zero) - invalid priority (e.g. process is down)
@ -310,38 +334,42 @@ class HaValetThread (threading.Thread):
:return: True/False :return: True/False
:rtype: bool :rtype: bool
""" """
self.log.info('my priority is %d, remote priority is %d' % (my_priority, host_priority)) self.log.info('my priority is %d, remote priority is %d' %
(my_priority, host_priority))
return host_priority < my_priority return host_priority < my_priority
def _is_active(self, call): def _is_active(self, call):
""" Return 'True, Priority' if valet is running on host """_is_active.
Return 'True, Priority' if valet is running on host
'False, None' Otherwise. 'False, None' Otherwise.
""" """
# must use no-proxy to avoid proxy servers gumming up the works # must use no-proxy to avoid proxy servers gumming up the works
for i in xrange(RETRY_COUNT): for i in xrange(RETRY_COUNT):
try: try:
self.log.info('ping (retry %d): %s' % (i, call)) self.log.info('ping (retry %d): %s' % (i, call))
proc = subprocess.Popen(call, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) proc = subprocess.Popen(call, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, shell=True)
priority = proc.wait() priority = proc.wait()
if priority == 255: # no route to host if priority == 255: # no route to host
priority = 0 priority = 0
out, err = proc.communicate() out, err = proc.communicate()
self.log.debug('out: ' + out + ', err: ' + err) self.log.debug('out: ' + out + ', err: ' + err)
self.log.info('ping result (should be > 0): %s' % (str(priority))) self.log.info('ping result (should be > 0): %s'
% (str(priority)))
return (priority > 0), priority return (priority > 0), priority
except subprocess.CalledProcessError: except subprocess.CalledProcessError:
self.log.error('ping error: ' + str(subprocess.CalledProcessError)) self.log.error('ping error: ' +
str(subprocess.CalledProcessError))
continue continue
return False, None return False, None
def _deactivate_process(self, deactivate_command): def _deactivate_process(self, deactivate_command):
""" Deactivate valet on a given host. If host is omitted, local """Deactivate Process.
Deactivate valet on a given host. If host is omitted, local
valet is stopped. Returns True if successful, False on error. valet is stopped. Returns True if successful, False on error.
""" """
try: try:
# call = "'" + deactivate_command % (PROTO, host, port) + "'" # call = "'" + deactivate_command % (PROTO, host, port) + "'"
self.log.info('deactivate_command: ' + deactivate_command) self.log.info('deactivate_command: ' + deactivate_command)
@ -352,11 +380,11 @@ class HaValetThread (threading.Thread):
return False return False
def _activate_process(self, activate_command, priority): def _activate_process(self, activate_command, priority):
""" Activate valet on a given host. If host is omitted, local """Activate Process.
Activate valet on a given host. If host is omitted, local
valet is started. Returns True if successful, False on error. valet is started. Returns True if successful, False on error.
""" """
try: try:
self.log.info('activate_command: ' + activate_command) self.log.info('activate_command: ' + activate_command)
subprocess.check_call(activate_command, shell=True) subprocess.check_call(activate_command, shell=True)
@ -368,27 +396,31 @@ class HaValetThread (threading.Thread):
class HAValet(object): class HAValet(object):
""""""
def __init__(self): def __init__(self):
"""Init HAValet object."""
if not os.path.exists(LOG_DIR): if not os.path.exists(LOG_DIR):
os.makedirs(LOG_DIR) os.makedirs(LOG_DIR)
self.log = None self.log = None
@DeprecationWarning @DeprecationWarning
def _parse_valet_conf_v010(self, conf_file_name=DEFAULT_CONF_FILE, process=''): def _parse_valet_conf_v010(self, conf_file_name=DEFAULT_CONF_FILE,
""" This function reads the valet config file and returns configuration process=''):
"""Parse Valet Conf v010.
This function reads the valet config file and returns configuration
attributes in key/value format attributes in key/value format
:param conf_file_name: config file name :param conf_file_name: config file name
:type conf_file_name: string :type conf_file_name: string
:param process: specific process name :param process: specific process name
when not supplied - the module launches all the processes in the configuration when not supplied - the module launches all the
processes in the configuration
:type process: string :type process: string
:return: dictionary of configured monitored processes :return: dictionary of configured monitored processes
:rtype: dict :rtype: dict
""" """
cdata = {} cdata = {}
section = '' section = ''
@ -423,14 +455,16 @@ class HAValet(object):
return cdata return cdata
def _valid_process_conf_data(self, process_data): def _valid_process_conf_data(self, process_data):
""" verify all mandatory parameters are found in the monitored process configuration only standby_list is optional """Valid Process conf data.
verify all mandatory parameters are found in the monitored process
configuration only standby_list is optional
:param process_data: specific process configuration parameters :param process_data: specific process configuration parameters
:type process_data: dict :type process_data: dict
:return: are all mandatory parameters are found :return: are all mandatory parameters are found
:rtype: bool :rtype: bool
""" """
if (process_data.get(HOST) is not None and if (process_data.get(HOST) is not None and
process_data.get(PRIORITY) is not None and process_data.get(PRIORITY) is not None and
process_data.get(ORDER) is not None and process_data.get(ORDER) is not None and
@ -442,7 +476,7 @@ class HAValet(object):
return False return False
def start(self): def start(self):
"""Start valet HA - Main function""" """Start valet HA - Main function."""
prepare_log(self, 'havalet') prepare_log(self, 'havalet')
self.log.info('ha_valet v1.1 starting') self.log.info('ha_valet v1.1 starting')
@ -460,13 +494,15 @@ class HAValet(object):
for proc in proc_sorted: for proc in proc_sorted:
if self._valid_process_conf_data(proc): if self._valid_process_conf_data(proc):
self.log.info('Launching: ' + proc[NAME] + ' - parameters: ' + str(proc)) self.log.info('Launching: ' + proc[NAME] + ' - parameters: ' +
str(proc))
thread = HaValetThread(proc, exit_event) thread = HaValetThread(proc, exit_event)
time.sleep(HEARTBEAT_SEC) time.sleep(HEARTBEAT_SEC)
thread.start() thread.start()
threads.append(thread) threads.append(thread)
else: else:
self.log.info(proc[NAME] + " section is missing mandatory parameter.") self.log.info(proc[NAME] +
" section is missing mandatory parameter.")
continue continue
self.log.info('on air.') self.log.info('on air.')

View File

@ -13,6 +13,8 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
"""Config."""
from pecan.hooks import TransactionHook from pecan.hooks import TransactionHook
from valet.api.db import models from valet.api.db import models

View File

@ -13,6 +13,8 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
"""Conf Test - Test config file (app, connection, session, etc)."""
from copy import deepcopy from copy import deepcopy
import os import os
from pecan import conf from pecan import conf
@ -33,12 +35,14 @@ BIND = 'mysql+pymysql://root:password@127.0.0.1'
def config_file(): def config_file():
"""Return config file."""
here = os.path.abspath(os.path.dirname(__file__)) here = os.path.abspath(os.path.dirname(__file__))
return os.path.join(here, 'config.py') return os.path.join(here, 'config.py')
@pytest.fixture(scope='session') @pytest.fixture(scope='session')
def app(request): def app(request):
"""Return test app based on config file."""
config = configuration.conf_from_file(config_file()).to_dict() config = configuration.conf_from_file(config_file()).to_dict()
# Add the appropriate connection string to the app config. # Add the appropriate connection string to the app config.
@ -60,8 +64,10 @@ def connection(app, request):
print("=" * 80) print("=" * 80)
print("CREATING TEMPORARY DATABASE FOR TESTS") print("CREATING TEMPORARY DATABASE FOR TESTS")
print("=" * 80) print("=" * 80)
subprocess.call(['mysqladmin', '-f', '-uroot', '-ppassword', 'drop', DBNAME]) subprocess.call(['mysqladmin', '-f', '-uroot', '-ppassword', 'drop',
subprocess.call(['mysqladmin', '-f', '-uroot', '-ppassword', 'create', DBNAME]) DBNAME])
subprocess.call(['mysqladmin', '-f', '-uroot', '-ppassword', 'create',
DBNAME])
# Bind and create the database tables # Bind and create the database tables
_db.clear() _db.clear()
@ -94,7 +100,7 @@ def connection(app, request):
@pytest.fixture(scope='function') @pytest.fixture(scope='function')
def session(connection, request): def session(connection, request):
"""Creates a new database session for a test.""" """Create new database session for a test."""
_config = configuration.conf_from_file(config_file()).to_dict() _config = configuration.conf_from_file(config_file()).to_dict()
config = deepcopy(_config) config = deepcopy(_config)
@ -137,11 +143,16 @@ def session(connection, request):
class TestApp(object): class TestApp(object):
""" A controller test starts a database transaction and creates a fake WSGI app. """ """Test App Class.
A controller test starts a database transaction
and creates a fake WSGI app.
"""
__headers__ = {} __headers__ = {}
def __init__(self, app): def __init__(self, app):
"""Init Test App."""
self.app = app self.app = app
def _do_request(self, url, method='GET', **kwargs): def _do_request(self, url, method='GET', **kwargs):
@ -156,7 +167,7 @@ class TestApp(object):
return methods.get(method, self.app.get)(str(url), **kwargs) return methods.get(method, self.app.get)(str(url), **kwargs)
def post_json(self, url, **kwargs): def post_json(self, url, **kwargs):
""" note: """Post json.
@param (string) url - The URL to emulate a POST request to @param (string) url - The URL to emulate a POST request to
@returns (paste.fixture.TestResponse) @returns (paste.fixture.TestResponse)
@ -164,7 +175,7 @@ class TestApp(object):
return self._do_request(url, 'POSTJ', **kwargs) return self._do_request(url, 'POSTJ', **kwargs)
def post(self, url, **kwargs): def post(self, url, **kwargs):
""" note: """Post.
@param (string) url - The URL to emulate a POST request to @param (string) url - The URL to emulate a POST request to
@returns (paste.fixture.TestResponse) @returns (paste.fixture.TestResponse)
@ -172,7 +183,7 @@ class TestApp(object):
return self._do_request(url, 'POST', **kwargs) return self._do_request(url, 'POST', **kwargs)
def get(self, url, **kwargs): def get(self, url, **kwargs):
""" note: """Get.
@param (string) url - The URL to emulate a GET request to @param (string) url - The URL to emulate a GET request to
@returns (paste.fixture.TestResponse) @returns (paste.fixture.TestResponse)
@ -180,7 +191,7 @@ class TestApp(object):
return self._do_request(url, 'GET', **kwargs) return self._do_request(url, 'GET', **kwargs)
def put(self, url, **kwargs): def put(self, url, **kwargs):
""" note: """Put.
@param (string) url - The URL to emulate a PUT request to @param (string) url - The URL to emulate a PUT request to
@returns (paste.fixture.TestResponse) @returns (paste.fixture.TestResponse)
@ -188,7 +199,7 @@ class TestApp(object):
return self._do_request(url, 'PUT', **kwargs) return self._do_request(url, 'PUT', **kwargs)
def delete(self, url, **kwargs): def delete(self, url, **kwargs):
""" note: """Delete.
@param (string) url - The URL to emulate a DELETE request to @param (string) url - The URL to emulate a DELETE request to
@returns (paste.fixture.TestResponse) @returns (paste.fixture.TestResponse)

View File

@ -1,19 +1,25 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain # Copyright 2014-2017 AT&T Intellectual Property
# a copy of the License at #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# #
# http://www.apache.org/licenses/LICENSE-2.0 # http://www.apache.org/licenses/LICENSE-2.0
# #
# Unless required by applicable law or agreed to in writing, software # Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # distributed under the License is distributed on an "AS IS" BASIS,
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# License for the specific language governing permissions and limitations # See the License for the specific language governing permissions and
# under the License. # limitations under the License.
"""Init."""
from uuid import UUID from uuid import UUID
def is_valid_uuid4(uuid_string): def is_valid_uuid4(uuid_string):
""" Validate that a UUID string is in fact a valid uuid4. """Validate that a UUID string is in fact a valid uuid4.
Happily, the uuid module does the actual Happily, the uuid module does the actual
checking for us. checking for us.
@ -22,7 +28,6 @@ def is_valid_uuid4(uuid_string):
to the UUID() call, otherwise any 32-character to the UUID() call, otherwise any 32-character
hex string is considered valid. hex string is considered valid.
""" """
try: try:
val = UUID(uuid_string, version=4) val = UUID(uuid_string, version=4)
except ValueError: except ValueError:

View File

@ -13,6 +13,8 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
"""Test Plans."""
from uuid import uuid4 from uuid import uuid4
from valet.api.db.models import Plan, Placement from valet.api.db.models import Plan, Placement
@ -24,18 +26,23 @@ PLAN_NAME = 'ihaveaplan'
class TestPlansController(object): class TestPlansController(object):
"""Test Plans Controller Class."""
def test_get_index_no_plans(self, session): def test_get_index_no_plans(self, session):
"""Test getting plans where there are none, should be empty."""
result = session.app.get('/v1/plans/') result = session.app.get('/v1/plans/')
assert result.status_int == 200 assert result.status_int == 200
assert result.json == [] assert result.json == []
def test_get_index_a_plan(self, session): def test_get_index_a_plan(self, session):
"""Test get a plan using an index, should get a plan name."""
Plan(PLAN_NAME, STACK_ID) Plan(PLAN_NAME, STACK_ID)
session.commit() session.commit()
result = session.app.get('/v1/plans/').json result = session.app.get('/v1/plans/').json
assert result == [PLAN_NAME] assert result == [PLAN_NAME]
def test_single_plan_should_have_one_item(self, session): def test_single_plan_should_have_one_item(self, session):
"""Test getting a single plan with one item."""
Plan(PLAN_NAME, STACK_ID) Plan(PLAN_NAME, STACK_ID)
session.commit() session.commit()
result = session.app.get('/v1/plans/') result = session.app.get('/v1/plans/')
@ -43,6 +50,7 @@ class TestPlansController(object):
assert len(result.json) == 1 assert len(result.json) == 1
def test_list_a_few_plans(self, session): def test_list_a_few_plans(self, session):
"""Test returning a list of plans."""
for plan_number in range(20): for plan_number in range(20):
stack_id = str(uuid4()) stack_id = str(uuid4())
Plan('foo_%s' % plan_number, stack_id) Plan('foo_%s' % plan_number, stack_id)
@ -55,21 +63,26 @@ class TestPlansController(object):
class TestPlansItemController(object): class TestPlansItemController(object):
"""Test Plans Item Controller Class."""
def test_get_index_single_plan(self, session): def test_get_index_single_plan(self, session):
"""Test get index of a single plan."""
Plan(PLAN_NAME, STACK_ID) Plan(PLAN_NAME, STACK_ID)
session.commit() session.commit()
result = session.app.get('/v1/plans/%s/' % (STACK_ID)) result = session.app.get('/v1/plans/%s/' % STACK_ID)
assert result.status_int == 200 assert result.status_int == 200
def test_get_index_no_plan(self, session): def test_get_index_no_plan(self, session):
result = session.app.get('/v1/plans/%s/' % (STACK_ID), """Test getting index of no plan, should return 404."""
result = session.app.get('/v1/plans/%s/' % STACK_ID,
expect_errors=True) expect_errors=True)
assert result.status_int == 404 assert result.status_int == 404
def test_get_index_single_plan_data(self, session): def test_get_index_single_plan_data(self, session):
"""Test getting a single plan data."""
Plan(PLAN_NAME, STACK_ID) Plan(PLAN_NAME, STACK_ID)
session.commit() session.commit()
result = session.app.get('/v1/plans/%s/' % (STACK_ID)) result = session.app.get('/v1/plans/%s/' % STACK_ID)
json = result.json json = result.json
assert is_valid_uuid4(json['id']) assert is_valid_uuid4(json['id'])
assert json['name'] == PLAN_NAME assert json['name'] == PLAN_NAME
@ -77,6 +90,7 @@ class TestPlansItemController(object):
assert json['stack_id'] == STACK_ID assert json['stack_id'] == STACK_ID
def test_get_plan_refs(self, session): def test_get_plan_refs(self, session):
"""Test get plan refs by getting app json result."""
plan = Plan(PLAN_NAME, STACK_ID) plan = Plan(PLAN_NAME, STACK_ID)
Placement( Placement(
'placement_1', str(uuid4()), 'placement_1', str(uuid4()),
@ -89,7 +103,7 @@ class TestPlansItemController(object):
location='foo_2' location='foo_2'
) )
session.commit() session.commit()
result = session.app.get('/v1/plans/%s/' % (STACK_ID)) result = session.app.get('/v1/plans/%s/' % STACK_ID)
json = result.json json = result.json
assert is_valid_uuid4(json['id']) assert is_valid_uuid4(json['id'])
assert json['name'] == PLAN_NAME assert json['name'] == PLAN_NAME

View File

@ -13,6 +13,8 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
"""Base."""
from oslo_config import fixture as fixture_config from oslo_config import fixture as fixture_config
from oslo_log import log as logging from oslo_log import log as logging
from oslotest.base import BaseTestCase from oslotest.base import BaseTestCase
@ -26,24 +28,31 @@ class Base(BaseTestCase):
"""Test case base class for all unit tests.""" """Test case base class for all unit tests."""
def __init__(self, *args, **kwds): def __init__(self, *args, **kwds):
''' ''' """Init Base."""
super(Base, self).__init__(*args, **kwds) super(Base, self).__init__(*args, **kwds)
self.CONF = self.useFixture(fixture_config.Config()).conf self.CONF = self.useFixture(fixture_config.Config()).conf
init.prepare(self.CONF) init.prepare(self.CONF)
def setUp(self): def setUp(self):
"""Setup."""
super(Base, self).setUp() super(Base, self).setUp()
def run_test(self, stack_name, template_path): def run_test(self, stack_name, template_path):
''' main function ''' """Main Function."""
pass pass
def validate(self, result): def validate(self, result):
"""Validate."""
# TODO(CM): Maybe fix unnecessary obfuscation of assertEqual code.
self.assertEqual(True, result.ok, result.message) self.assertEqual(True, result.ok, result.message)
def validate_test(self, result): def validate_test(self, result):
"""Validate Test."""
# TODO(CM): Maybe fix unnecessary obfuscation of assertTrue code.
self.assertTrue(result) self.assertTrue(result)
def get_name(self): def get_name(self):
"""Get Name."""
# TODO(CM): Make this function actually do something.
pass pass

View File

@ -5,7 +5,7 @@ default_log_levels="valet_validator=DEBUG,tests=DEBUG,compute=DEBUG,common=DEBUG
[auth] [auth]
OS_AUTH_URL_WITH_VERSION=http://controller:5000/v2.0 OS_AUTH_URL_WITH_VERSION=http://controller:5000/v2.0
OS_USERNAME=admin OS_USERNAME=admin
OS_PASSWORD=qwer4321 OS_PASSWORD=PASSWORD
OS_TENANT_NAME=demo OS_TENANT_NAME=demo
TOKEN_EXPIRATION=600 TOKEN_EXPIRATION=600
@ -20,7 +20,7 @@ VALUE=output_value
VERSION=1 VERSION=1
[valet] [valet]
HOST=http://192.168.10.18:8090/v1 HOST=http://127.0.0.1:8090/v1
DELAY_DURATION=30 DELAY_DURATION=30
PAUSE=10 PAUSE=10
TRIES_TO_CREATE=5 TRIES_TO_CREATE=5
@ -32,7 +32,7 @@ TEMPLATE_NAME=affinity_basic_2_instances
[test_affinity_3] [test_affinity_3]
STACK_NAME=affinity_3_stack STACK_NAME=affinity_3_stack
TEMPLATE_NAME=affinity_ 3_Instances TEMPLATE_NAME=affinity_3_Instances
[test_diversity] [test_diversity]
STACK_NAME=basic_diversity_stack STACK_NAME=basic_diversity_stack
@ -45,4 +45,3 @@ TEMPLATE_NAME=diversity_between_2_affinity
[test_exclusivity] [test_exclusivity]
STACK_NAME=basic_exclusivity_stack STACK_NAME=basic_exclusivity_stack
TEMPLATE_NAME=exclusivity_basic_2_instances TEMPLATE_NAME=exclusivity_basic_2_instances

View File

@ -1,14 +1,20 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain # Copyright 2014-2017 AT&T Intellectual Property
# a copy of the License at #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# #
# http://www.apache.org/licenses/LICENSE-2.0 # http://www.apache.org/licenses/LICENSE-2.0
# #
# Unless required by applicable law or agreed to in writing, software # Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # distributed under the License is distributed on an "AS IS" BASIS,
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# License for the specific language governing permissions and limitations # See the License for the specific language governing permissions and
# under the License. # limitations under the License.
"""Init."""
from oslo_log import log as logging from oslo_log import log as logging
import time import time
from valet.tests.functional.valet_validator.common.init import CONF, COLORS from valet.tests.functional.valet_validator.common.init import CONF, COLORS
@ -17,32 +23,42 @@ LOG = logging.getLogger(__name__)
class Result(object): class Result(object):
"""Class consisting of ok (bool) and a string message."""
ok = False ok = False
message = "" message = ""
def __init__(self, ok=True, msg=""): def __init__(self, ok=True, msg=""):
"""Init a Result."""
self.ok = ok self.ok = ok
self.message = msg self.message = msg
class GeneralLogger(object): class GeneralLogger(object):
"""Class consisting of different logging functions."""
@staticmethod @staticmethod
def delay(duration=None): def delay(duration=None):
"""Delay method by performing time sleep."""
time.sleep(duration or CONF.heat.DELAY_DURATION) time.sleep(duration or CONF.heat.DELAY_DURATION)
@staticmethod @staticmethod
def log_info(msg): def log_info(msg):
"""Generic log info method."""
LOG.info("%s %s %s" % (COLORS["L_GREEN"], msg, COLORS["WHITE"])) LOG.info("%s %s %s" % (COLORS["L_GREEN"], msg, COLORS["WHITE"]))
@staticmethod @staticmethod
def log_error(msg, trc_back=""): def log_error(msg, trc_back=""):
"""Log error mthd with msg and trace back."""
LOG.error("%s %s %s" % (COLORS["L_RED"], msg, COLORS["WHITE"])) LOG.error("%s %s %s" % (COLORS["L_RED"], msg, COLORS["WHITE"]))
LOG.error("%s %s %s" % (COLORS["L_RED"], trc_back, COLORS["WHITE"])) LOG.error("%s %s %s" % (COLORS["L_RED"], trc_back, COLORS["WHITE"]))
@staticmethod @staticmethod
def log_debug(msg): def log_debug(msg):
"""Log debug method."""
LOG.debug("%s %s %s" % (COLORS["L_BLUE"], msg, COLORS["WHITE"])) LOG.debug("%s %s %s" % (COLORS["L_BLUE"], msg, COLORS["WHITE"]))
@staticmethod @staticmethod
def log_group(msg): def log_group(msg):
"""Log info method for group."""
LOG.info("%s %s %s" % (COLORS["Yellow"], msg, COLORS["WHITE"])) LOG.info("%s %s %s" % (COLORS["Yellow"], msg, COLORS["WHITE"]))

View File

@ -13,6 +13,8 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
"""Auth."""
from keystoneclient.auth.identity import v2 as identity from keystoneclient.auth.identity import v2 as identity
from keystoneclient import session from keystoneclient import session
from oslo_log import log as logging from oslo_log import log as logging
@ -24,14 +26,16 @@ MIN_TOKEN_LIFE_SECONDS = 120
class Auth(object): class Auth(object):
''' Singleton class for authentication token ''' """Singleton class for authentication token."""
auth = None auth = None
session = None session = None
@staticmethod @staticmethod
def _init(): def _init():
if Auth.is_auth_invalid(): if Auth.is_auth_invalid():
Auth.auth = identity.Password(auth_url=CONF.auth.OS_AUTH_URL_WITH_VERSION, Auth.auth = identity.Password(
auth_url=CONF.auth.OS_AUTH_URL_WITH_VERSION,
username=CONF.auth.OS_USERNAME, username=CONF.auth.OS_USERNAME,
password=CONF.auth.OS_PASSWORD, password=CONF.auth.OS_PASSWORD,
tenant_name=CONF.auth.OS_TENANT_NAME) tenant_name=CONF.auth.OS_TENANT_NAME)
@ -39,22 +43,29 @@ class Auth(object):
@staticmethod @staticmethod
def get_password_plugin(): def get_password_plugin():
"""Return auth after init."""
Auth._init() Auth._init()
return Auth.auth return Auth.auth
@staticmethod @staticmethod
def get_auth_token(): def get_auth_token():
"""Return auth token for session."""
return Auth.get_password_plugin().get_token(Auth.get_auth_session()) return Auth.get_password_plugin().get_token(Auth.get_auth_session())
@staticmethod @staticmethod
def get_auth_session(): def get_auth_session():
"""Return auth session."""
Auth._init() Auth._init()
return Auth.session return Auth.session
@staticmethod @staticmethod
def get_project_id(): def get_project_id():
return Auth.get_password_plugin().get_project_id(Auth.get_auth_session()) """Return auth_session based on project_id."""
return Auth.get_password_plugin().get_project_id(
Auth.get_auth_session())
@staticmethod @staticmethod
def is_auth_invalid(): def is_auth_invalid():
return Auth.auth is None or Auth.auth.get_auth_ref(Auth.session).will_expire_soon(CONF.auth.TOKEN_EXPIRATION) """Return True/False based on status of auth."""
return Auth.auth is None or Auth.auth.get_auth_ref(
Auth.session).will_expire_soon(CONF.auth.TOKEN_EXPIRATION)

View File

@ -13,6 +13,8 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
"""Init."""
import os import os
from oslo_config import cfg from oslo_config import cfg
from oslo_log import log as logging from oslo_log import log as logging
@ -45,7 +47,8 @@ COLORS = \
opts_auth = \ opts_auth = \
[ [
cfg.StrOpt('OS_AUTH_URL_WITH_VERSION', default='http://controller:5000/v2.0'), cfg.StrOpt('OS_AUTH_URL_WITH_VERSION',
default='http://controller:5000/v2.0'),
cfg.StrOpt('OS_USERNAME', default="addddmin"), cfg.StrOpt('OS_USERNAME', default="addddmin"),
cfg.StrOpt('OS_PASSWORD', default="qwer4321"), cfg.StrOpt('OS_PASSWORD', default="qwer4321"),
cfg.StrOpt('OS_TENANT_NAME', default="demo"), cfg.StrOpt('OS_TENANT_NAME', default="demo"),
@ -87,6 +90,7 @@ _initialized = False
def prepare(CONF): def prepare(CONF):
"""Prepare config options."""
global _initialized global _initialized
try: try:
if _initialized is False: if _initialized is False:
@ -94,9 +98,12 @@ def prepare(CONF):
_initialized = True _initialized = True
# Adding config file # Adding config file
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir, os.pardir, os.pardir)) possible_topdir = os.path.normpath(
os.path.join(os.path.abspath(__file__), os.pardir,
os.pardir, os.pardir))
conf_file = os.path.join(possible_topdir, 'etc', DOMAIN + '.cfg') conf_file = os.path.join(possible_topdir, 'etc', DOMAIN + '.cfg')
CONF([], project=DOMAIN, default_config_files=[conf_file] or None, validate_default_values=True) CONF([], project=DOMAIN, default_config_files=[conf_file] or None,
validate_default_values=True)
logging.setup(CONF, DOMAIN) logging.setup(CONF, DOMAIN)

View File

@ -13,6 +13,8 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
"""Resources."""
from oslo_log import log as logging from oslo_log import log as logging
import traceback import traceback
import yaml import yaml
@ -23,8 +25,10 @@ TEMPLATE_RES = "resources"
class TemplateResources(object): class TemplateResources(object):
''' Heat template parser ''' """Heat template parser."""
def __init__(self, template): def __init__(self, template):
"""Init Template resources."""
self.instances = [] self.instances = []
self.groups = {} self.groups = {}
self.template_data = None self.template_data = None
@ -47,7 +51,10 @@ class TemplateResources(object):
class Instance(object): class Instance(object):
"""Contains instance details from template (name, image, flavor, etc)."""
def __init__(self, doc, instance_name): def __init__(self, doc, instance_name):
"""Init Instance Object."""
self.resource_name = instance_name self.resource_name = instance_name
self.name = None self.name = None
self.image = None self.image = None
@ -57,6 +64,7 @@ class Instance(object):
self.fill(doc, instance_name) self.fill(doc, instance_name)
def fill(self, doc, instance_name): def fill(self, doc, instance_name):
"""Fill Instance details from template properties."""
try: try:
template_property = doc[TEMPLATE_RES][instance_name]["properties"] template_property = doc[TEMPLATE_RES][instance_name]["properties"]
@ -69,12 +77,17 @@ class Instance(object):
LOG.error(traceback.format_exc()) LOG.error(traceback.format_exc())
def get_ins(self): def get_ins(self):
"""Return instance data."""
return("type: %s, name: %s, image: %s, flavor: %s, resource_name: %s " return("type: %s, name: %s, image: %s, flavor: %s, resource_name: %s "
% (self.type, self.name, self.image, self.flavor, self.resource_name)) % (self.type, self.name, self.image,
self.flavor, self.resource_name))
class Group(object): class Group(object):
"""Class containing group details (type, name, resources) from template."""
def __init__(self, doc, group_name): def __init__(self, doc, group_name):
"""Init Group Object."""
self.group_type = None self.group_type = None
self.group_name = None self.group_name = None
self.level = None self.level = None
@ -83,11 +96,13 @@ class Group(object):
self.fill(doc, group_name) self.fill(doc, group_name)
def fill(self, doc, group_name): def fill(self, doc, group_name):
"""Fill group from template properties."""
try: try:
template_property = doc[TEMPLATE_RES][group_name]["properties"] template_property = doc[TEMPLATE_RES][group_name]["properties"]
self.group_type = template_property["group_type"] self.group_type = template_property["group_type"]
self.group_name = template_property["group_name"] if "group_name" in template_property else None self.group_name = template_property["group_name"] \
if "group_name" in template_property else None
self.level = template_property["level"] self.level = template_property["level"]
for res in template_property[TEMPLATE_RES]: for res in template_property[TEMPLATE_RES]:
self.group_resources.append(res["get_resource"]) self.group_resources.append(res["get_resource"])

View File

@ -13,6 +13,8 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
"""Analyzer."""
from novaclient import client from novaclient import client
import traceback import traceback
from valet.tests.functional.valet_validator.common import Result, GeneralLogger from valet.tests.functional.valet_validator.common import Result, GeneralLogger
@ -21,24 +23,27 @@ from valet.tests.functional.valet_validator.common.init import CONF
class Analyzer(object): class Analyzer(object):
"""Methods to perform analysis on hosts, vms, racks."""
def __init__(self): def __init__(self):
''' initializing the analyzer - connecting to nova ''' """Initializing the analyzer - connecting to nova."""
GeneralLogger.log_info("Initializing Analyzer") GeneralLogger.log_info("Initializing Analyzer")
self.nova = client.Client(CONF.nova.VERSION, session=Auth.get_auth_session()) self.nova = client.Client(CONF.nova.VERSION,
session=Auth.get_auth_session())
def get_host_name(self, instance_name): def get_host_name(self, instance_name):
''' Returning host by instance name ''' """Returning host by instance name."""
serv = self.nova.servers.find(name=instance_name) serv = self.nova.servers.find(name=instance_name)
return self.get_hostname(serv) return self.get_hostname(serv)
def get_all_hosts(self, instances_list): def get_all_hosts(self, instances_list):
''' Returning all hosts of all instances ''' """Returning all hosts of all instances."""
GeneralLogger.log_debug("Getting hosts names") GeneralLogger.log_debug("Getting hosts names")
return [self.get_host_name(instance.name) for instance in instances_list] return [self.get_host_name(instance.name)
for instance in instances_list]
def check(self, resources): def check(self, resources):
''' Checking if all instances are on the Appropriate hosts and racks ''' """Check if all instances are on the Appropriate hosts and racks."""
GeneralLogger.log_debug("Starting to check instances location") GeneralLogger.log_debug("Starting to check instances location")
result = True result = True
@ -46,39 +51,51 @@ class Analyzer(object):
for key in resources.groups: for key in resources.groups:
group = resources.groups[key] group = resources.groups[key]
resources_to_compare = self.get_resources_to_compare(resources, group.group_resources) or group.group_resources resources_to_compare = self.get_resources_to_compare(
instances_for_group = self.get_group_instances(resources, resources_to_compare) resources, group.group_resources) or group.group_resources
instances_for_group = self.get_group_instances(
resources, resources_to_compare)
hosts_list = self.get_all_hosts(instances_for_group) hosts_list = self.get_all_hosts(instances_for_group)
# switch case # switch case
result = result and \ result = result and \
{ {
"affinity": self.are_the_same(hosts_list, group.level), "affinity": self.are_the_same(hosts_list,
"diversity": self.are_different(hosts_list, group.level), group.level),
"exclusivity": self.are_we_alone(hosts_list, instances_for_group) "diversity": self.are_different(hosts_list,
group.level),
"exclusivity": self.are_we_alone(hosts_list,
instances_for_group)
}[group.group_type] }[group.group_type]
except Exception as ex: except Exception as ex:
GeneralLogger.log_error("Exception at method check: %s" % ex, traceback.format_exc()) GeneralLogger.log_error("Exception at method check: %s" % ex,
traceback.format_exc())
result = False result = False
return Result(result) return Result(result)
def get_resources_to_compare(self, resources, group_resources): def get_resources_to_compare(self, resources, group_resources):
"""Return resources to compare."""
resources_to_compare = [] resources_to_compare = []
try: try:
for group_name in group_resources: # ['test-affinity-group1', 'test-affinity-group2'] # ['test-affinity-group1', 'test-affinity-group2']
for group_name in group_resources:
if "test" in group_name: if "test" in group_name:
resources_to_compare.append(resources.groups[group_name].group_resources) resources_to_compare.append(
resources.groups[group_name].group_resources)
else: else:
return None return None
return resources_to_compare return resources_to_compare
except Exception as ex: except Exception as ex:
GeneralLogger.log_error("Exception at method get_resources_to_compare: %s" % ex, traceback.format_exc()) GeneralLogger.log_error("Exception at method "
"get_resources_to_compare: %s"
% ex, traceback.format_exc())
def are_we_alone(self, hosts_list, ins_for_group): def are_we_alone(self, hosts_list, ins_for_group):
"""Return result of whether any instances on host."""
try: try:
# instances is all the instances on this host # instances is all the instances on this host
all_instances_on_host = self.get_instances_per_host(hosts_list) all_instances_on_host = self.get_instances_per_host(hosts_list)
@ -88,10 +105,11 @@ class Analyzer(object):
return not all_instances_on_host return not all_instances_on_host
except Exception as ex: except Exception as ex:
GeneralLogger.log_error("Exception at method are_we_alone: %s" % ex, traceback.format_exc()) GeneralLogger.log_error("Exception at method are_we_alone: %s"
% ex, traceback.format_exc())
def get_instances_per_host(self, hosts_list): def get_instances_per_host(self, hosts_list):
''' get_instances_per_host ''' """Get number of instances per host."""
instances = [] instances = []
try: try:
for host in set(hosts_list): for host in set(hosts_list):
@ -100,39 +118,50 @@ class Analyzer(object):
return instances return instances
except Exception as ex: except Exception as ex:
GeneralLogger.log_error("Exception at method get_instances_per_host: %s" % ex, traceback.format_exc()) GeneralLogger.log_error("Exception at method "
"get_instances_per_host: %s"
% ex, traceback.format_exc())
def are_different(self, hosts_list, level): def are_different(self, hosts_list, level):
''' Checking if all hosts (and racks) are different for all instances ''' """Check if all hosts (and racks) are different for all instances."""
diction = {} diction = {}
try: try:
for h in hosts_list: for h in hosts_list:
if self.is_already_exists(diction, self.get_host_or_rack(level, h)): if self.is_already_exists(diction,
self.get_host_or_rack(level, h)):
return False return False
return True return True
except Exception as ex: except Exception as ex:
GeneralLogger.log_error("Exception at method are_all_hosts_different: %s" % ex, traceback.format_exc()) GeneralLogger.log_error("Exception at method "
"are_all_hosts_different: %s"
% ex, traceback.format_exc())
return False return False
def are_the_same(self, hosts_list, level): def are_the_same(self, hosts_list, level):
"""Check if all hosts (and racks) are the same for all instances."""
GeneralLogger.log_debug("Hosts are:") GeneralLogger.log_debug("Hosts are:")
try: try:
for h in hosts_list: for h in hosts_list:
if self.compare_host(self.get_host_or_rack(level, h), self.get_host_or_rack(level, hosts_list[0])) is False: if self.compare_host(
self.get_host_or_rack(level, h),
self.get_host_or_rack(level, hosts_list[0])) is False:
return False return False
return True return True
except Exception as ex: except Exception as ex:
GeneralLogger.log_error("Exception at method are_all_hosts_different: %s" % ex, traceback.format_exc()) GeneralLogger.log_error("Exception at method "
"are_all_hosts_different: %s"
% ex, traceback.format_exc())
return False return False
def get_group_instances(self, resources, group_ins): def get_group_instances(self, resources, group_ins):
''' gets the instance object according to the group_ins """Get the instance object according to the group_ins.
group_ins - the group_resources name of the instances belong to this group (['my-instance-1', 'my-instance-2']) group_ins - the group_resources name of the instances belong to
''' this group (['my-instance-1', 'my-instance-2']).
"""
ins_for_group = [] ins_for_group = []
try: try:
for instance in resources.instances: for instance in resources.instances:
@ -141,13 +170,17 @@ class Analyzer(object):
return ins_for_group return ins_for_group
except Exception as ex: except Exception as ex:
GeneralLogger.log_error("Exception at method get_group_instances: %s" % ex, traceback.format_exc()) GeneralLogger.log_error("Exception at method "
"get_group_instances: %s"
% ex, traceback.format_exc())
return None return None
def get_hostname(self, vm): def get_hostname(self, vm):
"""Get hostname of vm."""
return str(getattr(vm, CONF.nova.ATTR)) return str(getattr(vm, CONF.nova.ATTR))
def is_already_exists(self, diction, item): def is_already_exists(self, diction, item):
"""If item exists, return True, otherwise return False."""
if item in diction: if item in diction:
return True return True
@ -155,18 +188,24 @@ class Analyzer(object):
return False return False
def compare_rack(self, current_host, first_host): def compare_rack(self, current_host, first_host):
"""Return True if racks of current and first host are equal."""
GeneralLogger.log_debug(current_host) GeneralLogger.log_debug(current_host)
return self.get_rack(current_host) == self.get_rack(first_host) return self.get_rack(current_host) == self.get_rack(first_host)
def compare_host(self, current_host, first_host): def compare_host(self, current_host, first_host):
"""Compare current host to first host."""
GeneralLogger.log_debug(current_host) GeneralLogger.log_debug(current_host)
return current_host == first_host return current_host == first_host
def get_rack(self, host): def get_rack(self, host):
"""Get rack from host."""
return (host.split("r")[1])[:2] return (host.split("r")[1])[:2]
def get_host_or_rack(self, level, host): def get_host_or_rack(self, level, host):
"""Return host if current level is host, otherwise return rack."""
return host if level == "host" else self.get_rack(host) return host if level == "host" else self.get_rack(host)
def get_vms_by_hypervisor(self, host): def get_vms_by_hypervisor(self, host):
return [vm for vm in self.nova.servers.list(search_opts={"all_tenants": True}) if self.get_hostname(vm) == host] """Return vms based on hypervisor(host)."""
return [vm for vm in self.nova.servers.list(
search_opts={"all_tenants": True}) if self.get_hostname(vm) == host]

View File

@ -13,6 +13,8 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
"""Valet Group."""
import json import json
import requests import requests
import traceback import traceback
@ -22,61 +24,73 @@ from valet.tests.functional.valet_validator.common.init import CONF
class ValetGroup(object): class ValetGroup(object):
"""Class of helpers and basic functions for Valet Groups."""
def __init__(self): def __init__(self):
"""Init groups_url and headers for Valet Group."""
self.groups_url = "%s/groups" % CONF.valet.HOST self.groups_url = "%s/groups" % CONF.valet.HOST
self.headers = {"X-Auth-Token": Auth.get_auth_token(), self.headers = {"X-Auth-Token": Auth.get_auth_token(),
"Content-Type": "application/json"} "Content-Type": "application/json"}
def create_group(self, group_name, group_type): def create_group(self, group_name, group_type):
"""Create group given name and type."""
grp_data = {"name": group_name, "type": group_type} grp_data = {"name": group_name, "type": group_type}
return requests.post(self.groups_url, data=json.dumps(grp_data), headers=self.headers) return requests.post(self.groups_url, data=json.dumps(grp_data),
headers=self.headers)
def get_list_groups(self): def get_list_groups(self):
"""Return a list of groups."""
list_response = requests.get(self.groups_url, headers=self.headers) list_response = requests.get(self.groups_url, headers=self.headers)
return list_response.json()["groups"] return list_response.json()["groups"]
def get_group_details(self, group_id): def get_group_details(self, group_id):
"""Return Group Details based on group_id."""
url = self.groups_url + "/" + group_id url = self.groups_url + "/" + group_id
return requests.get(url, headers=self.headers) return requests.get(url, headers=self.headers)
def update_group_members(self, group_id, members=None): def update_group_members(self, group_id, members=None):
"""Update group members based on group_id."""
add_member_url = self.groups_url + "/%s/members" % group_id add_member_url = self.groups_url + "/%s/members" % group_id
data = json.dumps({"members": [members or Auth.get_project_id()]}) data = json.dumps({"members": [members or Auth.get_project_id()]})
return requests.put(add_member_url, data=data, headers=self.headers) return requests.put(add_member_url, data=data, headers=self.headers)
def update_group(self, group_id, new_description): def update_group(self, group_id, new_description):
"""Update group based on its id with a new description."""
url = self.groups_url + "/" + group_id url = self.groups_url + "/" + group_id
new_data = json.dumps({"description": new_description}) new_data = json.dumps({"description": new_description})
return requests.put(url, new_data, headers=self.headers) return requests.put(url, new_data, headers=self.headers)
def delete_group_member(self, group_id, member_id): def delete_group_member(self, group_id, member_id):
"""Delete a single group member based on its member_id."""
url = self.groups_url + "/%s/members/%s" % (group_id, member_id) url = self.groups_url + "/%s/members/%s" % (group_id, member_id)
return requests.delete(url, headers=self.headers) return requests.delete(url, headers=self.headers)
def delete_all_group_member(self, group_id): def delete_all_group_member(self, group_id):
"""Delete all members of a group based on group_id."""
url = self.groups_url + "/%s/members" % group_id url = self.groups_url + "/%s/members" % group_id
return requests.delete(url, headers=self.headers) return requests.delete(url, headers=self.headers)
def delete_group(self, group_id): def delete_group(self, group_id):
"""Delete group based on its id."""
url = self.groups_url + "/%s" % group_id url = self.groups_url + "/%s" % group_id
return requests.delete(url, headers=self.headers) return requests.delete(url, headers=self.headers)
def get_group_id_and_members(self, group_name, group_type="exclusivity"): def get_group_id_and_members(self, group_name, group_type="exclusivity"):
''' Checks if group name exists, if not - creates it """Check if group name exists, if not - creates it.
returns group's id and members list Returns group's id and members list.
''' """
group_details = self.check_group_exists(group_name) group_details = self.check_group_exists(group_name)
try: try:
if group_details is None: if group_details is None:
GeneralLogger.log_info("Creating group") GeneralLogger.log_info("Creating group")
create_response = self.create_group(group_name, group_type) create_response = self.create_group(group_name, group_type)
return create_response.json()["id"], create_response.json()["members"] return create_response.json()["id"], \
create_response.json()["members"]
else: else:
GeneralLogger.log_info("Group exists") GeneralLogger.log_info("Group exists")
@ -86,17 +100,18 @@ class ValetGroup(object):
GeneralLogger.log_error(traceback.format_exc()) GeneralLogger.log_error(traceback.format_exc())
def add_group_member(self, group_details): def add_group_member(self, group_details):
''' Checks if member exists in group, if not - adds it ''' """Check if member exists in group, if not - adds it."""
# group_details - group id, group members # group_details - group id, group members
try: try:
if Auth.get_project_id() not in group_details[1]: if Auth.get_project_id() not in group_details[1]:
GeneralLogger.log_info("Adding member to group") GeneralLogger.log_info("Adding member to group")
self.update_group_members(group_details[0]) self.update_group_members(group_details[0])
except Exception: except Exception:
GeneralLogger.log_error("Failed to add group member", traceback.format_exc()) GeneralLogger.log_error("Failed to add group member",
traceback.format_exc())
def check_group_exists(self, group_name): def check_group_exists(self, group_name):
''' Checks if group exists in group list, if not returns None ''' """Check if group exists in group list, if not returns None."""
for grp in self.get_list_groups(): for grp in self.get_list_groups():
if grp["name"] == group_name: if grp["name"] == group_name:
return grp["id"], grp["members"] return grp["id"], grp["members"]
@ -104,12 +119,14 @@ class ValetGroup(object):
return None return None
def delete_all_groups(self): def delete_all_groups(self):
DELETED = 204 """Return deleted code 204 if all groups deleted."""
deleted = 204
for group in self.get_list_groups(): for group in self.get_list_groups():
codes = [self.delete_all_group_member(group["id"]).status_code, self.delete_group(group["id"]).status_code] codes = [self.delete_all_group_member(group["id"]).status_code,
self.delete_group(group["id"]).status_code]
res = filter(lambda a: a != DELETED, codes) res = filter(lambda a: a != deleted, codes)
if res: if res:
return res[0] return res[0]
return DELETED return deleted

View File

@ -13,6 +13,8 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
"""Loader."""
from heatclient.client import Client from heatclient.client import Client
import sys import sys
import time import time
@ -24,9 +26,10 @@ from valet.tests.functional.valet_validator.group_api.valet_group import ValetGr
class Loader(object): class Loader(object):
"""Class is responsible for loading stacks and groups."""
def __init__(self): def __init__(self):
''' initializing the loader - connecting to heat ''' """Initializing the loader - connecting to heat."""
GeneralLogger.log_info("Initializing Loader") GeneralLogger.log_info("Initializing Loader")
heat_url = CONF.heat.HEAT_URL + str(Auth.get_project_id()) heat_url = CONF.heat.HEAT_URL + str(Auth.get_project_id())
@ -36,6 +39,7 @@ class Loader(object):
self.stacks = heat.stacks self.stacks = heat.stacks
def create_stack(self, stack_name, template_resources): def create_stack(self, stack_name, template_resources):
"""Create stack from template resources."""
GeneralLogger.log_info("Starting to create stacks") GeneralLogger.log_info("Starting to create stacks")
groups = template_resources.groups groups = template_resources.groups
@ -44,40 +48,50 @@ class Loader(object):
if groups[key].group_type == "exclusivity": if groups[key].group_type == "exclusivity":
self.create_valet_group(groups[key].group_name) self.create_valet_group(groups[key].group_name)
self.stacks.create(stack_name=stack_name, template=template_resources.template_data) self.stacks.create(stack_name=stack_name,
template=template_resources.template_data)
return self.wait(stack_name, operation="create") return self.wait(stack_name, operation="create")
except Exception: except Exception:
GeneralLogger.log_error("Failed to create stack", traceback.format_exc()) GeneralLogger.log_error("Failed to create stack",
traceback.format_exc())
sys.exit(1) sys.exit(1)
def create_valet_group(self, group_name): def create_valet_group(self, group_name):
"""Create valet group."""
try: try:
v_group = ValetGroup() v_group = ValetGroup()
group_details = v_group.get_group_id_and_members(group_name) # (group_name, group_type) # (group_name, group_type)
group_details = v_group.get_group_id_and_members(group_name)
v_group.add_group_member(group_details) v_group.add_group_member(group_details)
except Exception: except Exception:
GeneralLogger.log_error("Failed to create valet group", traceback.format_exc()) GeneralLogger.log_error("Failed to create valet group",
traceback.format_exc())
sys.exit(1) sys.exit(1)
def delete_stack(self, stack_name): def delete_stack(self, stack_name):
"""Delete stack according to stack_name."""
self.stacks.delete(stack_id=stack_name) self.stacks.delete(stack_id=stack_name)
return self.wait(stack_name, operation="delete") return self.wait(stack_name, operation="delete")
def delete_all_stacks(self): def delete_all_stacks(self):
"""Delete all stacks."""
GeneralLogger.log_info("Starting to delete stacks") GeneralLogger.log_info("Starting to delete stacks")
try: try:
for stack in self.stacks.list(): for stack in self.stacks.list():
self.delete_stack(stack.id) self.delete_stack(stack.id)
except Exception: except Exception:
GeneralLogger.log_error("Failed to delete stacks", traceback.format_exc()) GeneralLogger.log_error("Failed to delete stacks",
traceback.format_exc())
def wait(self, stack_name, count=CONF.valet.TIME_CAP, operation="Operation"): def wait(self, stack_name, count=CONF.valet.TIME_CAP,
''' Checking the result of the process (create/delete) and writing the result to log ''' operation="Operation"):
while str(self.stacks.get(stack_name).status) == "IN_PROGRESS" and count > 0: """Check result of process (create/delete) and write result to log."""
while str(self.stacks.get(stack_name).status) == "IN_PROGRESS" \
and count > 0:
count -= 1 count -= 1
time.sleep(1) time.sleep(1)
@ -85,7 +99,8 @@ class Loader(object):
GeneralLogger.log_info(operation + " Successfully completed") GeneralLogger.log_info(operation + " Successfully completed")
return Result() return Result()
elif str(self.stacks.get(stack_name).status) == "FAILED": elif str(self.stacks.get(stack_name).status) == "FAILED":
msg = operation + " failed - " + self.stacks.get(stack_name).stack_status_reason msg = operation + " failed - " + \
self.stacks.get(stack_name).stack_status_reason
else: else:
msg = operation + " timed out" msg = operation + " timed out"
GeneralLogger.log_error(msg) GeneralLogger.log_error(msg)

View File

@ -13,6 +13,7 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
"""Functional Base."""
import os import os
from oslo_log import log as logging from oslo_log import log as logging
@ -31,24 +32,32 @@ class FunctionalTestCase(Base):
"""Test case base class for all unit tests.""" """Test case base class for all unit tests."""
def __init__(self, *args, **kwds): def __init__(self, *args, **kwds):
''' initializing the FunctionalTestCase - loading the logger, loader and analyzer ''' """Init.
Initializing the FunctionalTestCase - loading the
logger, loader and analyzer.
"""
super(FunctionalTestCase, self).__init__(*args, **kwds) super(FunctionalTestCase, self).__init__(*args, **kwds)
def setUp(self): def setUp(self):
"""Start loader and analyzer."""
super(FunctionalTestCase, self).setUp() super(FunctionalTestCase, self).setUp()
self.load = Loader() self.load = Loader()
self.compute = Analyzer() self.compute = Analyzer()
LOG.info("%s %s is starting... %s" % (COLORS["L_BLUE"], self.get_name(), COLORS["WHITE"])) LOG.info("%s %s is starting... %s" % (COLORS["L_BLUE"],
self.get_name(),
COLORS["WHITE"]))
def run_test(self, stack_name, template_path): def run_test(self, stack_name, template_path):
''' scenario - """Run Test.
scenario -
deletes all stacks deletes all stacks
create new stack create new stack
checks if host (or rack) is the same for all instances checks if host (or rack) is the same for all instances
''' """
# delete all stacks # delete all stacks
self.load.delete_all_stacks() self.load.delete_all_stacks()
@ -60,17 +69,21 @@ class FunctionalTestCase(Base):
res = self.try_again(res, stack_name, my_resources) res = self.try_again(res, stack_name, my_resources)
self.validate(res) self.validate(res)
LOG.info("%s stack creation is done successfully %s" % (COLORS["L_PURPLE"], COLORS["WHITE"])) LOG.info("%s stack creation is done successfully %s"
% (COLORS["L_PURPLE"], COLORS["WHITE"]))
time.sleep(self.CONF.valet.DELAY_DURATION) time.sleep(self.CONF.valet.DELAY_DURATION)
# validation # validation
self.validate(self.compute.check(my_resources)) self.validate(self.compute.check(my_resources))
LOG.info("%s validation is done successfully %s" % (COLORS["L_PURPLE"], COLORS["WHITE"])) LOG.info("%s validation is done successfully %s"
% (COLORS["L_PURPLE"], COLORS["WHITE"]))
def try_again(self, res, stack_name, my_resources): def try_again(self, res, stack_name, my_resources):
"""Try creating stack again."""
tries = CONF.valet.TRIES_TO_CREATE tries = CONF.valet.TRIES_TO_CREATE
while "Ostro error" in res.message and tries > 0: while "Ostro error" in res.message and tries > 0:
LOG.error("Ostro error - try number %d" % (CONF.valet.TRIES_TO_CREATE - tries + 2)) LOG.error("Ostro error - try number %d"
% (CONF.valet.TRIES_TO_CREATE - tries + 2))
self.load.delete_all_stacks() self.load.delete_all_stacks()
res = self.load.create_stack(stack_name, my_resources) res = self.load.create_stack(stack_name, my_resources)
tries -= 1 tries -= 1
@ -79,9 +92,13 @@ class FunctionalTestCase(Base):
return res return res
def get_template_path(self, template_name): def get_template_path(self, template_name):
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir, os.pardir)) """Return template path for the template name given."""
return os.path.join(possible_topdir, 'tests/templates', template_name + '.yml') possible_topdir = os.path.normpath(os.path.join(
os.path.abspath(__file__), os.pardir, os.pardir))
return os.path.join(possible_topdir, 'tests/templates',
template_name + '.yml')
def init_template(self, test): def init_template(self, test):
"""Init template, call get path for test template."""
self.stack_name = test.STACK_NAME self.stack_name = test.STACK_NAME
self.template_path = self.get_template_path(test.TEMPLATE_NAME) self.template_path = self.get_template_path(test.TEMPLATE_NAME)

View File

@ -13,6 +13,8 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
"""Test Affinity."""
from oslo_config import cfg from oslo_config import cfg
from oslo_log import log as logging from oslo_log import log as logging
from valet.tests.functional.valet_validator.common.init import CONF from valet.tests.functional.valet_validator.common.init import CONF
@ -30,14 +32,17 @@ LOG = logging.getLogger(__name__)
class TestAffinity(FunctionalTestCase): class TestAffinity(FunctionalTestCase):
"""Test Affinity Functional Tests."""
def setUp(self): def setUp(self):
''' Adding configuration and logging mechanism ''' """Adding configuration and logging mechanism."""
super(TestAffinity, self).setUp() super(TestAffinity, self).setUp()
self.init_template(CONF.test_affinity) self.init_template(CONF.test_affinity)
def test_affinity(self): def test_affinity(self):
"""Test Affinity."""
self.run_test(self.stack_name, self.template_path) self.run_test(self.stack_name, self.template_path)
def get_name(self): def get_name(self):
"""Return Name."""
return __name__ return __name__

View File

@ -13,6 +13,8 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
"""Test Affinity 3 Instances."""
from oslo_config import cfg from oslo_config import cfg
from oslo_log import log as logging from oslo_log import log as logging
from valet.tests.functional.valet_validator.common.init import CONF from valet.tests.functional.valet_validator.common.init import CONF
@ -29,14 +31,17 @@ LOG = logging.getLogger(__name__)
class TestAffinity_3(FunctionalTestCase): class TestAffinity_3(FunctionalTestCase):
"""Test Affinity 3 Functional Test."""
def setUp(self): def setUp(self):
''' Adding configuration and logging mechanism ''' """Adding configuration and logging mechanism."""
super(TestAffinity_3, self).setUp() super(TestAffinity_3, self).setUp()
self.init_template(CONF.test_affinity_3) self.init_template(CONF.test_affinity_3)
def test_affinity(self): def test_affinity(self):
"""Test Affinity."""
self.run_test(self.stack_name, self.template_path) self.run_test(self.stack_name, self.template_path)
def get_name(self): def get_name(self):
"""Return Name."""
return __name__ return __name__

View File

@ -13,6 +13,8 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
"""Test Diversity."""
from oslo_config import cfg from oslo_config import cfg
from oslo_log import log as logging from oslo_log import log as logging
from valet.tests.functional.valet_validator.common.init import CONF from valet.tests.functional.valet_validator.common.init import CONF
@ -30,15 +32,17 @@ LOG = logging.getLogger(__name__)
class TestDiversity(FunctionalTestCase): class TestDiversity(FunctionalTestCase):
"""Test Diversity Functional Test."""
def setUp(self): def setUp(self):
''' Initiating template ''' """Initiating template."""
super(TestDiversity, self).setUp() super(TestDiversity, self).setUp()
self.init_template(CONF.test_diversity) self.init_template(CONF.test_diversity)
def test_diversity(self): def test_diversity(self):
"""Test diversity."""
self.run_test(self.stack_name, self.template_path) self.run_test(self.stack_name, self.template_path)
def get_name(self): def get_name(self):
"""Return Name."""
return __name__ return __name__

View File

@ -13,6 +13,8 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
"""Test Exclusivity."""
from oslo_config import cfg from oslo_config import cfg
from oslo_log import log as logging from oslo_log import log as logging
from valet.tests.functional.valet_validator.common.init import CONF from valet.tests.functional.valet_validator.common.init import CONF
@ -30,14 +32,17 @@ LOG = logging.getLogger(__name__)
class TestExclusivity(FunctionalTestCase): class TestExclusivity(FunctionalTestCase):
"""Test Exclusivity Function Test."""
def setUp(self): def setUp(self):
''' Initiating template ''' """Initiating template."""
super(TestExclusivity, self).setUp() super(TestExclusivity, self).setUp()
self.init_template(CONF.test_exclusivity) self.init_template(CONF.test_exclusivity)
def test_exclusivity(self): def test_exclusivity(self):
"""Nested run test on stack_name and template_path."""
self.run_test(self.stack_name, self.template_path) self.run_test(self.stack_name, self.template_path)
def get_name(self): def get_name(self):
"""Return name."""
return __name__ return __name__

View File

@ -13,6 +13,8 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
"""Test Groups."""
from valet.tests.functional.valet_validator.common.auth import Auth from valet.tests.functional.valet_validator.common.auth import Auth
from valet.tests.functional.valet_validator.common import GeneralLogger from valet.tests.functional.valet_validator.common import GeneralLogger
from valet.tests.functional.valet_validator.group_api.valet_group import ValetGroup from valet.tests.functional.valet_validator.group_api.valet_group import ValetGroup
@ -20,32 +22,42 @@ from valet.tests.functional.valet_validator.tests.functional_base import Functio
class TestGroups(FunctionalTestCase): class TestGroups(FunctionalTestCase):
"""Test valet groups functional."""
def setUp(self): def setUp(self):
''' Adding configuration and logging mechanism ''' """Add configuration and logging mechanism."""
super(TestGroups, self).setUp() super(TestGroups, self).setUp()
self.groups = ValetGroup() self.groups = ValetGroup()
self.group_name = "test_group" self.group_name = "test_group"
self.group_type = "exclusivity" self.group_type = "exclusivity"
def test_groups(self): def test_groups(self):
"""Test groups using multiple methods and checking response codes."""
GeneralLogger.log_group("Delete all stacks") GeneralLogger.log_group("Delete all stacks")
self.load.delete_all_stacks() self.load.delete_all_stacks()
GeneralLogger.log_group("Delete all members and groups") GeneralLogger.log_group("Delete all members and groups")
respose_code = self.groups.delete_all_groups() respose_code = self.groups.delete_all_groups()
self.assertEqual(204, respose_code, "delete_all_groups failed with code %s" % respose_code) self.assertEqual(204, respose_code,
"delete_all_groups failed with code %s"
% respose_code)
self.assertEqual([], self.groups.get_list_groups(), "delete_all_groups failed") self.assertEqual([], self.groups.get_list_groups(),
"delete_all_groups failed")
GeneralLogger.log_group("Try to delete not existing group") GeneralLogger.log_group("Try to delete not existing group")
response = self.groups.delete_group("d68f62b1-4758-4ea5-a93a-8f9d9c0ae912") response = self.groups.delete_group(
self.assertEqual(404, response.status_code, "delete_group failed with code %s" % response.status_code) "d68f62b1-4758-4ea5-a93a-8f9d9c0ae912")
self.assertEqual(404, response.status_code,
"delete_group failed with code %s"
% response.status_code)
GeneralLogger.log_group("Create test_group") GeneralLogger.log_group("Create test_group")
group_info = self.groups.create_group(self.group_name, self.group_type) group_info = self.groups.create_group(self.group_name, self.group_type)
self.assertEqual(201, group_info.status_code, "create_group failed with code %s" % group_info.status_code) self.assertEqual(201, group_info.status_code,
"create_group failed with code %s"
% group_info.status_code)
grp_id = group_info.json()["id"] grp_id = group_info.json()["id"]
@ -53,30 +65,43 @@ class TestGroups(FunctionalTestCase):
GeneralLogger.log_group(str(self.groups.get_list_groups())) GeneralLogger.log_group(str(self.groups.get_list_groups()))
GeneralLogger.log_group("Create test member (NOT tenant ID)") GeneralLogger.log_group("Create test member (NOT tenant ID)")
member_respone = self.groups.update_group_members(grp_id, members="test_member") member_respone = self.groups.update_group_members(grp_id,
self.assertEqual(409, member_respone.status_code, "update_group_members failed with code %s" % member_respone.status_code) members="test_member")
self.assertEqual(409, member_respone.status_code,
"update_group_members failed with code %s"
% member_respone.status_code)
GeneralLogger.log_group("Add description to group") GeneralLogger.log_group("Add description to group")
desc_response = self.groups.update_group(grp_id, "new_description") desc_response = self.groups.update_group(grp_id, "new_description")
self.assertEqual(201, desc_response.status_code, "update_group failed with code %s" % desc_response.status_code) self.assertEqual(201, desc_response.status_code,
"update_group failed with code %s"
% desc_response.status_code)
GeneralLogger.log_group("Create member (tenant ID)") GeneralLogger.log_group("Create member (tenant ID)")
member_respone = self.groups.update_group_members(grp_id) member_respone = self.groups.update_group_members(grp_id)
self.assertEqual(201, member_respone.status_code, "update_group_members failed with code %s" % member_respone.status_code) self.assertEqual(201, member_respone.status_code,
"update_group_members failed with code %s"
% member_respone.status_code)
GeneralLogger.log_group("Return list of groups") GeneralLogger.log_group("Return list of groups")
GeneralLogger.log_group(self.groups.get_group_details(grp_id).json()) GeneralLogger.log_group(self.groups.get_group_details(grp_id).json())
GeneralLogger.log_group("Delete test member (NOT tenant ID)") GeneralLogger.log_group("Delete test member (NOT tenant ID)")
member_respone = self.groups.delete_group_member(grp_id, "test_member") member_respone = self.groups.delete_group_member(grp_id, "test_member")
self.assertEqual(404, member_respone.status_code, "delete_group_member failed with code %s" % member_respone.status_code) self.assertEqual(404, member_respone.status_code,
"delete_group_member failed with code %s"
% member_respone.status_code)
GeneralLogger.log_group("Delete member (tenant ID)") GeneralLogger.log_group("Delete member (tenant ID)")
member_respone = self.groups.delete_group_member(grp_id, Auth.get_project_id()) member_respone = self.groups.delete_group_member(grp_id,
self.assertEqual(204, member_respone.status_code, "delete_group_member failed with code %s" % member_respone.status_code) Auth.get_project_id())
self.assertEqual(204, member_respone.status_code,
"delete_group_member failed with code %s"
% member_respone.status_code)
GeneralLogger.log_group("Return list of groups") GeneralLogger.log_group("Return list of groups")
GeneralLogger.log_group(self.groups.get_group_details(grp_id).json()) GeneralLogger.log_group(self.groups.get_group_details(grp_id).json())
def get_name(self): def get_name(self):
"""Return name."""
return __name__ return __name__

View File

@ -13,6 +13,8 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
"""Test Nested."""
from oslo_config import cfg from oslo_config import cfg
from oslo_log import log as logging from oslo_log import log as logging
from valet.tests.functional.valet_validator.common.init import CONF from valet.tests.functional.valet_validator.common.init import CONF
@ -30,14 +32,17 @@ LOG = logging.getLogger(__name__)
class TestNested(FunctionalTestCase): class TestNested(FunctionalTestCase):
"""Basic setup and functions for nested tests."""
def setUp(self): def setUp(self):
''' Adding configuration and logging mechanism ''' """Adding configuration and logging mechanism."""
super(TestNested, self).setUp() super(TestNested, self).setUp()
self.init_template(CONF.test_nested) self.init_template(CONF.test_nested)
def test_nested(self): def test_nested(self):
"""Call run_test on stack and give it the path to the template."""
self.run_test(self.stack_name, self.template_path) self.run_test(self.stack_name, self.template_path)
def get_name(self): def get_name(self):
"""Return name."""
return __name__ return __name__

Some files were not shown because too many files have changed in this diff Show More