Fix PEP 8 and docstring violations
Fix issues in the .py files under valet/valet/api/common, valet/api, valet/cli, valet/engine/optimizer/, valet/tests/, valet/tests/tempest, and valet_plugins.
This commit is contained in:
parent 5b9c5cf5b8
commit ad2b7fab31
@@ -18,9 +18,7 @@ Valet responds to the challenges outlined above by enhancing OpenStack Nova sche
* [valet-openstack](https://github.com/att-comdev/valet/blob/master/doc/valet_os.md): a set of OpenStack plugins used to interact with Valet
* [valet-api](https://github.com/att-comdev/valet/blob/master/doc/valet_api.md): an API engine used to interact with Valet
* [Ostro](https://github.com/att-comdev/valet/blob/master/doc/ostro.md): a placement optimization engine
* Music: a data storage and persistence service
* [ostro-listener](https://github.com/att-comdev/valet/blob/master/doc/ostro_listener.md): a message bus listener used in conjunction with Ostro and Music
* [havalet](https://github.com/att-comdev/valet/blob/master/doc/ha.md): a service that assists in providing high availability for Valet

## Additional documents:
11 doc/.idea/doc.iml Normal file
@@ -0,0 +1,11 @@
<?xml version="1.0" encoding="UTF-8"?>
<module type="PYTHON_MODULE" version="4">
  <component name="NewModuleRootManager">
    <content url="file://$MODULE_DIR$" />
    <orderEntry type="inheritedJdk" />
    <orderEntry type="sourceFolder" forTests="false" />
  </component>
  <component name="TestRunnerService">
    <option name="PROJECT_TEST_RUNNER" value="Unittests" />
  </component>
</module>
4 doc/.idea/misc.xml Normal file
@@ -0,0 +1,4 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
  <component name="ProjectRootManager" version="2" project-jdk-name="Python 3.6.0 (C:\Users\nv757p\vEnv-36\Scripts\python.exe)" project-jdk-type="Python SDK" />
</project>
8 doc/.idea/modules.xml Normal file
@@ -0,0 +1,8 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
  <component name="ProjectModuleManager">
    <modules>
      <module fileurl="file://$PROJECT_DIR$/.idea/doc.iml" filepath="$PROJECT_DIR$/.idea/doc.iml" />
    </modules>
  </component>
</project>
250 doc/.idea/workspace.xml Normal file
@@ -0,0 +1,250 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
  <component name="ChangeListManager">
    <list default="true" id="c284aaf5-9cd1-41bb-acfb-a73c8e5a3cec" name="Default" comment="" />
    <option name="EXCLUDED_CONVERTED_TO_IGNORED" value="true" />
    <option name="TRACKING_ENABLED" value="true" />
    <option name="SHOW_DIALOG" value="false" />
    <option name="HIGHLIGHT_CONFLICTS" value="true" />
    <option name="HIGHLIGHT_NON_ACTIVE_CHANGELIST" value="false" />
    <option name="LAST_RESOLUTION" value="IGNORE" />
  </component>
  <component name="CreatePatchCommitExecutor">
    <option name="PATCH_PATH" value="" />
  </component>
  <component name="ExecutionTargetManager" SELECTED_TARGET="default_target" />
  <component name="FileEditorManager">
    <leaf />
  </component>
  <component name="ProjectFrameBounds">
    <option name="x" value="722" />
    <option name="y" value="6" />
    <option name="width" value="1191" />
    <option name="height" value="1030" />
  </component>
  <component name="ProjectView">
    <navigator currentView="ProjectPane" proportions="" version="1">
      <flattenPackages />
      <showMembers />
      <showModules />
      <showLibraryContents />
      <hideEmptyPackages />
      <abbreviatePackageNames />
      <autoscrollToSource />
      <autoscrollFromSource />
      <sortByType />
      <manualOrder />
      <foldersAlwaysOnTop value="true" />
    </navigator>
    <panes>
      <pane id="ProjectPane">
        <subPane>
          <PATH>
            <PATH_ELEMENT>
              <option name="myItemId" value="doc" />
              <option name="myItemType" value="com.intellij.ide.projectView.impl.nodes.ProjectViewProjectNode" />
            </PATH_ELEMENT>
            <PATH_ELEMENT>
              <option name="myItemId" value="doc" />
              <option name="myItemType" value="com.intellij.ide.projectView.impl.nodes.PsiDirectoryNode" />
            </PATH_ELEMENT>
          </PATH>
        </subPane>
      </pane>
      <pane id="Scope" />
      <pane id="Scratches" />
    </panes>
  </component>
  <component name="PropertiesComponent">
    <property name="last_opened_file_path" value="$PROJECT_DIR$/../../hello_devstack" />
  </component>
  <component name="RunManager">
    <configuration default="true" type="PythonConfigurationType" factoryName="Python">
      <option name="INTERPRETER_OPTIONS" value="" />
      <option name="PARENT_ENVS" value="true" />
      <envs>
        <env name="PYTHONUNBUFFERED" value="1" />
      </envs>
      <option name="SDK_HOME" value="" />
      <option name="WORKING_DIRECTORY" value="" />
      <option name="IS_MODULE_SDK" value="false" />
      <option name="ADD_CONTENT_ROOTS" value="true" />
      <option name="ADD_SOURCE_ROOTS" value="true" />
      <module name="doc" />
      <option name="SCRIPT_NAME" value="" />
      <option name="PARAMETERS" value="" />
      <option name="SHOW_COMMAND_LINE" value="false" />
      <method />
    </configuration>
    <configuration default="true" type="Tox" factoryName="Tox">
      <option name="INTERPRETER_OPTIONS" value="" />
      <option name="PARENT_ENVS" value="true" />
      <envs />
      <option name="SDK_HOME" value="" />
      <option name="WORKING_DIRECTORY" value="" />
      <option name="IS_MODULE_SDK" value="false" />
      <option name="ADD_CONTENT_ROOTS" value="true" />
      <option name="ADD_SOURCE_ROOTS" value="true" />
      <module name="doc" />
      <method />
    </configuration>
    <configuration default="true" type="tests" factoryName="Attests">
      <option name="INTERPRETER_OPTIONS" value="" />
      <option name="PARENT_ENVS" value="true" />
      <envs />
      <option name="SDK_HOME" value="" />
      <option name="WORKING_DIRECTORY" value="" />
      <option name="IS_MODULE_SDK" value="false" />
      <option name="ADD_CONTENT_ROOTS" value="true" />
      <option name="ADD_SOURCE_ROOTS" value="true" />
      <module name="doc" />
      <option name="SCRIPT_NAME" value="" />
      <option name="CLASS_NAME" value="" />
      <option name="METHOD_NAME" value="" />
      <option name="FOLDER_NAME" value="" />
      <option name="TEST_TYPE" value="TEST_SCRIPT" />
      <option name="PATTERN" value="" />
      <option name="USE_PATTERN" value="false" />
      <method />
    </configuration>
    <configuration default="true" type="tests" factoryName="Doctests">
      <option name="INTERPRETER_OPTIONS" value="" />
      <option name="PARENT_ENVS" value="true" />
      <envs />
      <option name="SDK_HOME" value="" />
      <option name="WORKING_DIRECTORY" value="" />
      <option name="IS_MODULE_SDK" value="false" />
      <option name="ADD_CONTENT_ROOTS" value="true" />
      <option name="ADD_SOURCE_ROOTS" value="true" />
      <module name="doc" />
      <option name="SCRIPT_NAME" value="" />
      <option name="CLASS_NAME" value="" />
      <option name="METHOD_NAME" value="" />
      <option name="FOLDER_NAME" value="" />
      <option name="TEST_TYPE" value="TEST_SCRIPT" />
      <option name="PATTERN" value="" />
      <option name="USE_PATTERN" value="false" />
      <method />
    </configuration>
    <configuration default="true" type="tests" factoryName="Nosetests">
      <option name="INTERPRETER_OPTIONS" value="" />
      <option name="PARENT_ENVS" value="true" />
      <envs />
      <option name="SDK_HOME" value="" />
      <option name="WORKING_DIRECTORY" value="" />
      <option name="IS_MODULE_SDK" value="false" />
      <option name="ADD_CONTENT_ROOTS" value="true" />
      <option name="ADD_SOURCE_ROOTS" value="true" />
      <module name="doc" />
      <option name="SCRIPT_NAME" value="" />
      <option name="CLASS_NAME" value="" />
      <option name="METHOD_NAME" value="" />
      <option name="FOLDER_NAME" value="" />
      <option name="TEST_TYPE" value="TEST_SCRIPT" />
      <option name="PATTERN" value="" />
      <option name="USE_PATTERN" value="false" />
      <option name="PARAMS" value="" />
      <option name="USE_PARAM" value="false" />
      <method />
    </configuration>
    <configuration default="true" type="tests" factoryName="Unittests">
      <option name="INTERPRETER_OPTIONS" value="" />
      <option name="PARENT_ENVS" value="true" />
      <envs />
      <option name="SDK_HOME" value="" />
      <option name="WORKING_DIRECTORY" value="" />
      <option name="IS_MODULE_SDK" value="false" />
      <option name="ADD_CONTENT_ROOTS" value="true" />
      <option name="ADD_SOURCE_ROOTS" value="true" />
      <module name="doc" />
      <option name="SCRIPT_NAME" value="" />
      <option name="CLASS_NAME" value="" />
      <option name="METHOD_NAME" value="" />
      <option name="FOLDER_NAME" value="" />
      <option name="TEST_TYPE" value="TEST_SCRIPT" />
      <option name="PATTERN" value="" />
      <option name="USE_PATTERN" value="false" />
      <option name="PUREUNITTEST" value="true" />
      <option name="PARAMS" value="" />
      <option name="USE_PARAM" value="false" />
      <method />
    </configuration>
    <configuration default="true" type="tests" factoryName="py.test">
      <option name="INTERPRETER_OPTIONS" value="" />
      <option name="PARENT_ENVS" value="true" />
      <envs />
      <option name="SDK_HOME" value="" />
      <option name="WORKING_DIRECTORY" value="" />
      <option name="IS_MODULE_SDK" value="false" />
      <option name="ADD_CONTENT_ROOTS" value="true" />
      <option name="ADD_SOURCE_ROOTS" value="true" />
      <module name="doc" />
      <option name="SCRIPT_NAME" value="" />
      <option name="CLASS_NAME" value="" />
      <option name="METHOD_NAME" value="" />
      <option name="FOLDER_NAME" value="" />
      <option name="TEST_TYPE" value="TEST_SCRIPT" />
      <option name="PATTERN" value="" />
      <option name="USE_PATTERN" value="false" />
      <option name="testToRun" value="" />
      <option name="keywords" value="" />
      <option name="params" value="" />
      <option name="USE_PARAM" value="false" />
      <option name="USE_KEYWORD" value="false" />
      <method />
    </configuration>
  </component>
  <component name="ShelveChangesManager" show_recycled="false">
    <option name="remove_strategy" value="false" />
  </component>
  <component name="TaskManager">
    <task active="true" id="Default" summary="Default task">
      <changelist id="c284aaf5-9cd1-41bb-acfb-a73c8e5a3cec" name="Default" comment="" />
      <created>1485886196137</created>
      <option name="number" value="Default" />
      <option name="presentableId" value="Default" />
      <updated>1485886196137</updated>
    </task>
    <servers />
  </component>
  <component name="ToolWindowManager">
    <frame x="722" y="6" width="1191" height="1030" extended-state="0" />
    <editor active="false" />
    <layout>
      <window_info id="Project" active="true" anchor="left" auto_hide="false" internal_type="DOCKED" type="DOCKED" visible="true" show_stripe_button="true" weight="0.24936171" sideWeight="0.5" order="0" side_tool="false" content_ui="combo" />
      <window_info id="TODO" active="false" anchor="bottom" auto_hide="false" internal_type="DOCKED" type="DOCKED" visible="false" show_stripe_button="true" weight="0.33" sideWeight="0.5" order="6" side_tool="false" content_ui="tabs" />
      <window_info id="Event Log" active="false" anchor="bottom" auto_hide="false" internal_type="DOCKED" type="DOCKED" visible="false" show_stripe_button="true" weight="0.33" sideWeight="0.5" order="-1" side_tool="true" content_ui="tabs" />
      <window_info id="Version Control" active="false" anchor="bottom" auto_hide="false" internal_type="DOCKED" type="DOCKED" visible="false" show_stripe_button="false" weight="0.33" sideWeight="0.5" order="-1" side_tool="false" content_ui="tabs" />
      <window_info id="Python Console" active="false" anchor="bottom" auto_hide="false" internal_type="DOCKED" type="DOCKED" visible="false" show_stripe_button="true" weight="0.33" sideWeight="0.5" order="-1" side_tool="false" content_ui="tabs" />
      <window_info id="Structure" active="false" anchor="left" auto_hide="false" internal_type="DOCKED" type="DOCKED" visible="false" show_stripe_button="true" weight="0.25" sideWeight="0.5" order="1" side_tool="false" content_ui="tabs" />
      <window_info id="Terminal" active="false" anchor="bottom" auto_hide="false" internal_type="DOCKED" type="DOCKED" visible="false" show_stripe_button="true" weight="0.33" sideWeight="0.5" order="-1" side_tool="false" content_ui="tabs" />
      <window_info id="Favorites" active="false" anchor="left" auto_hide="false" internal_type="DOCKED" type="DOCKED" visible="false" show_stripe_button="true" weight="0.33" sideWeight="0.5" order="-1" side_tool="true" content_ui="tabs" />
      <window_info id="Cvs" active="false" anchor="bottom" auto_hide="false" internal_type="DOCKED" type="DOCKED" visible="false" show_stripe_button="true" weight="0.25" sideWeight="0.5" order="4" side_tool="false" content_ui="tabs" />
      <window_info id="Hierarchy" active="false" anchor="right" auto_hide="false" internal_type="DOCKED" type="DOCKED" visible="false" show_stripe_button="true" weight="0.25" sideWeight="0.5" order="2" side_tool="false" content_ui="combo" />
      <window_info id="Message" active="false" anchor="bottom" auto_hide="false" internal_type="DOCKED" type="DOCKED" visible="false" show_stripe_button="true" weight="0.33" sideWeight="0.5" order="0" side_tool="false" content_ui="tabs" />
      <window_info id="Commander" active="false" anchor="right" auto_hide="false" internal_type="DOCKED" type="DOCKED" visible="false" show_stripe_button="true" weight="0.4" sideWeight="0.5" order="0" side_tool="false" content_ui="tabs" />
      <window_info id="Find" active="false" anchor="bottom" auto_hide="false" internal_type="DOCKED" type="DOCKED" visible="false" show_stripe_button="true" weight="0.33" sideWeight="0.5" order="1" side_tool="false" content_ui="tabs" />
      <window_info id="Inspection" active="false" anchor="bottom" auto_hide="false" internal_type="DOCKED" type="DOCKED" visible="false" show_stripe_button="true" weight="0.4" sideWeight="0.5" order="5" side_tool="false" content_ui="tabs" />
      <window_info id="Run" active="false" anchor="bottom" auto_hide="false" internal_type="DOCKED" type="DOCKED" visible="false" show_stripe_button="true" weight="0.33" sideWeight="0.5" order="2" side_tool="false" content_ui="tabs" />
      <window_info id="Ant Build" active="false" anchor="right" auto_hide="false" internal_type="DOCKED" type="DOCKED" visible="false" show_stripe_button="true" weight="0.25" sideWeight="0.5" order="1" side_tool="false" content_ui="tabs" />
      <window_info id="Debug" active="false" anchor="bottom" auto_hide="false" internal_type="DOCKED" type="DOCKED" visible="false" show_stripe_button="true" weight="0.4" sideWeight="0.5" order="3" side_tool="false" content_ui="tabs" />
    </layout>
  </component>
  <component name="VcsContentAnnotationSettings">
    <option name="myLimit" value="2678400000" />
  </component>
  <component name="XDebuggerManager">
    <breakpoint-manager />
    <watches-manager />
  </component>
  <component name="editorHistoryManager">
    <entry file="file://$PROJECT_DIR$/ostro_listener1.rst">
      <provider selected="true" editor-type-id="text-editor">
        <state relative-caret-position="0">
          <caret line="0" column="0" lean-forward="false" selection-start-line="0" selection-start-column="0" selection-end-line="0" selection-end-column="0" />
          <folding />
        </state>
      </provider>
    </entry>
  </component>
</project>
176 doc/LICENSE
@@ -1,176 +0,0 @@

                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.
111 doc/ha.md
@@ -1,111 +0,0 @@
High Availability Valet Tools
=============================

This tool monitors one or more configured processes to maintain high
availability.

~~~~ {.bash}
$ python ./ha_valet.py [-p name]
~~~~

ha\_valet.cfg
-------------

The ha\_valet configuration file contains a list of dictionaries. List
keys are logical process names. List values are dictionaries
representing a monitored Valet-related process.

Each dictionary **must** contain the following properties:

    host
    user
    port
    protocol
    start_command
    stop_command
    test_command

Optional properties include:

    order
    priority
    stand_by_list

### Notes

- The return value of `test_command` **must not** be 0 and should
  reflect the monitored process priority (see next section).

- `stand_by_list` is an optional comma-delimited list of hosts used in
  conjunction with active/stand-by scenarios. ha\_valet will attempt
  to restart the instance with the lower priority. If that instance
  fails to start, ha\_valet will try restarting the process of the
  next host in the list.

- `priority` is used to establish the primary/secondary hierarchy. It
  **must** be greater than 0. The lower the number, the higher the
  priority.

### Monitored Process Priority

Monitored process priority is used in conjunction with active/stand-by
scenarios. Unless a process is down, its priority **must** be greater
than 0. The lower the number, the higher the priority.

For example, an instance returning `1` (in response to `test_command`)
will take precedence over an instance returning `2`. A priority of 0
means the process is down.
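The comparison ha\_valet makes can be pictured with this minimal
sketch (illustrative only; this is not code from ha\_valet.py):

~~~~ {.python}
def choose_active(priorities):
    """Pick the host to activate: lowest nonzero priority wins.

    `priorities` maps host name -> value returned by test_command;
    0 means the process on that host is down.
    """
    live = {host: p for host, p in priorities.items() if p > 0}
    if not live:
        return None  # everything is down; ha_valet must restart something
    return min(live, key=live.get)

# Host_A answers 1, Host_B answers 2 -> Host_A takes precedence.
print(choose_active({'Host_A': 1, 'Host_B': 2}))  # Host_A
~~~~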
Examples
--------

### Host A

:Ostro
    host = Host_A
    stand_by_list = Host_A,Host_B
    user = stack
    port = 8091
    protocol = http
    priority = 1
    start_command="ssh %s@%s 'cd @OSTRO_SERVER_DIR@ ; sudo python ./ostro_daemon.py start'" % (user, host)
    stop_command="ssh %s@%s 'cd @OSTRO_SERVER_DIR@ ; sudo python ./ostro_daemon.py stop'" % (user, host)
    test_command="ssh %s@%s 'exit $(@OSTRO_SERVER_DIR@ ; sudo python ./ostro_daemon.py status ; echo $?)'" % (user, host)

:Allegro
    host = Host_A
    user = stack
    port = 8090
    protocol = http
    priority = 1
    start_command="sudo python @ALLEGRO_WSGI_DIR@/wsgi.py &"
    stop_command="sudo pkill -f wsgi"
    test_command="netstat -nap | grep %s | grep LISTEN | wc -l | exit $(awk \'{print $1}\')" % (port)

### Host B (172.20.90.130)

:Ostro
    host = Host_B
    stand_by_list = Host_A,Host_B
    user = stack
    port = 8091
    protocol = http
    priority = 2
    start_command="ssh %s@%s 'cd @OSTRO_SERVER_DIR@ ; sudo python ./ostro_daemon.py start'" % (user, host)
    stop_command="ssh %s@%s 'cd @OSTRO_SERVER_DIR@ ; sudo python ./ostro_daemon.py stop'" % (user, host)
    test_command="ssh %s@%s 'exit $(@OSTRO_SERVER_DIR@ ; sudo python ./ostro_daemon.py status ; echo $?)'" % (user, host)

:Allegro
    host = Host_B
    user = stack
    port = 8090
    protocol = http
    priority = 1
    start_command="sudo python @ALLEGRO_WSGI_DIR@/wsgi.py &"
    stop_command="sudo pkill -f wsgi"
    test_command="netstat -nap | grep %s | grep LISTEN | wc -l | exit $(awk \'{print $1}\')" % (port)

Contact
-------

Joe D'Andrea <jdandrea@research.att.com>
@@ -23,18 +23,19 @@ Throughout this document, the following installation-specific items are
required. Have values for these prepared and ready before continuing.
Suggestions for values are provided in this document where applicable.

Name                          Description                                         Example
----------------------------- --------------------------------------------------- -------------------------------------------
`$USER`                       User id                                             `user1234`
`$VENV`                       Python virtual environment path (if any)            `/etc/ostro-listener/venv`
`$OSTRO_LISTENER_PATH`        Local git repository's `ostro_listener` directory   `/home/user1234/git/allegro/ostro_listener`
`$CONFIG_FILE`                Event Listener configuration file                   `/etc/ostro-listener/ostro-listener.conf`
`$RABBITMQ_HOST`              RabbitMQ hostname or IP address                     `localhost`
`$RABBITMQ_USERNAME`          RabbitMQ username                                   `guest`
`$RABBITMQ_PASSWORD_FILE`     Full path to RabbitMQ password file                 `/etc/ostro-listener/passwd`
`$MUSIC_URL`                  Music API endpoints and port in URL format          `http://127.0.0.1:8080/`
`$MUSIC_KEYSPACE`             Music keyspace                                      `valet`
`$MUSIC_REPLICATION_FACTOR`   Music replication factor                            `1`
| Name | Description | Example |
|------|-------------|---------|
| `$USER` | User id | `user1234` |
| `$VENV` | Python virtual environment path (if any) | `/etc/ostro-listener/venv` |
| `$OSTRO_LISTENER_PATH` | Local git repository's `ostro_listener` directory | `/home/user1234/git/allegro/ostro_listener` |
| `$CONFIG_FILE` | Event Listener configuration file | `/etc/ostro-listener/ostro-listener.conf` |
| `$RABBITMQ_HOST` | RabbitMQ hostname or IP address | `localhost` |
| `$RABBITMQ_USERNAME` | RabbitMQ username | `guest` |
| `$RABBITMQ_PASSWORD_FILE` | Full path to RabbitMQ password file | `/etc/ostro-listener/passwd` |
| `$MUSIC_URL` | Music API endpoints and port in URL format | `http://127.0.0.1:8080/` |
| `$MUSIC_KEYSPACE` | Music keyspace | `valet` |
| `$MUSIC_REPLICATION_FACTOR` | Music replication factor | `1` |

Root or sufficient sudo privileges are required for some steps.

@@ -250,8 +251,3 @@ $ sudo pip uninstall ostro-listener

Remove previously made configuration file changes, files, and other
settings as needed.

Contact
-------

Joe D'Andrea <jdandrea@research.att.com>
@@ -28,15 +28,6 @@ Valet1.0/Ostro features
  load spikes of tenant applications. Later, we will deploy a more
  dynamic mechanism in a future version of Ostro.

- High availability: Ostro replicas run in an active-passive
  configuration. When the active Ostro fails, the passive one is
  activated automatically via HAValet. All data is updated in the
  MUSIC database at runtime whenever it changes. When the passive
  Ostro is activated, it initializes its state from MUSIC rather
  than from OpenStack. Ostro also answers ping messages to show
  whether it is alive.

- Runtime update via the Oslo message bus or RO. (Work in progress.)

- Migration tip. (Work in progress.)
34 doc/valet.md
@@ -1,34 +0,0 @@
# Valet

Valet gives OpenStack the ability to optimize cloud resources while simultaneously meeting a cloud application's QoS requirements. Valet provides an api service, a placement optimizer (Ostro), a high availability data storage and persistence layer (Music), and a set of OpenStack plugins.

**IMPORTANT**: [Overall Installation of Valet is covered in a separate document](https://github.com/att-comdev/valet/blob/master/doc/valet_os.md).

Learn more about Valet:

* [OpenStack Newton Summit Presentation](https://www.openstack.org/videos/video/valet-holistic-data-center-optimization-for-openstack) (Austin, TX, 27 April 2016)
* [Presentation Slides](http://www.research.att.com/export/sites/att_labs/techdocs/TD_101806.pdf) (PDF)

Valet consists of the following components:

* [valet-openstack](https://github.com/att-comdev/valet/blob/master/doc/valet_os.md): a set of OpenStack plugins used to interact with Valet
* [valet-api](https://github.com/att-comdev/valet/blob/master/doc/valet_api.md): an API engine used to interact with Valet
* [Ostro](https://github.com/att-comdev/valet/blob/master/doc/ostro.md): a placement optimization engine
* Music: a data storage and persistence service
* [ostro-listener](https://github.com/att-comdev/valet/blob/master/doc/ostro_listener.md): a message bus listener used in conjunction with Ostro and Music
* [havalet](https://github.com/att-comdev/valet/blob/master/doc/ha.md): a service that assists in providing high availability for Valet

Additional documents:

* [OpenStack Heat Resource Plugins](https://github.com/att-comdev/valet/blob/master/valet_plugins/valet_plugins/heat/README.md): Heat resources
* [Placement API](https://github.com/att-comdev/valet/blob/master/doc/valet_api.md): API requests/responses
* [Using Postman with valet-api](https://github.com/att-comdev/valet/blob/master/valet/tests/api/README.md): Postman support

## Thank You

Alicia Abella, Saar Alaluf, Bharath Balasubramanian, Roy Ben Hai, Shimon Benattar, Yael Ben Shalom, Benny Bustan, Rachel Cohen, Joe D'Andrea, Harel Dorfman, Boaz Elitzur, P.K. Esrawan, Inbal Harduf, Matti Hiltunen, Doron Honigsberg, Kaustubh Joshi, Gueyoung Jung, Gerald Karam, David Khanin, Israel Kliger, Erez Korn, Max Osipov, Chris Rice, Amnon Sagiv, Gideon Shafran, Galit Shemesh, Anna Yefimov; AT&T Advanced Technology and Architecture, AT&T Technology Development - AIC, additional partners in AT&T Domain 2.0. Apologies if we missed anyone (please advise via email!).

## Contact

Joe D'Andrea <jdandrea@research.att.com>
@@ -4,7 +4,7 @@ Valet gives OpenStack the ability to optimize cloud resources while simultaneous

This document covers installation of valet-api, the API engine used to interact with Valet.

**IMPORTANT**: [Overall Installation of Valet is covered in a separate document](https://github.com/att-comdev/valet/blob/master/doc/valet_os.md). These instructions are to be used by the Bedminster and Tel Aviv development teams.
**IMPORTANT**: [Overall Installation of Valet is covered in a separate document](https://github.com/att-comdev/valet/blob/master/doc/valet_os.md).

## Prerequisites

@@ -13,7 +13,7 @@ Prior to installation:

* Ubuntu 14.04 LTS
* Python 2.7.6 with pip
* An OpenStack Kilo cloud
* Music 6.0
* [Music](https://github.com/att-comdev/valet) 6.0
* [Ostro](https://github.com/att-comdev/valet/blob/master/doc/ostro.md) 2.0

Throughout this document, the following installation-specific items are required. Have values for these prepared and ready before continuing. Suggestions for values are provided in this document where applicable.

@@ -249,7 +249,3 @@ $ sudo pip uninstall valet-api
```

Remove previously made configuration file changes, OpenStack user accounts, and other settings as needed.

## Contact

Joe D'Andrea <jdandrea@research.att.com>

@@ -173,7 +173,3 @@ $ sudo pip uninstall valet-openstack
```

Remove previously made configuration file changes, OpenStack user accounts, and other settings as needed.

## Contact

Joe D'Andrea <jdandrea@research.att.com>
@@ -1,12 +1,12 @@
#
# Copyright 2014-2017 AT&T Intellectual Property
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -94,8 +94,9 @@ ostro = {

messaging = {
    'config': {
        'transport_url': 'rabbit://' + CONF.messaging.username + ':' + CONF.messaging.password +
                         '@' + CONF.messaging.host + ':' + str(CONF.messaging.port) + '/'
        'transport_url': 'rabbit://' + CONF.messaging.username + ':' +
                         CONF.messaging.password + '@' + CONF.messaging.host + ':' +
                         str(CONF.messaging.port) + '/'
    }
}
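Both the old and new forms build the same RabbitMQ transport URL; only the
line wrapping changes. A small sketch with stand-in values (not real
configuration) shows the resulting format:

```python
# Stand-ins for CONF.messaging.*; only the resulting format matters here.
username, password, host, port = 'guest', 'secret', 'localhost', 5672
transport_url = ('rabbit://' + username + ':' + password +
                 '@' + host + ':' + str(port) + '/')
print(transport_url)  # rabbit://guest:secret@localhost:5672/
```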
@@ -1,33 +1,39 @@
#
# Copyright 2014-2017 AT&T Intellectual Property
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Notification Listener."""

import json
from oslo_config import cfg
import oslo_messaging


class NotificationEndpoint(object):
    """Endpoint for a notification (info, warn, error)."""

    def info(self, ctxt, publisher_id, event_type, payload, metadata):
        """Print that a notification was received and dump its JSON payload."""
        print('recv notification:')
        print(json.dumps(payload, indent=4))

    def warn(self, ctxt, publisher_id, event_type, payload, metadata):
        """Warn."""
        None

    def error(self, ctxt, publisher_id, event_type, payload, metadata):
        """Error."""
        None

transport = oslo_messaging.get_transport(cfg.CONF)
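For context, a hedged sketch of how `NotificationEndpoint` might be wired to
a listener with oslo.messaging; the topic name here is illustrative, not
taken from the diff:

```python
# Assumes the transport and NotificationEndpoint defined above.
targets = [oslo_messaging.Target(topic='notifications')]
endpoints = [NotificationEndpoint()]
listener = oslo_messaging.get_notification_listener(transport, targets,
                                                    endpoints)
listener.start()  # dispatches info/warn/error notifications to the endpoint
listener.wait()
```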
8 setup.py
@@ -1,19 +1,19 @@
#
# Copyright 2014-2017 AT&T Intellectual Property
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

'''Setup'''
""" Setup """

import setuptools
@@ -26,4 +26,4 @@ python-heatclient<=1.2.0

oslo.messaging==1.8.3
#tempest<=12.1.0 ---------- needs to be installed on Jenkins, no output when using tox
#tempest-lib>=0.8.0
#tempest-lib>=0.8.0

1 tox.ini
@@ -63,4 +63,3 @@ show-source = True
ignore = E123,E125,E501,H401,H105,H301
builtins = _
exclude=.venv,.git,.tox,dist,doc,*openstack/common*,*lib/python*,*egg,build
@@ -13,7 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.

'''Application'''
"""Application."""

from pecan.deploy import deploy
from pecan import make_app
@@ -23,7 +23,7 @@ from valet.api.db import models


def setup_app(config):
    """ App Setup """
    """App Setup."""
    identity.init_identity()
    messaging.init_messaging()
    models.init_model()
@@ -36,6 +36,7 @@ def setup_app(config):

# entry point for apache2
def load_app(config_file):
    """App Load."""
    register_conf()
    set_domain(project='valet')
    return deploy(config_file)
@@ -1,19 +1,25 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
# Copyright 2014-2017 AT&T Intellectual Property
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Terminate thread."""

import ctypes


def terminate_thread(thread):
    """Terminates a python thread from another thread.
    """Terminate a python thread from another thread.

    :param thread: a threading.Thread instance
    """
@@ -26,8 +32,8 @@ def terminate_thread(thread):
    if res == 0:
        raise ValueError("nonexistent thread id")
    elif res > 1:
        # """if it returns a number greater than one, you're in trouble,
        # and you should call it again with exc=NULL to revert the effect"""
        # If it returns a number greater than one, you're in trouble,
        # and you should call it again with exc=NULL to revert the effect
        ctypes.pythonapi.PyThreadState_SetAsyncExc(thread.ident, None)
        raise SystemError("PyThreadState_SetAsyncExc failed")
    print('valet watcher thread exits')
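A hedged usage sketch for `terminate_thread` (the worker function here is
hypothetical, not part of this commit):

```python
import threading
import time

def worker():  # hypothetical long-running loop
    while True:
        time.sleep(1)

t = threading.Thread(target=worker)
t.daemon = True  # precaution: let the interpreter exit even if teardown lags
t.start()
terminate_thread(t)  # asynchronously raises an exception inside the worker
```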
|
@ -13,7 +13,7 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
'''Compute helper library'''
|
||||
"""Compute helper library."""
|
||||
|
||||
from novaclient import client
|
||||
from pecan import conf
|
||||
@ -23,7 +23,7 @@ VERSION = 2
|
||||
|
||||
|
||||
def nova_client():
|
||||
'''Returns a nova client'''
|
||||
"""Return a nova client."""
|
||||
sess = conf.identity.engine.session
|
||||
nova = client.Client(VERSION, session=sess)
|
||||
return nova
|
||||
|
@@ -13,7 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.

'''Hooks'''
"""Hooks."""

import json
import logging
@@ -31,8 +31,10 @@ LOG = logging.getLogger(__name__)


class MessageNotificationHook(PecanHook):
    '''Send API request/responses out as Oslo msg notifications.'''
    """Send API request/responses out as Oslo msg notifications."""

    def after(self, state):
        """Send a Valet notification."""
        self.dummy = True
        LOG.info('sending notification')
        notifier = conf.messaging.notifier
@@ -44,7 +46,8 @@ class MessageNotificationHook(PecanHook):
        else:
            notifier_fn = notifier.error

        ctxt = {}  # Not using this just yet.
        # Not using this just yet.
        ctxt = {}

        request_path = state.request.path

@@ -86,7 +89,8 @@ class MessageNotificationHook(PecanHook):
            }
        }

        # notifier_fn blocks in case rabbit mq is down - it prevents Valet API to return its response :(
        # notifier_fn blocks when RabbitMQ is down, which prevents the
        # Valet API from returning its response;
        # send the notification in a different thread
        notifier_thread = threading.Thread(target=notifier_fn, args=(ctxt, event_type, payload))
        notifier_thread.start()
@@ -99,10 +103,11 @@


class NotFoundHook(PecanHook):
    '''Catchall 'not found' hook for API'''
    """Catchall 'not found' hook for API."""

    def on_error(self, state, exc):
        """Redirect to app-specific not_found endpoint if 404 only."""
        self.dummy = True
        '''Redirects to app-specific not_found endpoint if 404 only'''
        if isinstance(exc, webob.exc.WSGIHTTPException) and exc.code == 404:
            message = _('The resource could not be found.')
            error('/errors/not_found', message)
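A hedged sketch of attaching these hooks to the Pecan app; the root
controller path below is hypothetical (the real wiring lives in the
valet-api application configuration):

```python
from pecan import make_app

# Hypothetical wiring; shown only to illustrate how PecanHook subclasses
# such as the two above are registered.
app = make_app('valet.api.v1.controllers.root.RootController',
               hooks=[MessageNotificationHook(), NotFoundHook()])
```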
@@ -13,7 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.

"""i18n library"""
"""i18n library."""

import gettext
@@ -13,7 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.

'''Identity helper library'''
"""Identity helper library."""

from datetime import datetime

@@ -30,12 +30,13 @@ LOG = logging.getLogger(__name__)


def utcnow():
    '''Returns the time (UTC)'''
    """Return the time (UTC)."""
    return datetime.now(tz=pytz.utc)


class Identity(object):
    '''Convenience library for all identity service-related queries.'''
    """Convenience library for all identity service-related queries."""

    _args = None
    _client = None
    _interface = None
@@ -43,7 +44,7 @@ class Identity(object):

    @classmethod
    def is_token_admin(cls, token):
        '''Returns true if decoded token has an admin role'''
        """Return true if decoded token has an admin role."""
        for role in token.user.get('roles', []):
            if role.get('name') == 'admin':
                return True
@@ -51,16 +52,16 @@

    @classmethod
    def tenant_from_token(cls, token):
        '''Returns tenant id from decoded token'''
        """Return tenant id from decoded token."""
        return token.tenant.get('id', None)

    @classmethod
    def user_from_token(cls, token):
        '''Returns user id from decoded token'''
        """Return user id from decoded token."""
        return token.user.get('id', None)

    def __init__(self, interface='admin', **kwargs):
        '''Initializer.'''
        """Initializer."""
        self._interface = interface
        self._args = kwargs
        self._client = None
@@ -68,7 +69,7 @@ class Identity(object):

    @property
    def _client_expired(self):
        '''Returns True if cached client's token is expired.'''
        """Return True if cached client's token is expired."""
        # NOTE: Keystone may auto-regen the client now (v2? v3?)
        # If so, this trip may no longer be necessary. Doesn't
        # hurt to keep it around for the time being.
@@ -84,7 +85,7 @@

    @property
    def client(self):
        '''Returns an identity client.'''
        """Return an identity client."""
        if not self._client or self._client_expired:
            auth = v2.Password(**self._args)
            self._session = session.Session(auth=auth)
@@ -94,11 +95,11 @@

    @property
    def session(self):
        '''Read-only access to the session.'''
        """Read-only access to the session."""
        return self._session

    def validate_token(self, auth_token):
        '''Returns validated token or None if invalid'''
        """Return validated token or None if invalid."""
        kwargs = {
            'token': auth_token,
        }
@@ -110,7 +111,7 @@
        return None

    def is_tenant_list_valid(self, tenant_list):
        '''Returns true if tenant list contains valid tenant IDs'''
        """Return true if tenant list contains valid tenant IDs."""
        tenants = self.client.tenants.list()
        if isinstance(tenant_list, list):
            found = False
@@ -123,14 +124,15 @@


def is_tenant_in_tenants(tenant_id, tenants):
    for tenant in tenants:
        if tenant_id == tenant.id:
            return True
    return False
    """Return true if tenant exists."""
    for tenant in tenants:
        if tenant_id == tenant.id:
            return True
    return False


def _identity_engine_from_config(config):
    '''Initialize the identity engine based on supplied config.'''
    """Initialize the identity engine based on supplied config."""
    # Using tenant_name instead of project name due to keystone v2
    kwargs = {
        'username': config.get('username'),
@@ -144,7 +146,7 @@ def _identity_engine_from_config(config):


def init_identity():
    '''Initialize the identity engine and place in the config.'''
    """Initialize the identity engine and place in the config."""
    config = conf.identity.config
    engine = _identity_engine_from_config(config)
    conf.identity.engine = engine
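A hedged usage sketch of the `Identity` helper above (credentials and the
auth endpoint are placeholders, not values from this commit):

```python
# Placeholder credentials; Identity forwards these to the keystone v2 client.
engine = Identity(username='admin', password='secret', tenant_name='demo',
                  auth_url='http://localhost:5000/v2.0')
token = engine.validate_token('some-auth-token')  # None if invalid
if token and Identity.is_token_admin(token):
    print(Identity.tenant_from_token(token))
```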
@@ -13,7 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.

'''Messaging helper library'''
"""Messaging helper library."""

from oslo_config import cfg
import oslo_messaging as messaging
@@ -22,7 +22,7 @@ from valet.api.conf import set_domain, DOMAIN


def _messaging_notifier_from_config(config):
    '''Initialize the messaging engine based on supplied config.'''
    """Initialize the messaging engine based on supplied config."""
    transport_url = config.get('transport_url')
    transport = messaging.get_transport(cfg.CONF, transport_url)
    notifier = messaging.Notifier(transport, driver='messaging',
@@ -32,7 +32,7 @@ def _messaging_notifier_from_config(config):


def init_messaging():
    '''Initialize the messaging engine and place in the config.'''
    """Initialize the messaging engine and place in the config."""
    set_domain(DOMAIN)
    config = conf.messaging.config
    notifier = _messaging_notifier_from_config(config)
@@ -13,7 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-'''Ostro helper library'''
+"""Ostro helper library."""
 
 import json
 import logging
@@ -42,13 +42,13 @@ EXCLUSIVITY = 'exclusivity'
 
 
 def _log(text, title="Ostro"):
-    '''Log helper'''
+    """Log helper."""
     log_text = "%s: %s" % (title, text)
     LOG.debug(log_text)
 
 
 class Ostro(object):
-    '''Ostro optimization engine helper class.'''
+    """Ostro optimization engine helper class."""
 
     args = None
     request = None
@@ -56,12 +56,15 @@ class Ostro(object):
     error_uri = None
     tenant_id = None
 
-    tries = None  # Number of times to poll for placement.
-    interval = None  # Interval in seconds to poll for placement.
+    # Number of times to poll for placement.
+    tries = None
+
+    # Interval in seconds to poll for placement.
+    interval = None
 
     @classmethod
     def _build_error(cls, message):
-        '''Build an Ostro-style error message'''
+        """Build an Ostro-style error message."""
         if not message:
             message = _("Unknown error")
         error = {
@@ -74,7 +77,7 @@ class Ostro(object):
 
     @classmethod
     def _build_uuid_map(cls, resources):
-        '''Build a dict mapping names to UUIDs.'''
+        """Build a dict mapping names to UUIDs."""
         mapping = {}
         for key in resources.iterkeys():
             if 'name' in resources[key]:
@@ -84,7 +87,7 @@ class Ostro(object):
 
     @classmethod
     def _sanitize_resources(cls, resources):
-        '''Ensure lowercase keys at the top level of each resource.'''
+        """Ensure lowercase keys at the top level of each resource."""
        for res in resources.itervalues():
            for key in list(res.keys()):
                if not key.islower():
@@ -92,12 +95,12 @@ class Ostro(object):
         return resources
 
     def __init__(self):
-        '''Initializer'''
+        """Initializer."""
         self.tries = conf.music.get('tries', 10)
         self.interval = conf.music.get('interval', 1)
 
     def _map_names_to_uuids(self, mapping, data):
-        '''Map resource names to their UUID equivalents.'''
+        """Map resource names to their UUID equivalents."""
         if isinstance(data, dict):
             for key in data.iterkeys():
                 if key != 'name':
@@ -110,11 +113,11 @@ class Ostro(object):
         return data
 
     def _prepare_resources(self, resources):
-        ''' Pre-digests resource data for use by Ostro.
+        """Pre-digest resource data for use by Ostro.
 
         Maps Heat resource names to Orchestration UUIDs.
         Ensures exclusivity groups exist and have tenant_id as a member.
-        '''
+        """
         mapping = self._build_uuid_map(resources)
         ostro_resources = self._map_names_to_uuids(mapping, resources)
         self._sanitize_resources(ostro_resources)
@@ -126,8 +129,7 @@ class Ostro(object):
 
     # TODO(JD): This really belongs in valet-engine once it exists.
     def _send(self, stack_id, request):
-        '''Send request.'''
-
+        """Send request."""
         # Creating the placement request effectively enqueues it.
         PlacementRequest(stack_id=stack_id, request=request)  # pylint: disable=W0612
 
@@ -149,13 +151,13 @@ class Ostro(object):
         return json.dumps(response)
 
     def _verify_groups(self, resources, tenant_id):
-        ''' Verifies group settings. Returns an error status dict if the
-        group type is invalid, if a group name is used when the type
-        is affinity or diversity, if a nonexistant exclusivity group
-        is found, or if the tenant is not a group member.
-        Returns None if ok.
-        '''
+        """Verify group settings.
+
+        Returns an error status dict if the group type is invalid, if a
+        group name is used when the type is affinity or diversity, if a
+        nonexistant exclusivity group is found, or if the tenant
+        is not a group member. Returns None if ok.
+        """
         message = None
         for res in resources.itervalues():
             res_type = res.get('type')
@@ -167,13 +169,17 @@ class Ostro(object):
                     group_type == DIVERSITY:
                 if group_name:
                     self.error_uri = '/errors/conflict'
-                    message = _("%s must not be used when {0} is '{1}'. ").format(GROUP_NAME, GROUP_TYPE, group_type)
+                    message = _("%s must not be used when"
+                                " {0} is '{1}'.").format(GROUP_NAME,
+                                                         GROUP_TYPE,
+                                                         group_type)
                     break
             elif group_type == EXCLUSIVITY:
                 message = self._verify_exclusivity(group_name, tenant_id)
             else:
                 self.error_uri = '/errors/invalid'
-                message = _("{0} '{1}' is invalid.").format(GROUP_TYPE, group_type)
+                message = _("{0} '{1}' is invalid.").format(GROUP_TYPE,
+                                                            group_type)
                 break
         if message:
             return self._build_error(message)
@@ -182,7 +188,9 @@ class Ostro(object):
         return_message = None
         if not group_name:
             self.error_uri = '/errors/invalid'
-            return _("%s must be used when {0} is '{1}'.").format(GROUP_NAME, GROUP_TYPE, EXCLUSIVITY)
+            return _("%s must be used when {0} is '{1}'.").format(GROUP_NAME,
+                                                                  GROUP_TYPE,
+                                                                  EXCLUSIVITY)
 
         group = Group.query.filter_by(  # pylint: disable=E1101
             name=group_name).first()
@@ -191,15 +199,19 @@ class Ostro(object):
             return_message = "%s '%s' not found" % (GROUP_NAME, group_name)
         elif group and tenant_id not in group.members:
             self.error_uri = '/errors/conflict'
-            return_message = _("Tenant ID %s not a member of {0} '{1}' ({2})").format(self.tenant_id, GROUP_NAME, group.name, group.id)
+            return_message = _("Tenant ID %s not a member of "
+                               "{0} '{1}' ({2})").format(self.tenant_id,
+                                                         GROUP_NAME,
+                                                         group.name,
+                                                         group.id)
         return return_message
 
     def build_request(self, **kwargs):
-        ''' Build an Ostro request. If False is returned,
-
-        the response attribute contains status as to the error.
-        '''
+        """Build an Ostro request.
+
+        If False is returned then the response attribute contains
+        status as to the error.
+        """
         # TODO(JD): Refactor this into create and update methods?
         self.args = kwargs.get('args')
         self.tenant_id = kwargs.get('tenant_id')
@@ -235,7 +247,7 @@ class Ostro(object):
         return True
 
     def is_request_serviceable(self):
-        ''' Returns true if the request has at least one serviceable resource. '''
+        """Return true if request has at least one serviceable resource."""
         # TODO(JD): Ostro should return no placements vs throw an error.
         resources = self.request.get('resources', {})
         for res in resources.itervalues():
@@ -245,7 +257,7 @@ class Ostro(object):
         return False
 
     def ping(self):
-        '''Send a ping request and obtain a response.'''
+        """Send a ping request and obtain a response."""
         stack_id = str(uuid.uuid4())
         self.args = {'stack_id': stack_id}
         self.response = None
@@ -256,7 +268,7 @@ class Ostro(object):
         }
 
     def replan(self, **kwargs):
-        '''Replan a placement.'''
+        """Replan a placement."""
         self.args = kwargs.get('args')
         self.response = None
         self.error_uri = None
@@ -269,7 +281,7 @@ class Ostro(object):
         }
 
     def migrate(self, **kwargs):
-        '''Replan the placement for an existing resource.'''
+        """Replan the placement for an existing resource."""
         self.args = kwargs.get('args')
         self.response = None
         self.error_uri = None
@@ -281,7 +293,7 @@ class Ostro(object):
         }
 
     def query(self, **kwargs):
-        '''Send a query.'''
+        """Send a query."""
         stack_id = str(uuid.uuid4())
         self.args = kwargs.get('args')
         self.args['stack_id'] = stack_id
@@ -295,7 +307,7 @@ class Ostro(object):
         }
 
     def send(self):
-        '''Send the request and obtain a response.'''
+        """Send the request and obtain a response."""
         request_json = json.dumps([self.request])
 
         # TODO(JD): Pass timeout value?
@@ -13,6 +13,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+"""Conf."""
+
 from oslo_config import cfg
 
 
@@ -70,10 +72,12 @@ music_opts = [
 
 
 def set_domain(project=DOMAIN):
+    """Set Domain."""
     CONF([], project)
 
 
 def register_conf():
+    """Register confs."""
     CONF.register_group(server_group)
     CONF.register_opts(server_opts, server_group)
     CONF.register_group(music_group)
@@ -1,18 +1,15 @@
 # -*- encoding: utf-8 -*-
 #
-# Copyright (c) 2014-2016 AT&T
+# Copyright 2014-2017 AT&T Intellectual Property
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
 #
-#    http://www.apache.org/licenses/LICENSE-2.0
+#     http://www.apache.org/licenses/LICENSE-2.0
 #
 # Unless required by applicable law or agreed to in writing, software
 # distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-#
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
@@ -1,22 +1,19 @@
 # -*- encoding: utf-8 -*-
 #
-# Copyright (c) 2014-2016 AT&T
+# Copyright 2014-2017 AT&T Intellectual Property
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
 #
-#    http://www.apache.org/licenses/LICENSE-2.0
+#     http://www.apache.org/licenses/LICENSE-2.0
 #
 # Unless required by applicable law or agreed to in writing, software
 # distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-#
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-'''Music ORM - Common Methods'''
+"""Music ORM - Common Methods"""
 
 from abc import ABCMeta, abstractmethod
 import inspect
@@ -28,7 +25,7 @@ from valet.api.db.models.music.music import Music
 
 
 def get_class(kls):
-    '''Returns a class given a fully qualified class name'''
+    """Returns a class given a fully qualified class name"""
     parts = kls.split('.')
     module = ".".join(parts[:-1])
     mod = __import__(module)
@@ -38,7 +35,7 @@ def get_class(kls):
 
 
 class abstractclassmethod(classmethod):  # pylint: disable=C0103,R0903
-    '''Abstract Class Method from Python 3.3's abc module'''
+    """Abstract Class Method from Python 3.3's abc module"""
 
     __isabstractmethod__ = True
 
@@ -48,28 +45,28 @@ class abstractclassmethod(classmethod):  # pylint: disable=C0103,R0903
 
 
 class ClassPropertyDescriptor(object):  # pylint: disable=R0903
-    '''Supports the notion of a class property'''
+    """Supports the notion of a class property"""
 
     def __init__(self, fget, fset=None):
-        '''Initializer'''
+        """Initializer"""
         self.fget = fget
         self.fset = fset
 
     def __get__(self, obj, klass=None):
-        '''Get attribute'''
+        """Get attribute"""
         if klass is None:
             klass = type(obj)
         return self.fget.__get__(obj, klass)()
 
     def __set__(self, obj, value):
-        '''Set attribute'''
+        """Set attribute"""
         if not self.fset:
             raise AttributeError(_("Can't set attribute"))
         type_ = type(obj)
         return self.fset.__get__(obj, type_)(value)
 
     def setter(self, func):
-        '''Setter'''
+        """Setter"""
         if not isinstance(func, (classmethod, staticmethod)):
             func = classmethod(func)
         self.fset = func
@@ -77,7 +74,7 @@ class ClassPropertyDescriptor(object):  # pylint: disable=R0903
 
 
 def classproperty(func):
-    '''Class Property decorator'''
+    """Class Property decorator"""
     if not isinstance(func, (classmethod, staticmethod)):
         func = classmethod(func)
 
@@ -85,36 +82,36 @@ def classproperty(func):
 
 
 class Results(list):
-    '''Query results'''
+    """Query results"""
 
     def __init__(self, *args, **kwargs):  # pylint: disable=W0613
-        '''Initializer'''
+        """Initializer"""
         super(Results, self).__init__(args[0])
 
     def all(self):
-        '''Return all'''
+        """Return all"""
         return self
 
     def first(self):
-        '''Return first'''
+        """Return first"""
         if len(self) > 0:
             return self[0]
 
 
 @six.add_metaclass(ABCMeta)
 class Base(object):
-    ''' A custom declarative base that provides some Elixir-inspired shortcuts. '''
+    """ A custom declarative base that provides some Elixir-inspired shortcuts. """
 
     __tablename__ = None
 
     @classproperty
     def query(cls):  # pylint: disable=E0213
-        '''Return a query object a la sqlalchemy'''
+        """Return a query object a la sqlalchemy"""
         return Query(cls)
 
     @classmethod
     def __kwargs(cls):
-        '''Return common keyword args'''
+        """Return common keyword args"""
         keyspace = conf.music.get('keyspace')
         kwargs = {
             'keyspace': keyspace,
@@ -124,33 +121,33 @@ class Base(object):
 
     @classmethod
     def create_table(cls):
-        '''Create table'''
+        """Create table"""
         kwargs = cls.__kwargs()
         kwargs['schema'] = cls.schema()
         conf.music.engine.create_table(**kwargs)
 
     @abstractclassmethod
     def schema(cls):
-        '''Return schema'''
+        """Return schema"""
         return cls()
 
     @abstractclassmethod
     def pk_name(cls):
-        '''Primary key name'''
+        """Primary key name"""
         return cls()
 
     @abstractmethod
     def pk_value(self):
-        '''Primary key value'''
+        """Primary key value"""
         pass
 
     @abstractmethod
     def values(self):
-        '''Values'''
+        """Values"""
         pass
 
     def insert(self):
-        '''Insert row'''
+        """Insert row"""
         kwargs = self.__kwargs()
         kwargs['values'] = self.values()
         pk_name = self.pk_name()
@@ -161,7 +158,7 @@ class Base(object):
         conf.music.engine.create_row(**kwargs)
 
     def update(self):
-        '''Update row'''
+        """Update row"""
         kwargs = self.__kwargs()
         kwargs['pk_name'] = self.pk_name()
         kwargs['pk_value'] = self.pk_value()
@@ -169,7 +166,7 @@ class Base(object):
         conf.music.engine.update_row_eventually(**kwargs)
 
     def delete(self):
-        '''Delete row'''
+        """Delete row"""
         kwargs = self.__kwargs()
         kwargs['pk_name'] = self.pk_name()
         kwargs['pk_value'] = self.pk_value()
@@ -177,26 +174,26 @@ class Base(object):
 
     @classmethod
     def filter_by(cls, **kwargs):
-        '''Filter objects'''
+        """Filter objects"""
         return cls.query.filter_by(**kwargs)  # pylint: disable=E1101
 
     def flush(self, *args, **kwargs):
-        '''Flush changes to storage'''
+        """Flush changes to storage"""
         # TODO(JD): Implement in music? May be a no-op
         pass
 
     def as_dict(self):
-        '''Return object representation as a dictionary'''
+        """Return object representation as a dictionary"""
         return dict((k, v) for k, v in self.__dict__.items()
                     if not k.startswith('_'))
 
 
 class Query(object):
-    '''Data Query'''
+    """Data Query"""
     model = None
 
     def __init__(self, model):
-        '''Initializer'''
+        """Initializer"""
         if inspect.isclass(model):
             self.model = model
         elif isinstance(model, basestring):
@@ -204,7 +201,7 @@ class Query(object):
         assert inspect.isclass(self.model)
 
     def __kwargs(self):
-        '''Return common keyword args'''
+        """Return common keyword args"""
         keyspace = conf.music.get('keyspace')
         kwargs = {
             'keyspace': keyspace,
@@ -213,7 +210,7 @@ class Query(object):
         return kwargs
 
     def __rows_to_objects(self, rows):
-        '''Convert query response rows to objects'''
+        """Convert query response rows to objects"""
         results = []
         pk_name = self.model.pk_name()  # pylint: disable=E1101
         for __, row in rows.iteritems():  # pylint: disable=W0612
@@ -224,13 +221,13 @@ class Query(object):
         return Results(results)
 
     def all(self):
-        '''Return all objects'''
+        """Return all objects"""
         kwargs = self.__kwargs()
         rows = conf.music.engine.read_all_rows(**kwargs)
         return self.__rows_to_objects(rows)
 
     def filter_by(self, **kwargs):
-        '''Filter objects'''
+        """Filter objects"""
         # Music doesn't allow filtering on anything but the primary key.
         # We need to get all items and then go looking for what we want.
         all_items = self.all()
@@ -250,14 +247,14 @@ class Query(object):
 
 
 def init_model():
-    '''Data Store Initialization'''
+    """Data Store Initialization"""
     conf.music.engine = _engine_from_config(conf.music)
     keyspace = conf.music.get('keyspace')
     conf.music.engine.create_keyspace(keyspace)
 
 
 def _engine_from_config(configuration):
-    '''Create database engine object based on configuration'''
+    """Create database engine object based on configuration"""
     configuration = dict(configuration)
     kwargs = {
         'host': configuration.get('host'),
@@ -268,36 +265,30 @@ def _engine_from_config(configuration):
 
 
 def start():
-    '''Start transaction'''
+    """Start transaction"""
     pass
 
 
 def start_read_only():
-    '''Start read-only transaction'''
+    """Start read-only transaction"""
     start()
 
 
 def commit():
-    '''Commit transaction'''
+    """Commit transaction"""
     pass
 
 
 def rollback():
-    '''Rollback transaction'''
+    """Rollback transaction"""
     pass
 
 
 def clear():
-    '''Clear transaction'''
+    """Clear transaction"""
     pass
 
 
 def flush():
-    '''Flush to disk'''
+    """Flush to disk"""
     pass
 
 
 from valet.api.db.models.music.groups import Group
 from valet.api.db.models.music.ostro import PlacementRequest, PlacementResult, Event
 from valet.api.db.models.music.placements import Placement
 from valet.api.db.models.music.plans import Plan
@@ -13,14 +13,15 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-'''Group Model'''
+"""Group Model."""
 
 from . import Base
 import simplejson
 
 
 class Group(Base):
-    '''Group model'''
+    """Group model."""
 
     __tablename__ = 'groups'
 
     id = None  # pylint: disable=C0103
@@ -31,7 +32,7 @@ class Group(Base):
 
     @classmethod
     def schema(cls):
-        '''Return schema.'''
+        """Return schema."""
         schema = {
             'id': 'text',
             'name': 'text',
@@ -44,16 +45,16 @@ class Group(Base):
 
     @classmethod
     def pk_name(cls):
-        '''Primary key name'''
+        """Primary key name."""
         return 'id'
 
     def pk_value(self):
-        '''Primary key value'''
+        """Primary key value."""
         return self.id
 
     def values(self):
-        '''Values'''
-        # TODO(JD): Support lists in Music
+        """Values."""
+        # TODO(UNKNOWN): Support lists in Music
         # Lists aren't directly supported in Music, so we have to
         # convert to/from json on the way out/in.
         return {
@@ -64,7 +65,7 @@ class Group(Base):
         }
 
     def __init__(self, name, description, type, members, _insert=True):
-        '''Initializer'''
+        """Initializer."""
         super(Group, self).__init__()
         self.name = name
         self.description = description or ""
@@ -73,15 +74,15 @@ class Group(Base):
             self.members = []  # members ignored at init time
             self.insert()
         else:
-            # TODO(JD): Support lists in Music
+            # TODO(UNKNOWN): Support lists in Music
             self.members = simplejson.loads(members)
 
     def __repr__(self):
-        '''Object representation'''
+        """Object representation."""
         return '<Group %r>' % self.name
 
     def __json__(self):
-        '''JSON representation'''
+        """JSON representation."""
         json_ = {}
         json_['id'] = self.id
         json_['name'] = self.name
@@ -13,7 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-'''Music Data Store API'''
+"""Music Data Store API."""
 
 import json
 import logging
@@ -27,7 +27,7 @@ LOG = logging.getLogger(__name__)
 
 
 class REST(object):
-    '''Helper class for REST operations.'''
+    """Helper class for REST operations."""
 
     hosts = None
     port = None
@@ -37,8 +37,7 @@ class REST(object):
     _urls = None
 
     def __init__(self, hosts, port, path='/', timeout='10'):
-        '''Initializer. Accepts target host list, port, and path.'''
-
+        """Initializer. Accepts target host list, port, and path."""
         self.hosts = hosts  # List of IP or FQDNs
         self.port = port  # Port Number
         self.path = path  # Path starting with /
@@ -46,8 +45,7 @@ class REST(object):
 
     @property
     def urls(self):
-        '''Returns list of URLs using each host, plus the port/path.'''
-
+        """Return list of URLs using each host, plus the port/path."""
         if not self._urls:
             urls = []
             for host in self.hosts:
@@ -62,17 +60,19 @@ class REST(object):
 
     @staticmethod
     def __headers(content_type='application/json'):
-        '''Returns HTTP request headers.'''
+        """Return HTTP request headers."""
         headers = {
             'accept': content_type,
             'content-type': content_type,
         }
         return headers
 
-    def request(self, method='get', content_type='application/json', path='/', data=None):
-        ''' Performs HTTP request '''
+    def request(self, method='get', content_type='application/json', path='/',
+                data=None):
+        """Perform HTTP request."""
         if method not in ('post', 'get', 'put', 'delete'):
-            raise KeyError(_("Method must be one of post, get, put, or delete."))
+            raise KeyError(_("Method must be one of post, get, put, "
+                             "or delete."))
         method_fn = getattr(requests, method)
 
         response = None
@@ -107,7 +107,8 @@ class REST(object):
 
 
 class Music(object):
-    '''Wrapper for Music API'''
+    """Wrapper for Music API."""
 
     lock_names = None  # Cache of lock names created during session
     lock_timeout = None  # Maximum time in seconds to acquire a lock
 
@@ -116,8 +117,7 @@ class Music(object):
 
     def __init__(self, host=None, hosts=None,  # pylint: disable=R0913
                  port='8080', lock_timeout=10, replication_factor=3):
-        '''Initializer. Accepts a lock_timeout for atomic operations.'''
-
+        """Initializer. Accept a lock_timeout for atomic operations."""
         # If one host is provided, that overrides the list
         if not hosts:
             hosts = ['localhost']
@@ -137,7 +137,7 @@ class Music(object):
         self.replication_factor = replication_factor
 
     def create_keyspace(self, keyspace):
-        '''Creates a keyspace.'''
+        """Create a keyspace."""
         data = {
             'replicationInfo': {
                 'class': 'SimpleStrategy',
@@ -154,7 +154,7 @@ class Music(object):
         return response.ok
 
     def create_table(self, keyspace, table, schema):
-        '''Creates a table.'''
+        """Create a table."""
         data = {
             'fields': schema,
             'consistencyInfo': {
@@ -171,14 +171,14 @@ class Music(object):
         return response.ok
 
     def version(self):
-        '''Returns version string.'''
+        """Return version string."""
         path = '/version'
         response = self.rest.request(method='get',
                                      content_type='text/plain', path=path)
         return response.text
 
     def create_row(self, keyspace, table, values):
-        '''Create a row.'''
+        """Create a row."""
         data = {
             'values': values,
             'consistencyInfo': {
@@ -194,14 +194,14 @@ class Music(object):
         return response.ok
 
     def create_lock(self, lock_name):
-        '''Returns the lock id. Use for acquiring and releasing.'''
+        """Return the lock id. Use for acquiring and releasing."""
         path = '/locks/create/%s' % lock_name
         response = self.rest.request(method='post',
                                      content_type='text/plain', path=path)
         return response.text
 
     def acquire_lock(self, lock_id):
-        '''Acquire a lock.'''
+        """Acquire a lock."""
         path = '/locks/acquire/%s' % lock_id
         response = self.rest.request(method='get',
                                      content_type='text/plain', path=path)
@@ -209,7 +209,7 @@ class Music(object):
         return response.text.lower() == 'true'
 
     def release_lock(self, lock_id):
-        '''Release a lock.'''
+        """Release a lock."""
         path = '/locks/release/%s' % lock_id
         response = self.rest.request(method='delete',
                                      content_type='text/plain', path=path)
@@ -217,7 +217,7 @@ class Music(object):
 
     @staticmethod
     def __row_url_path(keyspace, table, pk_name, pk_value):
-        '''Returns a Music-compliant row URL path.'''
+        """Return a Music-compliant row URL path."""
         path = '/keyspaces/%(keyspace)s/tables/%(table)s/rows' % {
             'keyspace': keyspace,
             'table': table,
@@ -229,7 +229,7 @@ class Music(object):
 
     def update_row_eventually(self, keyspace, table,  # pylint: disable=R0913
                               pk_name, pk_value, values):
-        '''Update a row. Not atomic.'''
+        """Update a row. Not atomic."""
         data = {
             'values': values,
             'consistencyInfo': {
@@ -243,8 +243,7 @@ class Music(object):
 
     def update_row_atomically(self, keyspace, table,  # pylint: disable=R0913
                               pk_name, pk_value, values):
-        '''Update a row atomically.'''
-
+        """Update a row atomically."""
         # Create lock for the candidate. The Music API dictates that the
         # lock name must be of the form keyspace.table.primary_key
         lock_name = '%(keyspace)s.%(table)s.%(primary_key)s' % {
@@ -279,7 +278,7 @@ class Music(object):
         return response.ok
 
     def delete_row_eventually(self, keyspace, table, pk_name, pk_value):
-        '''Delete a row. Not atomic.'''
+        """Delete a row. Not atomic."""
         data = {
             'consistencyInfo': {
                 'type': 'eventual',
@@ -291,7 +290,7 @@ class Music(object):
         return response.ok
 
     def read_row(self, keyspace, table, pk_name, pk_value, log=None):
-        '''Read one row based on a primary key name/value.'''
+        """Read one row based on a primary key name/value."""
         path = self.__row_url_path(keyspace, table, pk_name, pk_value)
         response = self.rest.request(path=path)
         if log:
@@ -299,11 +298,11 @@ class Music(object):
         return response.json()
 
     def read_all_rows(self, keyspace, table):
-        '''Read all rows.'''
+        """Read all rows."""
         return self.read_row(keyspace, table, pk_name=None, pk_value=None)
 
     def drop_keyspace(self, keyspace):
-        '''Drops a keyspace.'''
+        """Drop a keyspace."""
         data = {
             'consistencyInfo': {
                 'type': 'eventual',
@@ -315,16 +314,15 @@ class Music(object):
         return response.ok
 
     def delete_lock(self, lock_name):
-        '''Deletes a lock by name.'''
+        """Delete a lock by name."""
         path = '/locks/delete/%s' % lock_name
         response = self.rest.request(content_type='text/plain',
                                      method='delete', path=path)
         return response.ok
 
     def delete_all_locks(self):
-        '''Delete all locks created during the lifetime of this object.'''
-
-        # TODO(JD): Shouldn't this really be part of internal cleanup?
+        """Delete all locks created during the lifetime of this object."""
+        # TODO(UNKNOWN): Shouldn't this really be part of internal cleanup?
         # FIXME: It can be several API calls. Any way to do in one fell swoop?
         for lock_name in self.lock_names:
             self.delete_lock(lock_name)
@@ -13,13 +13,14 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-'''Ostro Models'''
+"""Ostro Models."""
 
 from . import Base
 
 
 class PlacementRequest(Base):
-    '''Placement Request Model'''
+    """Placement Request Model."""
 
     __tablename__ = 'placement_requests'
 
     stack_id = None
@@ -27,7 +28,7 @@ class PlacementRequest(Base):
 
     @classmethod
     def schema(cls):
-        '''Return schema.'''
+        """Return schema."""
         schema = {
             'stack_id': 'text',
             'request': 'text',
@@ -37,22 +38,22 @@ class PlacementRequest(Base):
 
     @classmethod
     def pk_name(cls):
-        '''Primary key name'''
+        """Primary key name."""
         return 'stack_id'
 
     def pk_value(self):
-        '''Primary key value'''
+        """Primary key value."""
         return self.stack_id
 
     def values(self):
-        '''Values'''
+        """Values."""
         return {
             'stack_id': self.stack_id,
            'request': self.request,
         }
 
     def __init__(self, request, stack_id=None, _insert=True):
-        '''Initializer'''
+        """Initializer."""
         super(PlacementRequest, self).__init__()
         self.stack_id = stack_id
         self.request = request
@@ -60,11 +61,11 @@ class PlacementRequest(Base):
         self.insert()
 
     def __repr__(self):
-        '''Object representation'''
+        """Object representation."""
         return '<PlacementRequest %r>' % self.stack_id
 
     def __json__(self):
-        '''JSON representation'''
+        """JSON representation."""
         json_ = {}
         json_['stack_id'] = self.stack_id
         json_['request'] = self.request
@@ -72,7 +73,8 @@ class PlacementRequest(Base):
 
 
 class PlacementResult(Base):
-    '''Placement Result Model'''
+    """Placement Result Model."""
 
     __tablename__ = 'placement_results'
 
     stack_id = None
@@ -80,7 +82,7 @@ class PlacementResult(Base):
 
     @classmethod
     def schema(cls):
-        '''Return schema.'''
+        """Return schema."""
         schema = {
             'stack_id': 'text',
             'placement': 'text',
@@ -90,22 +92,22 @@ class PlacementResult(Base):
 
     @classmethod
     def pk_name(cls):
-        '''Primary key name'''
+        """Primary key name."""
         return 'stack_id'
 
     def pk_value(self):
-        '''Primary key value'''
+        """Primary key value."""
         return self.stack_id
 
     def values(self):
-        '''Values'''
+        """Values."""
         return {
             'stack_id': self.stack_id,
             'placement': self.placement,
         }
 
     def __init__(self, placement, stack_id=None, _insert=True):
-        '''Initializer'''
+        """Initializer."""
         super(PlacementResult, self).__init__()
         self.stack_id = stack_id
         self.placement = placement
@@ -113,11 +115,11 @@ class PlacementResult(Base):
         self.insert()
 
     def __repr__(self):
-        '''Object representation'''
+        """Object representation."""
         return '<PlacementResult %r>' % self.stack_id
 
     def __json__(self):
-        '''JSON representation'''
+        """JSON representation."""
         json_ = {}
         json_['stack_id'] = self.stack_id
         json_['placement'] = self.placement
@@ -125,7 +127,8 @@ class PlacementResult(Base):
 
 
 class Event(Base):
-    '''Event Model'''
+    """Event Model."""
 
     __tablename__ = 'events'
 
     event_id = None
@@ -133,7 +136,7 @@ class Event(Base):
 
     @classmethod
     def schema(cls):
-        '''Return schema.'''
+        """Return schema."""
         schema = {
             'event_id': 'text',
             'event': 'text',
@@ -143,22 +146,22 @@ class Event(Base):
 
     @classmethod
     def pk_name(cls):
-        '''Primary key name'''
+        """Primary key name."""
         return 'event_id'
 
     def pk_value(self):
-        '''Primary key value'''
+        """Primary key value."""
         return self.event_id
 
     def values(self):
-        '''Values'''
+        """Values."""
         return {
             'event_id': self.event_id,
             'event': self.event,
         }
 
     def __init__(self, event, event_id=None, _insert=True):
-        '''Initializer'''
+        """Initializer."""
         super(Event, self).__init__()
         self.event_id = event_id
         self.event = event
@@ -166,11 +169,11 @@ class Event(Base):
         self.insert()
 
     def __repr__(self):
-        '''Object representation'''
+        """Object representation."""
         return '<Event %r>' % self.event_id
 
     def __json__(self):
-        '''JSON representation'''
+        """JSON representation."""
         json_ = {}
         json_['event_id'] = self.event_id
         json_['event'] = self.event
@@ -13,13 +13,14 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-'''Placement Model'''
+"""Placement Model."""
 
 from . import Base, Query
 
 
 class Placement(Base):
-    '''Placement Model'''
+    """Placement Model."""
 
     __tablename__ = 'placements'
 
     id = None  # pylint: disable=C0103
@@ -32,7 +33,7 @@ class Placement(Base):
 
     @classmethod
     def schema(cls):
-        '''Return schema.'''
+        """Return schema."""
         schema = {
             'id': 'text',
             'name': 'text',
@@ -47,15 +48,15 @@ class Placement(Base):
 
     @classmethod
     def pk_name(cls):
-        '''Primary key name'''
+        """Primary key name."""
         return 'id'
 
     def pk_value(self):
-        '''Primary key value'''
+        """Primary key value."""
         return self.id
 
     def values(self):
-        '''Values'''
+        """Values."""
         return {
             'name': self.name,
             'orchestration_id': self.orchestration_id,
@@ -67,7 +68,7 @@ class Placement(Base):
 
     def __init__(self, name, orchestration_id, resource_id=None, plan=None,
                  plan_id=None, location=None, reserved=False, _insert=True):
-        '''Initializer'''
+        """Initializer."""
         super(Placement, self).__init__()
         self.name = name
         self.orchestration_id = orchestration_id
@@ -82,11 +83,11 @@ class Placement(Base):
         self.insert()
 
     def __repr__(self):
-        '''Object representation'''
+        """Object representation."""
         return '<Placement %r>' % self.name
 
     def __json__(self):
-        '''JSON representation'''
+        """JSON representation."""
         json_ = {}
         json_['id'] = self.id
         json_['name'] = self.name
@@ -13,13 +13,14 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-'''Plan Model'''
+"""Plan Model."""
 
 from . import Base, Query
 
 
 class Plan(Base):
-    '''Plan model'''
+    """Plan model."""
 
     __tablename__ = 'plans'
 
     id = None  # pylint: disable=C0103
@@ -28,7 +29,7 @@ class Plan(Base):
 
     @classmethod
     def schema(cls):
-        '''Return schema.'''
+        """Return schema."""
         schema = {
             'id': 'text',
             'name': 'text',
@@ -39,22 +40,22 @@ class Plan(Base):
 
     @classmethod
     def pk_name(cls):
-        '''Primary key name'''
+        """Primary key name."""
         return 'id'
 
     def pk_value(self):
-        '''Primary key value'''
+        """Primary key value."""
         return self.id
 
     def values(self):
-        '''Values'''
+        """Values."""
         return {
             'name': self.name,
             'stack_id': self.stack_id,
         }
 
     def __init__(self, name, stack_id, _insert=True):
-        '''Initializer'''
+        """Initializer."""
         super(Plan, self).__init__()
         self.name = name
         self.stack_id = stack_id
@@ -62,9 +63,8 @@ class Plan(Base):
         self.insert()
 
     def placements(self):
-        '''Return list of placements'''
-
-        # TODO(JD): Make this a property?
+        """Return list of placements."""
+        # TODO(UNKNOWN): Make this a property?
         all_results = Query("Placement").all()
         results = []
         for placement in all_results:
@@ -74,15 +74,15 @@ class Plan(Base):
 
     @property
     def orchestration_ids(self):
-        '''Return list of orchestration IDs'''
+        """Return list of orchestration IDs."""
         return list(set([p.orchestration_id for p in self.placements()]))
 
     def __repr__(self):
-        '''Object representation'''
+        """Object representation."""
         return '<Plan %r>' % self.name
 
     def __json__(self):
-        '''JSON representation'''
+        """JSON representation."""
         json_ = {}
         json_['id'] = self.id
         json_['stack_id'] = self.stack_id
@@ -13,7 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-'''Populate command'''
+"""Populate command."""
 
 from pecan.commands.base import BaseCommand
 
@@ -29,14 +29,15 @@ from valet.api.db.models import Plan
 
 
 def out(string):
-    '''Output helper'''
+    """Output helper."""
     print("==> %s" % string)
 
 
 class PopulateCommand(BaseCommand):
-    '''Load a pecan environment and initializate the database.'''
+    """Load a pecan environment and initializate the database."""
 
     def run(self, args):
+        """Function creates and initializes database and environment."""
         super(PopulateCommand, self).run(args)
         out(_("Loading environment"))
         register_conf()
@@ -1,22 +1,19 @@
 # -*- encoding: utf-8 -*-
 #
-# Copyright (c) 2014-2016 AT&T
+# Copyright 2014-2017 AT&T Intellectual Property
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
 #
-#    http://www.apache.org/licenses/LICENSE-2.0
+#     http://www.apache.org/licenses/LICENSE-2.0
 #
 # Unless required by applicable law or agreed to in writing, software
 # distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-#
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-'''Controllers Package'''
+"""Controllers Package."""
 
 import logging
 from notario.decorators import instance_of
@@ -36,7 +33,7 @@ LOG = logging.getLogger(__name__)
 #
 
 def valid_group_name(value):
-    '''Validator for group name type.'''
+    """Validator for group name type."""
     if not value or not set(value) <= set(string.letters + string.digits + "-._~"):
         LOG.error("group name is not valid")
         LOG.error("group name must contain only uppercase and lowercase letters, decimal digits, \
@@ -45,12 +42,12 @@ def valid_group_name(value):
 
 @instance_of((list, dict))
 def valid_plan_resources(value):
-    '''Validator for plan resources.'''
+    """Validator for plan resources."""
     ensure(len(value) > 0)
 
 
 def valid_plan_update_action(value):
-    '''Validator for plan update action.'''
+    """Validator for plan update action."""
     assert value in ['update', 'migrate'], _("must be update or migrate")
 
 #
@@ -59,7 +56,7 @@ def valid_plan_update_action(value):
 
 
 def set_placements(plan, resources, placements):
-    '''Set placements'''
+    """Set placements."""
     for uuid in placements.iterkeys():
         name = resources[uuid]['name']
         properties = placements[uuid]['properties']
@@ -70,11 +67,11 @@ def set_placements(plan, resources, placements):
 
 
 def reserve_placement(placement, resource_id=None, reserve=True, update=True):
-    ''' Reserve placement. Can optionally set the physical resource id.
+    """Reserve placement. Can optionally set the physical resource id.
 
     Set reserve=False to unreserve. Set update=False to not update
     the data store (if the update will be made later).
-    '''
+    """
     if placement:
         LOG.info(_('%(rsrv)s placement of %(orch_id)s in %(loc)s.'),
                  {'rsrv': _("Reserving") if reserve else _("Unreserving"),
@@ -92,7 +89,7 @@ def reserve_placement(placement, resource_id=None, reserve=True, update=True):
 
 
 def update_placements(placements, reserve_id=None, unlock_all=False):
-    '''Update placements. Optionally reserve one placement.'''
+    """Update placements. Optionally reserve one placement."""
     for uuid in placements.iterkeys():
         placement = Placement.query.filter_by(  # pylint: disable=E1101
             orchestration_id=uuid).first()
@@ -119,7 +116,7 @@ def update_placements(placements, reserve_id=None, unlock_all=False):
 #
 
 def error(url, msg=None, **kwargs):
-    '''Error handler'''
+    """Error handler."""
     if msg:
         request.context['error_message'] = msg
     if kwargs:
@@ -13,7 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-'''Errors'''
+"""Errors."""
 
 import logging
 from pecan import expose, request, response
@@ -26,10 +26,9 @@ LOG = logging.getLogger(__name__)
 
 
 def error_wrapper(func):
-    '''Error decorator.'''
+    """Error decorator."""
     def func_wrapper(self, **kw):
-        '''Wrapper.'''
-
+        """Wrapper."""
         kwargs = func(self, **kw)
         status = status_map.get(response.status_code)
         message = getattr(status, 'explanation', '')
@@ -56,12 +55,12 @@ def error_wrapper(func):
 
 # pylint: disable=W0613
 class ErrorsController(object):
-    ''' Errors Controller /errors/{error_name} '''
+    """Error Controller /errors/{error_name}."""
 
     @expose('json')
     @error_wrapper
     def schema(self, **kw):
-        '''400'''
+        """400."""
         request.context['error_message'] = str(request.validation_error)
         response.status = 400
         return request.context.get('kwargs')
@@ -69,13 +68,13 @@ class ErrorsController(object):
     @expose('json')
     @error_wrapper
     def invalid(self, **kw):
-        '''400'''
+        """400."""
         response.status = 400
         return request.context.get('kwargs')
 
     @expose()
     def unauthorized(self, **kw):
-        '''401'''
+        """401."""
         # This error is terse and opaque on purpose.
         # Don't give any clues to help AuthN along.
         response.status = 401
@@ -92,21 +91,21 @@ class ErrorsController(object):
     @expose('json')
     @error_wrapper
     def forbidden(self, **kw):
-        '''403'''
+        """403."""
         response.status = 403
         return request.context.get('kwargs')
 
     @expose('json')
     @error_wrapper
     def not_found(self, **kw):
-        '''404'''
+        """404."""
         response.status = 404
         return request.context.get('kwargs')
 
     @expose('json')
     @error_wrapper
     def not_allowed(self, **kw):
-        '''405'''
+        """405."""
         kwargs = request.context.get('kwargs')
         if kwargs:
             allow = kwargs.get('allow', None)
@@ -118,20 +117,20 @@ class ErrorsController(object):
     @expose('json')
     @error_wrapper
     def conflict(self, **kw):
-        '''409'''
+        """409."""
         response.status = 409
         return request.context.get('kwargs')
 
     @expose('json')
     @error_wrapper
     def server_error(self, **kw):
-        '''500'''
+        """500."""
         response.status = 500
         return request.context.get('kwargs')
 
     @expose('json')
     @error_wrapper
     def unavailable(self, **kw):
-        '''503'''
+        """503."""
         response.status = 503
         return request.context.get('kwargs')
@@ -13,7 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-'''Groups'''
+"""Groups."""
 
 import logging
 
@@ -48,7 +48,7 @@ MEMBERS_SCHEMA = (
 
 
 def server_list_for_group(group):
-    '''Returns a list of VMs associated with a member/group.'''
+    """Return a list of VMs associated with a member/group."""
     args = {
         "type": "group_vms",
         "parameters": {
@@ -72,7 +72,7 @@ def server_list_for_group(group):
 
 
 def tenant_servers_in_group(tenant_id, group):
-    ''' Returns a list of servers the current tenant has in group_name '''
+    """Return a list of servers the current tenant has in group_name."""
     servers = []
     server_list = server_list_for_group(group)
     nova = nova_client()
@@ -89,20 +89,23 @@ def tenant_servers_in_group(tenant_id, group):
 
 
 def no_tenant_servers_in_group(tenant_id, group):
-    ''' Verify no servers from tenant_id are in group.
+    """Verify no servers from tenant_id are in group.
 
     Throws a 409 Conflict if any are found.
-    '''
+    """
     server_list = tenant_servers_in_group(tenant_id, group)
     if server_list:
-        error('/errors/conflict', _('Tenant Member {0} has servers in group "{1}": {2}').format(tenant_id, group.name, server_list))
+        error('/errors/conflict', _('Tenant Member {0} has servers in group '
+                                    '"{1}": {2}').format(tenant_id,
+                                                         group.name,
+                                                         server_list))
 
 
 class MembersItemController(object):
-    ''' Members Item Controller /v1/groups/{group_id}/members/{member_id} '''
+    """Member Item Controller /v1/groups/{group_id}/members/{member_id}."""
 
     def __init__(self, member_id):
-        '''Initialize group member'''
+        """Initialize group member."""
         group = request.context['group']
         if member_id not in group.members:
             error('/errors/not_found', _('Member not found in group'))
@@ -110,30 +113,30 @@ class MembersItemController(object):
 
     @classmethod
     def allow(cls):
-        '''Allowed methods'''
+        """Allowed methods."""
         return 'GET,DELETE'
 
     @expose(generic=True, template='json')
     def index(self):
-        '''Catch all for unallowed methods'''
+        """Catch all for unallowed methods."""
         message = _('The %s method is not allowed.') % request.method
         kwargs = {'allow': self.allow()}
         error('/errors/not_allowed', message, **kwargs)
 
     @index.when(method='OPTIONS', template='json')
     def index_options(self):
-        '''Options'''
+        """Index Options."""
         response.headers['Allow'] = self.allow()
         response.status = 204
 
     @index.when(method='GET', template='json')
     def index_get(self):
-        '''Verify group member'''
+        """Verify group member."""
         response.status = 204
 
     @index.when(method='DELETE', template='json')
    def index_delete(self):
-        '''Delete group member'''
+        """Delete group member."""
         group = request.context['group']
         member_id = request.context['member_id']
 
@@ -146,34 +149,35 @@ class MembersItemController(object):
 
 
 class MembersController(object):
-    ''' Members Controller /v1/groups/{group_id}/members '''
+    """Members Controller /v1/groups/{group_id}/members."""
 
     @classmethod
     def allow(cls):
-        '''Allowed methods'''
+        """Allowed methods."""
         return 'PUT,DELETE'
 
     @expose(generic=True, template='json')
     def index(self):
-        '''Catchall for unallowed methods'''
+        """Catchall for unallowed methods."""
         message = _('The %s method is not allowed.') % request.method
         kwargs = {'allow': self.allow()}
         error('/errors/not_allowed', message, **kwargs)
 
     @index.when(method='OPTIONS', template='json')
     def index_options(self):
-        '''Options'''
+        """Index Options."""
         response.headers['Allow'] = self.allow()
         response.status = 204
 
     @index.when(method='PUT', template='json')
     @validate(MEMBERS_SCHEMA, '/errors/schema')
     def index_put(self, **kwargs):
-        '''Add one or more members to a group'''
+        """Add one or more members to a group."""
         new_members = kwargs.get('members', None)
 
         if not conf.identity.engine.is_tenant_list_valid(new_members):
-            error('/errors/conflict', _('Member list contains invalid tenant IDs'))
+            error('/errors/conflict', _('Member list contains '
+                                        'invalid tenant IDs'))
 
         group = request.context['group']
         group.members = list(set(group.members + new_members))
@@ -186,7 +190,7 @@ class MembersController(object):
 
     @index.when(method='DELETE', template='json')
     def index_delete(self):
-        '''Delete all group members'''
+        """Delete all group members."""
         group = request.context['group']
 
         # Can't delete a member if it has associated VMs.
@@ -199,49 +203,50 @@ class MembersController(object):
 
     @expose()
     def _lookup(self, member_id, *remainder):
-        '''Pecan subcontroller routing callback'''
+        """Pecan subcontroller routing callback."""
         return MembersItemController(member_id), remainder
 
 
 class GroupsItemController(object):
-    ''' Groups Item Controller /v1/groups/{group_id} '''
+    """Group Item Controller /v1/groups/{group_id}."""
 
     members = MembersController()
 
     def __init__(self, group_id):
-        '''Initialize group'''
-        group = Group.query.filter_by(id=group_id).first()  # pylint: disable=E1101
+        """Initialize group."""
+        # pylint:disable=E1101
+        group = Group.query.filter_by(id=group_id).first()
        if not group:
            error('/errors/not_found', _('Group not found'))
        request.context['group'] = group
 
     @classmethod
     def allow(cls):
-        ''' Allowed methods '''
+        """Allowed methods."""
         return 'GET,PUT,DELETE'
 
     @expose(generic=True, template='json')
     def index(self):
-        '''Catchall for unallowed methods'''
+        """Catchall for unallowed methods."""
         message = _('The %s method is not allowed.') % request.method
         kwargs = {'allow': self.allow()}
         error('/errors/not_allowed', message, **kwargs)
 
     @index.when(method='OPTIONS', template='json')
     def index_options(self):
-        '''Options'''
+        """Index Options."""
         response.headers['Allow'] = self.allow()
         response.status = 204
 
     @index.when(method='GET', template='json')
     def index_get(self):
-        '''Display a group'''
+        """Display a group."""
         return {"group": request.context['group']}
 
     @index.when(method='PUT', template='json')
     @validate(UPDATE_GROUPS_SCHEMA, '/errors/schema')
     def index_put(self, **kwargs):
-        '''Update a group'''
+        """Update a group."""
         # Name and type are immutable.
         # Group Members are updated in MembersController.
         group = request.context['group']
@@ -255,7 +260,7 @@ class GroupsItemController(object):
 
     @index.when(method='DELETE', template='json')
     def index_delete(self):
-        '''Delete a group'''
+        """Delete a group."""
         group = request.context['group']
         if isinstance(group.members, list) and len(group.members) > 0:
             error('/errors/conflict', _('Unable to delete a Group with members.'))
@@ -264,29 +269,29 @@ class GroupsItemController(object):
 
 
 class GroupsController(object):
-    ''' Groups Controller /v1/groups '''
+    """Group Controller /v1/groups."""
 
     @classmethod
     def allow(cls):
-        '''Allowed methods'''
+        """Allowed methods."""
         return 'GET,POST'
 
     @expose(generic=True, template='json')
     def index(self):
-        '''Catch all for unallowed methods'''
+        """Catch all for unallowed methods."""
         message = _('The %s method is not allowed.') % request.method
         kwargs = {'allow': self.allow()}
         error('/errors/not_allowed', message, **kwargs)
 
     @index.when(method='OPTIONS', template='json')
     def index_options(self):
-        '''Options'''
+        """Index Options."""
         response.headers['Allow'] = self.allow()
         response.status = 204
 
     @index.when(method='GET', template='json')
     def index_get(self):
-        '''List groups'''
+        """List groups."""
         groups_array = []
         for group in Group.query.all():  # pylint: disable=E1101
             groups_array.append(group)
@@ -295,7 +300,7 @@ class GroupsController(object):
     @index.when(method='POST', template='json')
     @validate(GROUPS_SCHEMA, '/errors/schema')
     def index_post(self, **kwargs):
-        '''Create a group'''
+        """Create a group."""
         group_name = kwargs.get('name', None)
         description = kwargs.get('description', None)
         group_type = kwargs.get('type', None)
@@ -314,5 +319,5 @@ class GroupsController(object):
 
     @expose()
     def _lookup(self, group_id, *remainder):
-        '''Pecan subcontroller routing callback'''
+        """Pecan subcontroller routing callback."""
         return GroupsItemController(group_id), remainder
@ -13,7 +13,7 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
'''Placements'''
|
||||
"""Placements."""
|
||||
|
import logging

@ -32,50 +32,53 @@ LOG = logging.getLogger(__name__)


class PlacementsItemController(object):
    ''' Placements Item Controller /v1/placements/{placement_id} '''
    """Placements Item Controller /v1/placements/{placement_id}."""

    def __init__(self, uuid4):
        '''Initializer.'''
        """Initializer."""
        self.uuid = uuid4
        self.placement = Placement.query.filter_by(id=self.uuid).first()  # pylint: disable=E1101
        self.placement = Placement.query.filter_by(id=self.uuid).first()
        # pylint: disable=E1101
        if not self.placement:
            self.placement = Placement.query.filter_by(orchestration_id=self.uuid).first()  # disable=E1101
            self.placement = Placement.query.filter_by(
                orchestration_id=self.uuid).first()
            # disable=E1101
            if not self.placement:
                error('/errors/not_found', _('Placement not found'))
        request.context['placement_id'] = self.placement.id

    @classmethod
    def allow(cls):
        '''Allowed methods'''
        """Allowed methods."""
        return 'GET,POST,DELETE'

    @expose(generic=True, template='json')
    def index(self):
        '''Catchall for unallowed methods'''
        """Catchall for unallowed methods."""
        message = _('The %s method is not allowed.') % request.method
        kwargs = {'allow': self.allow()}
        error('/errors/not_allowed', message, **kwargs)

    @index.when(method='OPTIONS', template='json')
    def index_options(self):
        '''Options'''
        """Index Options."""
        response.headers['Allow'] = self.allow()
        response.status = 204

    @index.when(method='GET', template='json')
    def index_get(self):
        ''' Inspect a placement.
        """Inspect a placement.

        Use POST for reserving placements made by a scheduler.
        '''
        """
        return {"placement": self.placement}

    @index.when(method='POST', template='json')
    def index_post(self, **kwargs):
        ''' Reserve a placement. This and other placements may be replanned.
        """Reserve a placement. This and other placements may be replanned.

        Once reserved, the location effectively becomes immutable.
        '''
        """
        res_id = kwargs.get('resource_id')
        LOG.info(_('Placement reservation request for resource id '
                   '%(res_id)s, orchestration id %(orch_id)s.'),
@ -122,7 +125,8 @@ class PlacementsItemController(object):
        # We may get one or more updated placements in return.
        # One of those will be the original placement
        # we are trying to reserve.
        plan = Plan.query.filter_by(id=self.placement.plan_id).first()  # pylint: disable=E1101
        plan = Plan.query.filter_by(id=self.placement.plan_id).first()
        # pylint: disable=E1101

        args = {
            "stack_id": plan.stack_id,
@ -151,7 +155,7 @@ class PlacementsItemController(object):

    @index.when(method='DELETE', template='json')
    def index_delete(self):
        '''Delete a Placement'''
        """Delete a Placement."""
        orch_id = self.placement.orchestration_id
        self.placement.delete()
        LOG.info(_('Placement with orchestration id %s deleted.'), orch_id)
@ -159,29 +163,29 @@ class PlacementsItemController(object):


class PlacementsController(object):
    ''' Placements Controller /v1/placements '''
    """Placements Controller /v1/placements."""

    @classmethod
    def allow(cls):
        '''Allowed methods'''
        """Allowed methods."""
        return 'GET'

    @expose(generic=True, template='json')
    def index(self):
        '''Catchall for unallowed methods'''
        """Catchall for unallowed methods."""
        message = _('The %s method is not allowed.') % request.method
        kwargs = {'allow': self.allow()}
        error('/errors/not_allowed', message, **kwargs)

    @index.when(method='OPTIONS', template='json')
    def index_options(self):
        '''Options'''
        """Index Options."""
        response.headers['Allow'] = self.allow()
        response.status = 204

    @index.when(method='GET', template='json')
    def index_get(self):
        '''Get placements.'''
        """Get placements."""
        placements_array = []
        for placement in Placement.query.all():  # pylint: disable=E1101
            placements_array.append(placement)
@ -189,5 +193,5 @@ class PlacementsController(object):

    @expose()
    def _lookup(self, uuid4, *remainder):
        '''Pecan subcontroller routing callback'''
        """Pecan subcontroller routing callback."""
        return PlacementsItemController(uuid4), remainder
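Taken together, the handlers above define the /v1/placements surface: OPTIONS advertises the verbs, GET inspects, POST reserves, DELETE removes. A minimal client-side sketch of that flow, assuming a Valet API on localhost:8090; the ids and token below are placeholders, not values from this diff:

import requests

BASE = 'http://localhost:8090/v1'              # assumed deployment URL
HEADERS = {'X-Auth-Token': 'KEYSTONE-TOKEN'}   # v1 requires an auth token
PLACEMENT = 'PLACEMENT-OR-ORCHESTRATION-UUID'  # hypothetical id

# Inspect a placement (GET), then reserve it (POST with a resource_id).
resp = requests.get('%s/placements/%s' % (BASE, PLACEMENT), headers=HEADERS)
print(resp.json())

resp = requests.post('%s/placements/%s' % (BASE, PLACEMENT),
                     json={'resource_id': 'NOVA-SERVER-UUID'},
                     headers=HEADERS)
print(resp.status_code)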
@ -13,7 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.

'''Plans'''
"""Plans."""

import logging

@ -48,19 +48,22 @@ UPDATE_SCHEMA = (
    (decorators.optional('timeout'), types.string)
)


# pylint: disable=R0201


class PlansItemController(object):
    ''' Plans Item Controller /v1/plans/{plan_id} '''
    """Plan Item Controller /v1/plans/{plan_id}."""

    def __init__(self, uuid4):
        '''Initializer.'''
        """Initializer."""
        self.uuid = uuid4
        self.plan = Plan.query.filter_by(id=self.uuid).first()  # pylint: disable=E1101
        self.plan = Plan.query.filter_by(id=self.uuid).first()
        # pylint: disable=E1101

        if not self.plan:
            self.plan = Plan.query.filter_by(stack_id=self.uuid).first()  # pylint: disable=E1101
            self.plan = Plan.query.filter_by(stack_id=self.uuid).first()
            # pylint: disable=E1101

            if not self.plan:
                error('/errors/not_found', _('Plan not found'))
@ -68,32 +71,31 @@ class PlansItemController(object):

    @classmethod
    def allow(cls):
        '''Allowed methods'''
        """Allowed methods."""
        return 'GET,PUT,DELETE'

    @expose(generic=True, template='json')
    def index(self):
        '''Catchall for unallowed methods'''
        """Catchall for unallowed methods."""
        message = _('The %s method is not allowed.') % request.method
        kwargs = {'allow': self.allow()}
        error('/errors/not_allowed', message, **kwargs)

    @index.when(method='OPTIONS', template='json')
    def index_options(self):
        '''Options'''
        """Index Options."""
        response.headers['Allow'] = self.allow()
        response.status = 204

    @index.when(method='GET', template='json')
    def index_get(self):
        '''Get plan'''
        """Get plan."""
        return {"plan": self.plan}

    @index.when(method='PUT', template='json')
    @validate(UPDATE_SCHEMA, '/errors/schema')
    def index_put(self, **kwargs):
        '''Update a Plan'''

        """Update a Plan."""
        action = kwargs.get('action')
        if action == 'migrate':
            # Replan the placement of an existing resource.
@ -102,17 +104,24 @@ class PlansItemController(object):

            # TODO(JD): Support replan of more than one existing resource
            if not isinstance(resources, list) or len(resources) != 1:
                error('/errors/invalid', _('resources must be a list of length 1.'))
                error('/errors/invalid',
                      _('resources must be a list of length 1.'))

            # We either got a resource or orchestration id.
            the_id = resources[0]
            placement = Placement.query.filter_by(resource_id=the_id).first()  # pylint: disable=E1101
            placement = Placement.query.filter_by(resource_id=the_id).first()
            # pylint: disable=E1101
            if not placement:
                placement = Placement.query.filter_by(orchestration_id=the_id).first()  # pylint: disable=E1101
                placement = Placement.query.filter_by(
                    orchestration_id=the_id).first()  # pylint: disable=E1101
                if not placement:
                    error('/errors/invalid', _('Unknown resource or orchestration id: %s') % the_id)
                    error('/errors/invalid', _('Unknown resource or '
                                               'orchestration id: %s') % the_id)

            LOG.info(_('Migration request for resource id {0}, '
                       'orchestration id {1}.').format(
                placement.resource_id, placement.orchestration_id))

            LOG.info(_('Migration request for resource id {0}, orchestration id {1}.').format(placement.resource_id, placement.orchestration_id))
            args = {
                "stack_id": self.plan.stack_id,
                "excluded_hosts": excluded_hosts,
@ -136,7 +145,8 @@ class PlansItemController(object):

            # Flush so that the DB is current.
            self.plan.flush()
            self.plan = Plan.query.filter_by(stack_id=self.plan.stack_id).first()  # pylint: disable=E1101
            self.plan = Plan.query.filter_by(
                stack_id=self.plan.stack_id).first()  # pylint: disable=E1101
            LOG.info(_('Plan with stack id %s updated.'), self.plan.stack_id)
            return {"plan": self.plan}

@ -186,7 +196,7 @@ class PlansItemController(object):

    @index.when(method='DELETE', template='json')
    def index_delete(self):
        '''Delete a Plan'''
        """Delete a Plan."""
        for placement in self.plan.placements():
            placement.delete()
        stack_id = self.plan.stack_id
@ -196,29 +206,29 @@ class PlansItemController(object):


class PlansController(object):
    ''' Plans Controller /v1/plans '''
    """Plans Controller /v1/plans."""

    @classmethod
    def allow(cls):
        '''Allowed methods'''
        """Allowed methods."""
        return 'GET,POST'

    @expose(generic=True, template='json')
    def index(self):
        '''Catchall for unallowed methods'''
        """Catchall for unallowed methods."""
        message = _('The %s method is not allowed.') % request.method
        kwargs = {'allow': self.allow()}
        error('/errors/not_allowed', message, **kwargs)

    @index.when(method='OPTIONS', template='json')
    def index_options(self):
        '''Options'''
        """Index Options."""
        response.headers['Allow'] = self.allow()
        response.status = 204

    @index.when(method='GET', template='json')
    def index_get(self):
        '''Get all the plans'''
        """Get all the plans."""
        plans_array = []
        for plan in Plan.query.all():  # pylint: disable=E1101
            plans_array.append(plan)
@ -227,7 +237,7 @@ class PlansController(object):
    @index.when(method='POST', template='json')
    @validate(CREATE_SCHEMA, '/errors/schema')
    def index_post(self):
        '''Create a Plan'''
        """Create a Plan."""
        ostro = Ostro()
        args = request.json

@ -277,5 +287,5 @@ class PlansController(object):

    @expose()
    def _lookup(self, uuid4, *remainder):
        '''Pecan subcontroller routing callback'''
        """Pecan subcontroller routing callback."""
        return PlansItemController(uuid4), remainder
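The migrate branch of index_put reads an action, a resources list of exactly one id, and the hosts the replan must avoid. A hedged sketch of such a request; the endpoint, ids, and token are placeholders, and only the field names visible in the handler above are assumed:

import requests

body = {
    'action': 'migrate',
    'resources': ['NOVA-SERVER-UUID'],     # must be a list of length 1
    'excluded_hosts': ['compute-host-3'],  # hosts the replan must avoid
}
resp = requests.put('http://localhost:8090/v1/plans/STACK-UUID',
                    json=body, headers={'X-Auth-Token': 'KEYSTONE-TOKEN'})
print(resp.json())  # the updated plan, refetched after the flush above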
@ -13,7 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.

'''Root'''
"""Root."""

import logging

@ -31,32 +31,32 @@ LOG = logging.getLogger(__name__)


class RootController(object):
    ''' Root Controller / '''
    """Root Controller."""

    errors = ErrorsController()
    v1 = V1Controller()  # pylint: disable=C0103

    @classmethod
    def allow(cls):
        '''Allowed methods'''
        """Allowed methods."""
        return 'GET'

    @expose(generic=True, template='json')
    def index(self):
        '''Catchall for unallowed methods'''
        """Catchall for unallowed methods."""
        message = _('The %s method is not allowed.') % request.method
        kwargs = {'allow': self.allow()}
        error('/errors/not_allowed', message, **kwargs)

    @index.when(method='OPTIONS', template='json')
    def index_options(self):
        '''Options'''
        """Index Options."""
        response.headers['Allow'] = self.allow()
        response.status = 204

    @index.when(method='GET', template='json')
    def index_get(self):
        '''Get canonical URL for each version'''
        """Get canonical URL for each version."""
        ver = {
            "versions":
            [
@ -78,7 +78,7 @@ class RootController(object):

    @error_wrapper
    def error(self, status):
        '''Error handler'''
        """Error handler."""
        try:
            status = int(status)
        except ValueError:  # pragma: no cover
@ -13,7 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.

'''Status'''
"""Status."""

import logging

@ -28,11 +28,11 @@ LOG = logging.getLogger(__name__)


class StatusController(object):
    ''' Status Controller /v1/status '''
    """Status Controller /v1/status."""

    @classmethod
    def _ping_ostro(cls):
        '''Ping Ostro'''
        """Ping Ostro."""
        ostro = Ostro()
        ostro.ping()
        ostro.send()
@ -40,7 +40,7 @@ class StatusController(object):

    @classmethod
    def _ping(cls):
        '''Ping each subsystem.'''
        """Ping each subsystem."""
        ostro_response = StatusController._ping_ostro()
        # TODO(JD): Ping Music plus any others.

@ -54,32 +54,31 @@ class StatusController(object):

    @classmethod
    def allow(cls):
        '''Allowed methods'''
        """Allowed methods."""
        return 'HEAD,GET'

    @expose(generic=True, template='json')
    def index(self):
        '''Catchall for unallowed methods'''
        """Catchall for unallowed methods."""
        message = _('The %s method is not allowed.') % request.method
        kwargs = {'allow': self.allow()}
        error('/errors/not_allowed', message, **kwargs)

    @index.when(method='OPTIONS', template='json')
    def index_options(self):
        '''Options'''
        """Index Options."""
        response.headers['Allow'] = self.allow()
        response.status = 204

    @index.when(method='HEAD', template='json')
    def index_head(self):
        '''Ping each subsystem and return summary response'''
        """Ping each subsystem and return summary response."""
        self._ping()  # pylint: disable=W0612
        response.status = 204

    @index.when(method='GET', template='json')
    def index_get(self):
        '''Ping each subsystem and return detailed response'''

        """Ping each subsystem and return detailed response."""
        _response = self._ping()
        response.status = 200
        return _response
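In use, index_head gives a cheap liveness probe (204, no body) while index_get returns the detailed ping results. A sketch, with URL and token assumed:

import requests

headers = {'X-Auth-Token': 'KEYSTONE-TOKEN'}
url = 'http://localhost:8090/v1/status'

summary = requests.head(url, headers=headers)  # 204 when the ping succeeds
detail = requests.get(url, headers=headers)    # 200 plus ping details
print(summary.status_code, detail.json())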
@ -13,7 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.

'''v1'''
"""v1."""

import logging

@ -34,7 +34,7 @@ LOG = logging.getLogger(__name__)


class V1Controller(SecureController):
    ''' v1 Controller /v1 '''
    """v1 Controller /v1."""

    groups = GroupsController()
    placements = PlacementsController()
@ -46,7 +46,7 @@ class V1Controller(SecureController):

    @classmethod
    def check_permissions(cls):
        '''SecureController permission check callback'''
        """SecureController permission check callback."""
        token = None
        auth_token = request.headers.get('X-Auth-Token')
        msg = "Unauthorized - No auth token"
@ -74,7 +74,10 @@ class V1Controller(SecureController):

    @classmethod
    def _action_is_migrate(cls, request):
        return "plan" in request.path and hasattr(request, "json") and "action" in request.json and request.json["action"] == "migrate"
        return "plan" in request.path \
            and hasattr(request, "json") \
            and "action" in request.json \
            and request.json["action"] == "migrate"

    @classmethod
    def _permission_granted(cls, request, token):
@ -84,25 +87,25 @@ class V1Controller(SecureController):

    @classmethod
    def allow(cls):
        '''Allowed methods'''
        """Allowed methods."""
        return 'GET'

    @expose(generic=True, template='json')
    def index(self):
        '''Catchall for unallowed methods'''
        """Catchall for unallowed methods."""
        message = _('The %s method is not allowed.') % request.method
        kwargs = {'allow': self.allow()}
        error('/errors/not_allowed', message, **kwargs)

    @index.when(method='OPTIONS', template='json')
    def index_options(self):
        '''Options'''
        """Index Options."""
        response.headers['Allow'] = self.allow()
        response.status = 204

    @index.when(method='GET', template='json')
    def index_get(self):
        '''Get canonical URL for each endpoint'''
        """Get canonical URL for each endpoint."""
        links = []
        for endpoint in V1Controller.endpoints:
            links.append({
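The reflowed _action_is_migrate is a pure predicate over the request, so it is easy to check in isolation. A standalone sketch with a stub request object; the stub is illustrative, not part of Valet:

class StubRequest(object):
    # Mimics the two attributes the predicate reads from pecan's request.
    path = '/v1/plans/STACK-UUID'
    json = {'action': 'migrate', 'resources': ['NOVA-SERVER-UUID']}


def action_is_migrate(request):
    # Same boolean chain as V1Controller._action_is_migrate above.
    return "plan" in request.path \
        and hasattr(request, "json") \
        and "action" in request.json \
        and request.json["action"] == "migrate"


print(action_is_migrate(StubRequest()))  # True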
@ -13,7 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.

'''WSGI Wrapper'''
"""WSGI Wrapper."""

from common.i18n import _
import os
@ -21,7 +21,7 @@ from pecan.deploy import deploy


def config_file(file_name=None):
    """Returns absolute location of the config file"""
    """Return absolute location of the config file."""
    file_name = file_name or 'config.py'
    _file = os.path.abspath(__file__)

@ -32,7 +32,7 @@ def config_file(file_name=None):


def application(environ, start_response):
    """Returns a WSGI app object"""
    """Return a WSGI app object."""
    wsgi_app = deploy(config_file('prod.py'))
    return wsgi_app(environ, start_response)

@ -45,7 +45,8 @@ if __name__ == '__main__':
    from valet.api.conf import register_conf, set_domain
    register_conf()
    set_domain()
    HTTPD = make_server('', 8090, deploy(config_file('/var/www/valet/config.py')))
    HTTPD = make_server('', 8090,
                        deploy(config_file('/var/www/valet/config.py')))
    print(_("Serving HTTP on port 8090..."))

    # Respond to requests until process is killed
@ -13,6 +13,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.

"""Group Cli."""

import argparse
import json
from oslo_config import cfg
@ -23,81 +25,120 @@ CONF = cfg.CONF


class ResponseError(Exception):
    """Response Error Exception."""

    pass


class ConnectionError(Exception):
    """Connection Error Exception."""

    pass


def print_verbose(verbose, url, headers, body, rest_cmd, timeout):
    """Print verbose data."""
    # TODO(Chris Martin): Replace prints with logs
    if verbose:
        print("Sending Request:\nurl: %s\nheaders: %s\nbody: %s\ncmd: %s\ntimeout: %d\n"
              % (url, headers, body, rest_cmd.__name__ if rest_cmd is not None else None, timeout))
        print("Sending Request:\nurl: %s\nheaders: "
              "%s\nbody: %s\ncmd: %s\ntimeout: %d\n"
              % (url, headers, body,
                 rest_cmd.__name__ if rest_cmd is not None else None, timeout))


def pretty_print_json(json_thing, sort=True, indents=4):
    """Print parser in nice format."""
    # TODO(Chris Martin): Replace prints with logs
    if type(json_thing) is str:
        print(json.dumps(json.loads(json_thing), sort_keys=sort, indent=indents))
        print(json.dumps(json.loads(json_thing), sort_keys=sort,
                         indent=indents))
    else:
        print(json.dumps(json_thing, sort_keys=sort, indent=indents))
    return None


def add_to_parser(service_sub):
    """Return parser."""
    parser = service_sub.add_parser('group', help='Group Management',
                                    formatter_class=lambda prog: argparse.HelpFormatter(prog, max_help_position=30,
                                                                                        width=120))
                                    formatter_class=lambda
                                    prog: argparse.HelpFormatter(
                                        prog, max_help_position=30,
                                        width=120))
    parser.add_argument('--version', action='version', version='%(prog)s 1.1')
    parser.add_argument('--timeout', type=int, help='Set request timeout in seconds (default: 10)')
    parser.add_argument('--host', type=str, help='Hostname or ip of valet server')
    parser.add_argument('--timeout', type=int,
                        help='Set request timeout in seconds (default: 10)')
    parser.add_argument('--host', type=str,
                        help='Hostname or ip of valet server')
    parser.add_argument('--port', type=str, help='Port number of valet server')
    parser.add_argument('--os-tenant-name', type=str, help='Tenant name')
    parser.add_argument('--os-user-name', dest='os_username', type=str, help='Username')
    parser.add_argument('--os-user-name', dest='os_username', type=str,
                        help='Username')
    parser.add_argument('--os-password', type=str, help="User's password")
    parser.add_argument('--verbose', '-v', help='Show details', action="store_true")
    parser.add_argument('--verbose', '-v', help='Show details',
                        action="store_true")
    subparsers = parser.add_subparsers(dest='subcmd', metavar='<subcommand>')

    # create group
    parser_create_group = subparsers.add_parser('create', help='Create new group.')
    parser_create_group = subparsers.add_parser('create',
                                                help='Create new group.')
    parser_create_group.add_argument('name', type=str, help='<GROUP_NAME>')
    parser_create_group.add_argument('type', type=str, help='<GROUP_TYPE> (exclusivity)')
    parser_create_group.add_argument('--description', type=str, help='<GROUP_DESCRIPTION>')
    parser_create_group.add_argument('type', type=str,
                                     help='<GROUP_TYPE> (exclusivity)')
    parser_create_group.add_argument('--description', type=str,
                                     help='<GROUP_DESCRIPTION>')

    # delete group
    parser_delete_group = subparsers.add_parser('delete', help='Delete specified group.')
    parser_delete_group = subparsers.add_parser('delete',
                                                help='Delete specified group.')
    parser_delete_group.add_argument('groupid', type=str, help='<GROUP_ID>')

    # delete group member
    parser_delete_group_member = subparsers.add_parser('delete-member', help='Delete members from specified group.')
    parser_delete_group_member.add_argument('groupid', type=str, help='<GROUP_ID>')
    parser_delete_group_member.add_argument('memberid', type=str, help='<MEMBER_ID>')
    parser_delete_group_member = subparsers.add_parser('delete-member',
                                                       help='Delete member from'
                                                       'specified group.')
    parser_delete_group_member.add_argument('groupid', type=str,
                                            help='<GROUP_ID>')
    parser_delete_group_member.add_argument('memberid', type=str,
                                            help='<MEMBER_ID>')

    # delete all group members
    parser_delete_all_group_members = subparsers.add_parser('delete-all-members', help='Delete all members from '
                                                                                       'specified group.')
    parser_delete_all_group_members.add_argument('groupid', type=str, help='<GROUP_ID>')
    parser_delete_all_group_members = subparsers.add_parser(
        'delete-all-members', help='Delete all members from '
                                   'specified group.')
    parser_delete_all_group_members.add_argument('groupid', type=str,
                                                 help='<GROUP_ID>')

    # list group
    subparsers.add_parser('list', help='List all groups.')

    # show group details
    parser_show_group_details = subparsers.add_parser('show', help='Show details about the given group.')
    parser_show_group_details.add_argument('groupid', type=str, help='<GROUP_ID>')
    parser_show_group_details = subparsers.add_parser('show',
                                                      help='Show details about'
                                                      'the given group.')
    parser_show_group_details.add_argument('groupid', type=str,
                                           help='<GROUP_ID>')

    # update group
    parser_update_group = subparsers.add_parser('update', help='Update group description.')
    parser_update_group = subparsers.add_parser('update',
                                                help='Update group'
                                                'description.')
    parser_update_group.add_argument('groupid', type=str, help='<GROUP_ID>')
    parser_update_group.add_argument('--description', type=str, help='<GROUP_DESCRIPTION>')
    parser_update_group.add_argument('--description', type=str,
                                     help='<GROUP_DESCRIPTION>')

    parser_update_group_members = subparsers.add_parser('update-member', help='Update group members.')
    parser_update_group_members.add_argument('groupid', type=str, help='<GROUP_ID>')
    parser_update_group_members.add_argument('members', type=str, help='<MEMBER_ID>')
    parser_update_group_members = subparsers.add_parser('update-member',
                                                        help='Update'
                                                        'group members.')
    parser_update_group_members.add_argument('groupid', type=str,
                                             help='<GROUP_ID>')
    parser_update_group_members.add_argument('members', type=str,
                                             help='<MEMBER_ID>')

    return parser


def cmd_details(args):
    """Command details."""
    if args.subcmd == 'create':
        return requests.post, ''
    elif args.subcmd == 'update':
@ -105,21 +146,25 @@ def cmd_details(args):
    elif args.subcmd == 'update-member':
        return requests.put, '/%s/members' % args.groupid
    elif args.subcmd == 'delete':
        return requests.delete, '/%s' % (args.groupid)
        return requests.delete, '/%s' % args.groupid
    elif args.subcmd == 'delete-all-members':
        return requests.delete, '/%s/members' % (args.groupid)
        return requests.delete, '/%s/members' % args.groupid
    elif args.subcmd == 'delete-member':
        return requests.delete, '/%s/members/%s' % (args.groupid, args.memberid)
    elif args.subcmd == 'show':
        return requests.get, '/%s' % (args.groupid)
        return requests.get, '/%s' % args.groupid
    elif args.subcmd == 'list':
        return requests.get, ''


def get_token(timeout, args):
    tenant_name = args.os_tenant_name if args.os_tenant_name else CONF.identity.project_name
    auth_name = args.os_username if args.os_username else CONF.identity.username
    password = args.os_password if args.os_password else CONF.identity.password
    """Return JSON of access token id."""
    tenant_name = args.os_tenant_name if args.os_tenant_name \
        else CONF.identity.project_name
    auth_name = args.os_username if args.os_username \
        else CONF.identity.username
    password = args.os_password if args.os_password \
        else CONF.identity.password
    headers = {
        'Content-Type': 'application/json',
    }
@ -149,19 +194,23 @@ def get_token(timeout, args):


def populate_args_request_body(args):
    """Return JSON of filtered body dictionary."""
    body_args_list = ['name', 'type', 'description', 'members']
    # assign values to dictionary (if val exist). members will be assign as a list
    # assign values to dict (if val exist) members will be assign as a list
    body_dict = {}
    for body_arg in body_args_list:
        if hasattr(args, body_arg):
            body_dict[body_arg] = getattr(args, body_arg) if body_arg != 'members' else [getattr(args, body_arg)]
            body_dict[body_arg] = getattr(args, body_arg) \
                if body_arg != 'members' else [getattr(args, body_arg)]
    # remove keys without values
    filtered_body_dict = dict((k, v) for k, v in body_dict.iteritems() if v is not None)
    filtered_body_dict = dict(
        (k, v) for k, v in body_dict.iteritems() if v is not None)
    # check if dictionary is not empty, convert body dictionary to json format
    return json.dumps(filtered_body_dict) if bool(filtered_body_dict) else None


def run(args):
    """Run."""
    register_conf()
    set_domain(project='valet')
    args.host = args.host or CONF.server.host
@ -177,23 +226,27 @@ def run(args):
    args.body = populate_args_request_body(args)

    try:
        print_verbose(args.verbose, args.url, args.headers, args.body, rest_cmd, args.timeout)
        print_verbose(args.verbose, args.url, args.headers, args.body, rest_cmd,
                      args.timeout)
        if args.body:
            resp = rest_cmd(args.url, timeout=args.timeout, data=args.body, headers=args.headers)
            resp = rest_cmd(args.url, timeout=args.timeout, data=args.body,
                            headers=args.headers)
        else:
            resp = rest_cmd(args.url, timeout=args.timeout, headers=args.headers)
            resp = rest_cmd(args.url, timeout=args.timeout,
                            headers=args.headers)
    except Exception as e:
        print(e)
        exit(1)

    if not 200 <= resp.status_code < 300:
        content = resp.json() if resp.status_code == 500 else ''
        print('API error: %s %s (Reason: %d)\n%s' % (rest_cmd.func_name.upper(), args.url, resp.status_code, content))
        print('API error: %s %s (Reason: %d)\n%s' % (
            rest_cmd.func_name.upper(), args.url, resp.status_code, content))
        exit(1)
    try:
        if resp.content:
            rj = resp.json()
            pretty_print_json(rj)
    except Exception as e:
        print (e)
        print(e)
        exit(1)
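For orientation, cmd_details maps each subcommand to a (requests function, URL suffix) pair, and populate_args_request_body drops unset fields before serializing. A standalone sketch of those two steps; Python 3's items() stands in for the Python 2 iteritems() above, and the /v1/groups base path is an assumption:

import json
import requests


def details(subcmd, groupid=None, memberid=None):
    # Mirrors part of the cmd_details dispatch table above.
    if subcmd == 'create':
        return requests.post, ''
    elif subcmd == 'show':
        return requests.get, '/%s' % groupid
    elif subcmd == 'delete-member':
        return requests.delete, '/%s/members/%s' % (groupid, memberid)
    return requests.get, ''


def build_body(**fields):
    # Keep only populated fields, as populate_args_request_body does.
    filtered = dict((k, v) for k, v in fields.items() if v is not None)
    return json.dumps(filtered) if filtered else None


rest_cmd, suffix = details('show', groupid='GROUP-UUID')
print(rest_cmd.__name__, '/v1/groups' + suffix)  # get /v1/groups/GROUP-UUID
print(build_body(name='g1', type='exclusivity', description=None))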
@ -13,6 +13,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.

"""Valet cli."""

import argparse
import sys
import valet.cli.groupcli as groupcli
@ -20,27 +22,36 @@ import valet.cli.groupcli as groupcli


class Cli(object):
    """Cli."""

    def __init__(self):
        """Init cli."""
        self.args = None
        self.submod = None
        self.parser = None

    def create_parser(self):
        self.parser = argparse.ArgumentParser(prog='valet', description='VALET REST CLI')
        service_sub = self.parser.add_subparsers(dest='service', metavar='<service>')
        """Create parser."""
        self.parser = argparse.ArgumentParser(prog='valet',
                                              description='VALET REST CLI')
        service_sub = self.parser.add_subparsers(dest='service',
                                                 metavar='<service>')
        self.submod = {'group': groupcli}
        for s in self.submod.values():
            s.add_to_parser(service_sub)

    def parse(self, argv=sys.argv):
        """Parse args."""
        sys.argv = argv
        self.args = self.parser.parse_args()

    def logic(self):
        """Logic."""
        self.submod[self.args.service].run(self.args)


def main(argv):
    """Main."""
    cli = Cli()
    cli.create_parser()
    cli.parse(argv)
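End to end, Cli wires the group subparser in and dispatches on the chosen service. A usage sketch; it assumes the valet package is importable, and the final call would issue a live request:

from valet.cli.valetcli import Cli  # assumed module path

cli = Cli()
cli.create_parser()
cli.parse(['valet', 'group', 'list'])  # same as running: valet group list
cli.logic()                            # dispatches to groupcli.run(args)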
@ -1,18 +1,20 @@
#
# Copyright 2014-2017 AT&T Intellectual Property
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Conf."""

from oslo_config import cfg
from valet.api import conf as api

@ -28,53 +30,178 @@ ostro_cli_opts = [

engine_group = cfg.OptGroup(name='engine', title='Valet Engine conf')
engine_opts = [
    cfg.StrOpt('pid', default='/var/run/valet/ostro-daemon.pid'),
    cfg.StrOpt('mode', default='live',
               help='sim will let Ostro simulate datacenter, while live will let it handle a real datacenter'),
    cfg.StrOpt('sim_cfg_loc', default='/etc/valet/engine/ostro_sim.cfg'),
    cfg.BoolOpt('network_control', default=False, help='whether network controller (i.e., Tegu) has been deployed'),
    cfg.StrOpt('network_control_url', default='http://network_control:29444/tegu/api'),
    cfg.StrOpt('ip', default='localhost'),
    cfg.IntOpt('priority', default=1, help='this instance priority (master=1)'),
    cfg.StrOpt('rpc_server_ip', default='localhost',
               help='Set RPC server ip and port if used. Otherwise, ignore these parameters'),
    cfg.StrOpt('rpc_server_port', default='8002'),
    cfg.StrOpt('logger_name', default='engine.log'),
    cfg.StrOpt('logging_level', default='debug'),
    cfg.StrOpt('logging_dir', default='/var/log/valet/'),
    cfg.StrOpt('max_main_log_size', default=5000000),
    cfg.IntOpt('max_log_size', default=1000000),
    cfg.IntOpt('max_num_of_logs', default=20),
    cfg.StrOpt('datacenter_name', default='bigsite',
               help='Inform the name of datacenter (region name), where Valet/Ostro is deployed.'),
    cfg.IntOpt('num_of_region_chars', default='3', help='number of chars that indicates the region code'),
    cfg.StrOpt('rack_code_list', default='r', help='rack indicator.'),
    cfg.ListOpt('node_code_list', default='a,c,u,f,o,p,s',
                help='indicates the node type. a: network, c KVM compute, u: ESXi compute, f: ?, o: operation, '
                     'p: power, s: storage.'),
    cfg.StrOpt('compute_trigger_time', default='1:00',
               help='trigger time or frequency for checking compute hosting server status (i.e., call Nova)'),
    cfg.IntOpt('compute_trigger_frequency', default=3600,
               help='trigger time or frequency for checking compute hosting server status (i.e., call Nova)'),
    cfg.StrOpt('topology_trigger_time', default='2:00',
               help='Set trigger time or frequency for checking datacenter topology'),
    cfg.IntOpt('topology_trigger_frequency', default=3600,
               help='Set trigger time or frequency for checking datacenter topology'),
    cfg.IntOpt('default_cpu_allocation_ratio', default=16, help='Set default overbooking ratios. '
                                                                'Note that each compute node can have its own ratios'),
    cfg.IntOpt('default_ram_allocation_ratio', default=1.5, help='Set default overbooking ratios. '
                                                                 'Note that each compute node can have its own ratios'),
    cfg.IntOpt('default_disk_allocation_ratio', default=1, help='Set default overbooking ratios. '
                                                                'Note that each compute node can have its own ratios'),
    cfg.IntOpt('static_cpu_standby_ratio', default=20, help='unused percentages of resources (i.e. standby) '
                                                            'that are set aside for applications workload spikes.'),
    cfg.IntOpt('static_mem_standby_ratio', default=20, help='unused percentages of resources (i.e. standby) '
                                                            'that are set aside for applications workload spikes.'),
    cfg.IntOpt('static_local_disk_standby_ratio', default=20, help='unused percentages of resources (i.e. standby) '
                                                                   'that are set aside for applications workload spikes.'),
    cfg.StrOpt(
        'pid',
        default='/var/run/valet/ostro-daemon.pid'
    ),
    cfg.StrOpt(
        'mode',
        default='live',
        help="""
Sim will let Ostro simulate datacenter, while live will
let it handle a real datacenter.
"""),
    cfg.StrOpt(
        'sim_cfg_loc',
        default='/etc/valet/engine/ostro_sim.cfg'),
    cfg.BoolOpt(
        'network_control',
        default=False,
        help="""
Whether network controller (i.e., Tegu) has been deployed
"""),
    cfg.StrOpt(
        'network_control_url',
        default='http://network_control:29444/tegu/api'),
    cfg.StrOpt(
        'ip',
        default='localhost'),
    cfg.IntOpt(
        'priority',
        default=1,
        help="""
This instance priority (master=1)
"""),
    cfg.StrOpt(
        'rpc_server_ip',
        default='localhost',
        help="""
Set RPC server ip and port if used. Otherwise, ignore these parameters
"""),
    cfg.StrOpt(
        'rpc_server_port',
        default='8002'
    ),
    cfg.StrOpt(
        'logger_name',
        default='engine.log'
    ),
    cfg.StrOpt(
        'logging_level',
        default='debug'
    ),
    cfg.StrOpt(
        'logging_dir',
        default='/var/log/valet/'
    ),
    cfg.StrOpt(
        'max_main_log_size',
        default=5000000
    ),
    cfg.IntOpt(
        'max_log_size',
        default=1000000
    ),
    cfg.IntOpt(
        'max_num_of_logs',
        default=20
    ),
    cfg.StrOpt(
        'datacenter_name',
        default='bigsite',
        help="""
Inform the name of datacenter (region name), where Valet/Ostro is deployed.
"""),
    cfg.IntOpt(
        'num_of_region_chars',
        default='3',
        help="""
Number of chars that indicates the region code
"""),
    cfg.StrOpt(
        'rack_code_list',
        default='r',
        help="""
Rack indicator.
"""),
    cfg.ListOpt(
        'node_code_list',
        default='a,c,u,f,o,p,s',
        help="""
Indicates the node type.

Values:

* a: network
* c KVM compute
* u: ESXi compute
* f: ?
* o: operation
* p: power
* s: storage.
"""),
    cfg.StrOpt(
        'compute_trigger_time',
        default='1:00',
        help="""
Trigger time or frequency for checking compute hosting server status
(i.e., call Nova)
"""),
    cfg.IntOpt(
        'compute_trigger_frequency',
        default=3600,
        help="""
Trigger time or frequency for checking compute hosting server status
(i.e., call Nova).
"""),
    cfg.StrOpt(
        'topology_trigger_time',
        default='2:00',
        help="""
Set trigger time or frequency for checking datacenter topology.
"""),
    cfg.IntOpt(
        'topology_trigger_frequency',
        default=3600,
        help="""
Set trigger time or frequency for checking datacenter topology.
"""),
    cfg.IntOpt(
        'default_cpu_allocation_ratio',
        default=16,
        help="""
Set default overbooking ratios.
Note that each compute node can have its own ratios.
"""),
    cfg.IntOpt(
        'default_ram_allocation_ratio',
        default=1.5,
        help="""
Set default overbooking ratios.
Note that each compute node can have its own ratios.
"""),
    cfg.IntOpt(
        'default_disk_allocation_ratio',
        default=1,
        help="""
Set default overbooking ratios.
Note that each compute node can have its own ratios.
"""),
    cfg.IntOpt(
        'static_cpu_standby_ratio',
        default=20,
        help="""
Unused percentages of resources (i.e. standby) that are set
aside for applications workload spikes.
"""),
    cfg.IntOpt(
        'static_mem_standby_ratio',
        default=20,
        help="""
Unused percentages of resources (i.e. standby) that are set
aside for applications workload spikes.
"""),
    cfg.IntOpt(
        'static_local_disk_standby_ratio',
        default=20,
        help="""
Unused percentages of resources (i.e. standby) that are set
aside for applications workload spikes.
"""),
]

listener_group = cfg.OptGroup(name='events_listener', title='Valet Engine listener')
listener_group = cfg.OptGroup(name='events_listener',
                              title='Valet Engine listener')
listener_opts = [
    cfg.StrOpt('exchange', default='nova'),
    cfg.StrOpt('exchange_type', default='topic'),
@ -89,6 +216,7 @@ listener_opts = [


def register_conf():
    """Function calls api and registers configs opts."""
    api.register_conf()
    CONF.register_group(engine_group)
    CONF.register_opts(engine_opts, engine_group)
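After register_conf() runs, each option above is readable as CONF.engine.<name>. The same oslo.config pattern in miniature, with a throwaway group so the snippet runs standalone; the 'demo' names are illustrative, not Valet's:

from oslo_config import cfg

CONF = cfg.CONF
demo_group = cfg.OptGroup(name='demo', title='Demo conf')
demo_opts = [
    cfg.StrOpt('mode', default='live', help='sim or live'),
    cfg.IntOpt('priority', default=1, help='instance priority (master=1)'),
]
CONF.register_group(demo_group)
CONF.register_opts(demo_opts, demo_group)

CONF([])                   # parse an empty command line
print(CONF.demo.mode)      # 'live'
print(CONF.demo.priority)  # 1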
@ -1,18 +1,20 @@
#
# Copyright 2015-2017 AT&T Intellectual Property
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Listener Manager."""

from datetime import datetime
import json
import pika
@ -26,8 +28,10 @@ import yaml


class ListenerManager(threading.Thread):
    """Listener Manager Thread Class."""

    def __init__(self, _t_id, _t_name, _config):
        """Init."""
        threading.Thread.__init__(self)
        self.thread_id = _t_id
        self.thread_name = _t_name
@ -36,14 +40,16 @@ class ListenerManager(threading.Thread):
        self.MUSIC = None

    def run(self):
        '''Entry point
        """Entry point.

        Connect to localhost rabbitmq servers, use username:password@ipaddress:port.
        The port is typically 5672, and the default username and password are guest and guest.
        credentials = pika.PlainCredentials("guest", "PASSWORD")
        '''
        Connect to localhost rabbitmq servers, use
        username:password@ipaddress:port. The port is typically 5672,
        and the default username and password are guest and guest.
        credentials = pika.PlainCredentials("guest", "PASSWORD").
        """
        try:
            self.listener_logger.info("ListenerManager: start " + self.thread_name + " ......")
            self.listener_logger.info("ListenerManager: start " +
                                      self.thread_name + " ......")

            if self.config.events_listener.store:

@ -54,12 +60,20 @@ class ListenerManager(threading.Thread):
                }
                engine = Music(**kwargs)
                engine.create_keyspace(self.config.music.keyspace)
                self.MUSIC = {'engine': engine, 'keyspace': self.config.music.keyspace}
                self.listener_logger.debug('Storing in music on %s, keyspace %s' % (self.config.music.host, self.config.music.keyspace))
                self.MUSIC = {'engine': engine,
                              'keyspace': self.config.music.keyspace}
                self.listener_logger.debug('Storing in music on %s, keyspace %s'
                                           % (self.config.music.host,
                                              self.config.music.keyspace))

            self.listener_logger.debug('Connecting to %s, with %s' % (self.config.messaging.host, self.config.messaging.username))
            credentials = pika.PlainCredentials(self.config.messaging.username, self.config.messaging.password)
            parameters = pika.ConnectionParameters(self.config.messaging.host, self.config.messaging.port, '/', credentials)
            self.listener_logger.debug('Connecting to %s, with %s' %
                                       (self.config.messaging.host,
                                        self.config.messaging.username))
            credentials = pika.PlainCredentials(self.config.messaging.username,
                                                self.config.messaging.password)
            parameters = pika.ConnectionParameters(self.config.messaging.host,
                                                   self.config.messaging.port,
                                                   '/', credentials)

            connection = pika.BlockingConnection(parameters)
            channel = connection.channel()
@ -73,9 +87,9 @@ class ListenerManager(threading.Thread):
            # to receive. '#' is a wild card -- meaning receive all messages
            binding_key = "#"

            # Check whether or not an exchange with the given name and type exists.
            # Check whether an exchange with the given name and type exists.
            # Make sure that the exchange is multicast "fanout" or "topic" type
            # otherwise our queue will consume the messages intended for other queues
            # otherwise queue will consume messages intended for other queues
            channel.exchange_declare(exchange=exchange_name,
                                     exchange_type=exchange_type,
                                     auto_delete=auto_delete)
@ -85,8 +99,11 @@ class ListenerManager(threading.Thread):
            queue_name = result.method.queue

            # Bind the queue to the selected exchange
            channel.queue_bind(exchange=exchange_name, queue=queue_name, routing_key=binding_key)
            self.listener_logger.info('Channel is bound, listening on %s exchange %s', self.config.messaging.host, self.config.events_listener.exchange)
            channel.queue_bind(exchange=exchange_name, queue=queue_name,
                               routing_key=binding_key)
            self.listener_logger.info('Channel is bound,listening on%s '
                                      'exchange %s', self.config.messaging.host,
                                      self.config.events_listener.exchange)

            # Start consuming messages
            channel.basic_consume(self.on_message, queue_name)
@ -103,8 +120,9 @@ class ListenerManager(threading.Thread):
            channel.close()
            connection.close()

    def on_message(self, channel, method_frame, _, body):  # pylint: disable=W0613
        '''Specify the action to be taken on a message received'''
    def on_message(self, channel, method_frame, _, body):
        """Specify the action to be taken on a message received."""
        # pylint: disable=W0613
        message = yaml.load(body)
        try:
            if 'oslo.message' in message.keys():
@ -115,12 +133,14 @@ class ListenerManager(threading.Thread):
            else:
                return

            self.listener_logger.debug("\nMessage No: %s\n", method_frame.delivery_tag)
            self.listener_logger.debug("\nMessage No: %s\n",
                                       method_frame.delivery_tag)
            message_obj = yaml.load(body)
            if 'oslo.message' in message_obj.keys():
                message_obj = yaml.load(message_obj['oslo.message'])
            if self.config.events_listener.output_format == 'json':
                self.listener_logger.debug(json.dumps(message_obj, sort_keys=True, indent=2))
                self.listener_logger.debug(json.dumps(message_obj,
                                           sort_keys=True, indent=2))
            elif self.config.events_listener.output_format == 'yaml':
                self.listener_logger.debug(yaml.dump(message_obj))
            else:
@ -131,25 +151,34 @@ class ListenerManager(threading.Thread):
            return

    def is_message_wanted(self, message):
        ''' Based on markers from Ostro, determine if this is a wanted message. '''
        """Based on markers from Ostro.

        Determine if this is a wanted message.
        """
        method = message.get('method', None)
        args = message.get('args', None)

        nova_props = {'nova_object.changes', 'nova_object.data', 'nova_object.name'}
        nova_props = {'nova_object.changes', 'nova_object.data',
                      'nova_object.name'}
        args_props = {'filter_properties', 'instance'}

        is_data = method and args
        is_nova = is_data and 'objinst' in args and nova_props.issubset(args['objinst'])
        is_nova = is_data and 'objinst' in args \
            and nova_props.issubset(args['objinst'])

        action_instance = is_nova and method == 'object_action' and self.is_nova_name(args) and self.is_nova_state(args)
        action_instance = is_nova and method == 'object_action' \
            and self.is_nova_name(args) \
            and self.is_nova_state(args)

        action_compute = is_nova and self.is_compute_name(args)
        create_instance = is_data and method == 'build_and_run_instance' and args_props.issubset(args) and 'nova_object.data' in args['instance']
        create_instance = is_data and method == 'build_and_run_instance' \
            and args_props.issubset(args) \
            and 'nova_object.data' in args['instance']

        return action_instance or action_compute or create_instance

    def store_message(self, message):
        '''Store message in Music'''
        """Store message in Music."""
        timestamp = datetime.now().isoformat()
        args = json.dumps(message.get('args', None))
        exchange = self.config.events_listener.exchange
@ -165,10 +194,14 @@ class ListenerManager(threading.Thread):
        OsloMessage(**kwargs)  # pylint: disable=W0612

    def is_nova_name(self, args):
        """Return True if object name is Instance."""
        return args['objinst']['nova_object.name'] == 'Instance'

    def is_nova_state(self, args):
        return args['objinst']['nova_object.data']['vm_state'] in ['deleted', 'active']
        """Return True if object vm_state is deleted or active."""
        return args['objinst']['nova_object.data']['vm_state'] \
            in ['deleted', 'active']

    def is_compute_name(self, args):
        """Return True if object name is ComputeNode."""
        return args['objinst']['nova_object.name'] == 'ComputeNode'
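The run() docstring spells out the connection recipe; condensed into a minimal consumer using the same pika 0.x-style calls as the listener above. Host, credentials, and exchange name are placeholders:

import pika

credentials = pika.PlainCredentials('guest', 'guest')
parameters = pika.ConnectionParameters('localhost', 5672, '/', credentials)
connection = pika.BlockingConnection(parameters)
channel = connection.channel()

# Topic exchange, anonymous exclusive queue, '#' wildcard binding --
# the same shape as ListenerManager.run().
channel.exchange_declare(exchange='nova', exchange_type='topic',
                         auto_delete=False)
queue_name = channel.queue_declare(queue='', exclusive=True).method.queue
channel.queue_bind(exchange='nova', queue=queue_name, routing_key='#')


def on_message(channel, method_frame, header_frame, body):
    print(body)
    channel.basic_ack(method_frame.delivery_tag)


channel.basic_consume(on_message, queue_name)  # pika 0.x argument order
try:
    channel.start_consuming()
finally:
    channel.close()
    connection.close()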
@ -1,19 +1,19 @@
#
# Copyright 2015-2017 AT&T Intellectual Property
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

'''OsloMessage Database Model'''
"""OsloMessage Database Model."""

# This is based on Music models used in Valet.

@ -21,6 +21,8 @@ import uuid


class OsloMessage(object):
    """OsloMessage class."""

    __tablename__ = 'oslo_messages'

    _database = None
@ -32,7 +34,7 @@ class OsloMessage(object):

    @classmethod
    def schema(cls):
        '''Return schema.'''
        """Return schema."""
        schema = {
            'timestamp': 'text',
            'args': 'text',
@ -44,13 +46,15 @@ class OsloMessage(object):

    @classmethod
    def pk_name(cls):
        """Return timestamp string."""
        return 'timestamp'

    def pk_value(self):
        """Return self.timestamp."""
        return self.timestamp

    def insert(self):
        '''Insert row.'''
        """Insert row."""
        keyspace = self._database.get('keyspace')
        kwargs = {
            'keyspace': keyspace,
@ -66,6 +70,7 @@ class OsloMessage(object):
        engine.create_row(**kwargs)

    def values(self):
        """Return values."""
        return {
            'timestamp': self.timestamp,
            'args': self.args,
@ -75,6 +80,7 @@ class OsloMessage(object):

    def __init__(self, timestamp, args, exchange,
                 method, database, _insert=True):
        """Init."""
        self._database = database
        self.timestamp = timestamp
        self.args = args
@ -84,6 +90,7 @@ class OsloMessage(object):
            self.insert()

    def __json__(self):
        """Return json."""
        json_ = {}
        json_['timestamp'] = self.timestamp
        json_['args'] = self.args
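Since OsloMessage only needs a database dict holding a keyspace name and an engine exposing create_row, it can be exercised without Music. A sketch with a stub engine; the module path and the stub are assumptions for illustration:

from datetime import datetime

from valet.engine.listener.oslo_messages import OsloMessage  # assumed path


class StubEngine(object):
    def create_row(self, **kwargs):
        # Stand-in for Music's row insert; just show what would be written.
        print(sorted(kwargs.keys()))


database = {'engine': StubEngine(), 'keyspace': 'valet_test'}
OsloMessage(timestamp=datetime.now().isoformat(),
            args='{"example": true}', exchange='nova',
            method='object_action', database=database)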
@ -1,18 +1,20 @@
#
# Copyright 2014-2017 AT&T Intellectual Property
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""App Handler."""

import json

from valet.engine.optimizer.app_manager.app_topology import AppTopology
@ -22,14 +24,21 @@ from valet.engine.optimizer.util import util as util


class AppHandler(object):
    """App Handler Class.

    This class handles operations for the management of applications.
    Functions related to adding apps and adding/removing them from
    placement and updating topology info.
    """

    def __init__(self, _resource, _db, _config, _logger):
        """Init App Handler Class."""
        self.resource = _resource
        self.db = _db
        self.config = _config
        self.logger = _logger

        ''' current app requested, a temporary copy '''
        """ current app requested, a temporary copy """
        self.apps = {}

        self.last_log_index = 0
@ -37,6 +46,7 @@ class AppHandler(object):
        self.status = "success"

    def add_app(self, _app_data):
        """Add app and set or regenerate topology, return updated topology."""
        self.apps.clear()

        app_topology = AppTopology(self.resource, self.logger)
@ -60,10 +70,12 @@ class AppHandler(object):
            if action == "ping":
                self.logger.debug("AppHandler: got ping")
            elif action == "replan" or action == "migrate":
                re_app = self._regenerate_app_topology(stack_id, app, app_topology, action)
                re_app = self._regenerate_app_topology(stack_id, app,
                                                       app_topology, action)
                if re_app is None:
                    self.apps[stack_id] = None
                    self.status = "cannot locate the original plan for stack = " + stack_id
                    self.status = "cannot locate the original plan for " \
                                  "stack = " + stack_id
                    return None

                if action == "replan":
@ -93,6 +105,7 @@ class AppHandler(object):
        return app_topology

    def add_placement(self, _placement_map, _timestamp):
        """Change requested apps to scheduled and place them."""
        for v in _placement_map.keys():
            if self.apps[v.app_uuid].status == "requested":
                self.apps[v.app_uuid].status = "scheduled"
@ -116,11 +129,12 @@ class AppHandler(object):

    def _store_app_placements(self):
        (app_logfile, last_index, mode) = util.get_last_logfile(
            self.config.app_log_loc, self.config.max_log_size, self.config.max_num_of_logs,
            self.resource.datacenter.name, self.last_log_index)
            self.config.app_log_loc, self.config.max_log_size,
            self.config.max_num_of_logs, self.resource.datacenter.name,
            self.last_log_index)
        self.last_log_index = last_index

        # TODO: error handling
        # TODO(UNKNOWN): error handling

        logging = open(self.config.app_log_loc + app_logfile, mode)

@ -141,19 +155,23 @@ class AppHandler(object):
            if self.db.add_app(appk, json_info) is False:
                return False

            if self.db.update_app_log_index(self.resource.datacenter.name, self.last_log_index) is False:
            if self.db.update_app_log_index(self.resource.datacenter.name,
                                            self.last_log_index) is False:
                return False

        return True

    def remove_placement(self):
        """Remove App from placement."""
        if self.db is not None:
            for appk, _ in self.apps.iteritems():
                if self.db.add_app(appk, None) is False:
                    self.logger.error("AppHandler: error while adding app info to MUSIC")
                    self.logger.error("AppHandler: error while adding app "
                                      "info to MUSIC")
                    # NOTE: ignore?

    def get_vm_info(self, _s_uuid, _h_uuid, _host):
        """Return vm_info from database."""
        vm_info = {}

        if _h_uuid is not None and _h_uuid != "none" and \
@ -163,6 +181,7 @@ class AppHandler(object):
        return vm_info

    def update_vm_info(self, _s_uuid, _h_uuid):
        """Update vm info (the ids) in the database."""
        s_uuid_exist = bool(_s_uuid is not None and _s_uuid != "none")
        h_uuid_exist = bool(_h_uuid is not None and _h_uuid != "none")
        if s_uuid_exist and h_uuid_exist:
@ -216,26 +235,32 @@ class AppHandler(object):

            if _action == "replan":
                if vmk == _app["orchestration_id"]:
                    _app_topology.candidate_list_map[vmk] = _app["locations"]
                    _app_topology.candidate_list_map[vmk] = \
                        _app["locations"]

                    self.logger.debug("AppHandler: re-requested vm = " + vm["name"] + " in")
                    self.logger.debug("AppHandler: re-requested vm = " +
                                      vm["name"] + " in")
                    for hk in _app["locations"]:
                        self.logger.debug(" " + hk)

                elif vmk in _app["exclusions"]:
                    _app_topology.planned_vm_map[vmk] = vm["host"]

                    self.logger.debug("AppHandler: exception from replan = " + vm["name"])
                    self.logger.debug("AppHandler: exception from "
                                      "replan = " + vm["name"])

            elif _action == "migrate":
                if vmk == _app["orchestration_id"]:
                    _app_topology.exclusion_list_map[vmk] = _app["excluded_hosts"]
                    _app_topology.exclusion_list_map[vmk] = _app[
                        "excluded_hosts"]
                    if vm["host"] not in _app["excluded_hosts"]:
                        _app_topology.exclusion_list_map[vmk].append(vm["host"])
                        _app_topology.exclusion_list_map[vmk].append(
                            vm["host"])
                else:
                    _app_topology.planned_vm_map[vmk] = vm["host"]

            _app_topology.old_vm_map[vmk] = (vm["host"], vm["cpus"], vm["mem"], vm["local_volume"])
            _app_topology.old_vm_map[vmk] = (vm["host"], vm["cpus"],
                                             vm["mem"], vm["local_volume"])

        if "VGroups" in old_app.keys():
            for gk, affinity in old_app["VGroups"].iteritems():
@ -251,14 +276,16 @@ class AppHandler(object):
                resources[gk]["properties"] = properties

                if len(affinity["diversity_groups"]) > 0:
                    for divk, level_name in affinity["diversity_groups"].iteritems():
                    for divk, level_name in \
                            affinity["diversity_groups"].iteritems():
                        div_id = divk + ":" + level_name
                        if div_id not in diversity_groups.keys():
                            diversity_groups[div_id] = []
                        diversity_groups[div_id].append(gk)

                if len(affinity["exclusivity_groups"]) > 0:
                    for exk, level_name in affinity["exclusivity_groups"].iteritems():
                    for exk, level_name in \
                            affinity["exclusivity_groups"].iteritems():
                        ex_id = exk + ":" + level_name
                        if ex_id not in exclusivity_groups.keys():
                            exclusivity_groups[ex_id] = []
@ -269,7 +296,8 @@ class AppHandler(object):
        for div_id, resource_list in diversity_groups.iteritems():
            divk_level_name = div_id.split(":")
            resources[divk_level_name[0]] = {}
            resources[divk_level_name[0]]["type"] = "ATT::Valet::GroupAssignment"
            resources[divk_level_name[0]]["type"] = \
                "ATT::Valet::GroupAssignment"
            properties = {}
            properties["group_type"] = "diversity"
            properties["group_name"] = divk_level_name[2]
@@ -1,41 +1,49 @@
#
# Copyright 2014-2017 AT&T Intellectual Property
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""App Topology."""

from valet.engine.optimizer.app_manager.app_topology_base import VM, VGroup
from valet.engine.optimizer.app_manager.app_topology_parser import Parser


class AppTopology(object):
"""App Topology Class.

This class contains functions for parsing and setting each app, as well as
calculating and setting optimization.
"""

def __init__(self, _resource, _logger):
"""Init App Topology Class."""
self.vgroups = {}
self.vms = {}
self.volumes = {}

''' for replan '''
""" for replan """
self.old_vm_map = {}
self.planned_vm_map = {}
self.candidate_list_map = {}

''' for migration-tip '''
""" for migration-tip """
self.exclusion_list_map = {}

self.resource = _resource
self.logger = _logger

''' restriction of host naming convention '''
""" restriction of host naming convention """
high_level_allowed = True
if "none" in self.resource.datacenter.region_code_list:
high_level_allowed = False
@@ -51,15 +59,19 @@ class AppTopology(object):

self.status = "success"

''' parse and set each app '''
def set_app_topology(self, _app_graph):
"""Set app topology (Parse and set each app).

Set app topology by calling parser to determine vgroups,
vms and volumes. Then return parsed stack_id, app_name and action.
"""
(vgroups, vms, volumes) = self.parser.set_topology(_app_graph)

if len(vgroups) == 0 and len(vms) == 0 and len(volumes) == 0:
self.status = self.parser.status
return None

''' cumulate virtual resources '''
""" cumulate virtual resources """
for _, vgroup in vgroups.iteritems():
self.vgroups[vgroup.uuid] = vgroup
for _, vm in vms.iteritems():
@@ -67,9 +79,11 @@ class AppTopology(object):
for _, vol in volumes.iteritems():
self.volumes[vol.uuid] = vol

return self.parser.stack_id, self.parser.application_name, self.parser.action
return self.parser.stack_id, self.parser.application_name, \
self.parser.action

def set_weight(self):
"""Set weight of vms and vgroups."""
for _, vm in self.vms.iteritems():
self._set_vm_weight(vm)
for _, vg in self.vgroups.iteritems():
@@ -87,19 +101,22 @@ class AppTopology(object):
self._set_vm_weight(sg)
else:
if self.resource.CPU_avail > 0:
_v.vCPU_weight = float(_v.vCPUs) / float(self.resource.CPU_avail)
_v.vCPU_weight = float(_v.vCPUs) / \
float(self.resource.CPU_avail)
else:
_v.vCPU_weight = 1.0
self.total_CPU += _v.vCPUs

if self.resource.mem_avail > 0:
_v.mem_weight = float(_v.mem) / float(self.resource.mem_avail)
_v.mem_weight = float(_v.mem) / \
float(self.resource.mem_avail)
else:
_v.mem_weight = 1.0
self.total_mem += _v.mem

if self.resource.local_disk_avail > 0:
_v.local_volume_weight = float(_v.local_volume_size) / float(self.resource.local_disk_avail)
_v.local_volume_weight = float(_v.local_volume_size) / \
float(self.resource.local_disk_avail)
else:
if _v.local_volume_size > 0:
_v.local_volume_weight = 1.0
@@ -110,7 +127,8 @@ class AppTopology(object):
bandwidth = _v.nw_bandwidth + _v.io_bandwidth

if self.resource.nw_bandwidth_avail > 0:
_v.bandwidth_weight = float(bandwidth) / float(self.resource.nw_bandwidth_avail)
_v.bandwidth_weight = float(bandwidth) / \
float(self.resource.nw_bandwidth_avail)
else:
if bandwidth > 0:
_v.bandwidth_weight = 1.0
@@ -129,8 +147,10 @@ class AppTopology(object):
_vg.local_volume_size += sg.local_volume_size

def _set_vgroup_weight(self, _vgroup):
"""Calculate weights for vgroup."""
if self.resource.CPU_avail > 0:
_vgroup.vCPU_weight = float(_vgroup.vCPUs) / float(self.resource.CPU_avail)
_vgroup.vCPU_weight = float(_vgroup.vCPUs) / \
float(self.resource.CPU_avail)
else:
if _vgroup.vCPUs > 0:
_vgroup.vCPU_weight = 1.0
@@ -138,7 +158,8 @@ class AppTopology(object):
_vgroup.vCPU_weight = 0.0

if self.resource.mem_avail > 0:
_vgroup.mem_weight = float(_vgroup.mem) / float(self.resource.mem_avail)
_vgroup.mem_weight = float(_vgroup.mem) / \
float(self.resource.mem_avail)
else:
if _vgroup.mem > 0:
_vgroup.mem_weight = 1.0
@@ -146,7 +167,8 @@ class AppTopology(object):
_vgroup.mem_weight = 0.0

if self.resource.local_disk_avail > 0:
_vgroup.local_volume_weight = float(_vgroup.local_volume_size) / float(self.resource.local_disk_avail)
_vgroup.local_volume_weight = float(_vgroup.local_volume_size) / \
float(self.resource.local_disk_avail)
else:
if _vgroup.local_volume_size > 0:
_vgroup.local_volume_weight = 1.0
@@ -156,7 +178,8 @@ class AppTopology(object):
bandwidth = _vgroup.nw_bandwidth + _vgroup.io_bandwidth

if self.resource.nw_bandwidth_avail > 0:
_vgroup.bandwidth_weight = float(bandwidth) / float(self.resource.nw_bandwidth_avail)
_vgroup.bandwidth_weight = float(bandwidth) / \
float(self.resource.nw_bandwidth_avail)
else:
if bandwidth > 0:
_vgroup.bandwidth_weight = 1.0
@@ -168,12 +191,20 @@ class AppTopology(object):
self._set_vgroup_weight(svg)

def set_optimization_priority(self):
if len(self.vgroups) == 0 and len(self.vms) == 0 and len(self.volumes) == 0:
"""Set Optimization Priority.

This function calculates weights for bandwidth, cpu, memory, local
and overall volume for an app. Then sorts the results and sets
optimization order accordingly.
"""
if len(self.vgroups) == 0 and len(self.vms) == 0 and \
len(self.volumes) == 0:
return

app_nw_bandwidth_weight = -1
if self.resource.nw_bandwidth_avail > 0:
app_nw_bandwidth_weight = float(self.total_nw_bandwidth) / float(self.resource.nw_bandwidth_avail)
app_nw_bandwidth_weight = float(self.total_nw_bandwidth) / \
float(self.resource.nw_bandwidth_avail)
else:
if self.total_nw_bandwidth > 0:
app_nw_bandwidth_weight = 1.0
@@ -182,7 +213,8 @@ class AppTopology(object):

app_CPU_weight = -1
if self.resource.CPU_avail > 0:
app_CPU_weight = float(self.total_CPU) / float(self.resource.CPU_avail)
app_CPU_weight = float(self.total_CPU) / \
float(self.resource.CPU_avail)
else:
if self.total_CPU > 0:
app_CPU_weight = 1.0
@@ -191,7 +223,8 @@ class AppTopology(object):

app_mem_weight = -1
if self.resource.mem_avail > 0:
app_mem_weight = float(self.total_mem) / float(self.resource.mem_avail)
app_mem_weight = float(self.total_mem) / \
float(self.resource.mem_avail)
else:
if self.total_mem > 0:
app_mem_weight = 1.0
@@ -200,7 +233,8 @@ class AppTopology(object):

app_local_vol_weight = -1
if self.resource.local_disk_avail > 0:
app_local_vol_weight = float(self.total_local_vol) / float(self.resource.local_disk_avail)
app_local_vol_weight = float(self.total_local_vol) / \
float(self.resource.local_disk_avail)
else:
if self.total_local_vol > 0:
app_local_vol_weight = 1.0
@@ -213,7 +247,8 @@ class AppTopology(object):

app_vol_weight = -1
if self.resource.disk_avail > 0:
app_vol_weight = float(sum(total_vol_list)) / float(self.resource.disk_avail)
app_vol_weight = float(sum(total_vol_list)) / \
float(self.resource.disk_avail)
else:
if sum(total_vol_list) > 0:
app_vol_weight = 1.0
@@ -226,4 +261,6 @@ class AppTopology(object):
("lvol", app_local_vol_weight),
("vol", app_vol_weight)]

self.optimization_priority = sorted(opt, key=lambda resource: resource[1], reverse=True)
self.optimization_priority = sorted(opt,
key=lambda resource: resource[1],
reverse=True)
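Every weight in this file follows the same normalization: demand divided by available capacity when capacity is known, and a sentinel (1.0 for "saturated", 0.0 or -1 for "no demand / unknown") otherwise; the per-resource weights are then sorted descending to decide which resource to optimize first. A compact restatement of that pattern, illustrative only:

```python
# Sketch of the normalization repeated in _set_vm_weight, _set_vgroup_weight
# and set_optimization_priority above; numbers below are made up.
def normalized_weight(demand, avail, saturated=1.0, empty=0.0):
    """demand/avail when capacity is known; a sentinel otherwise."""
    if avail > 0:
        return float(demand) / float(avail)
    return saturated if demand > 0 else empty

opt = [("bw", normalized_weight(200, 1000)),
       ("cpu", normalized_weight(8, 64)),
       ("mem", normalized_weight(4096, 32768))]
# the resource under the highest relative pressure is optimized first
optimization_priority = sorted(opt, key=lambda r: r[1], reverse=True)
```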
@@ -1,24 +1,38 @@
#
# Copyright 2014-2017 AT&T Intellectual Property
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""App Topology Base.

This file contains different datatype base classes to be used when
building out app topology. These classes include VGroups, Volumes and Vms,
as well as 'Link' classes for each.
"""

LEVELS = ["host", "rack", "cluster"]


class VGroup(object):
"""VGroup Class.

This class represents a VGroup object (virtual group). It contains
data about the volumes or vms it contains (such as compute resources),
and data about the group itself (group type, etc).
"""

def __init__(self, _app_uuid, _uuid):
"""Init VGroup Class."""
self.app_uuid = _app_uuid
self.uuid = _uuid
self.name = None
@@ -55,6 +69,7 @@ class VGroup(object):
self.host = None

def get_json_info(self):
"""Return JSON info of VGroup Object."""
survgroup_id = None
if self.survgroup is None:
survgroup_id = "none"
@@ -95,8 +110,14 @@ class VGroup(object):


class VM(object):
"""VM Class.

This class represents a Virtual Machine object. Examples of data this
class contains are compute resources, the host, and status.
"""

def __init__(self, _app_uuid, _uuid):
"""Init VM Class."""
self.app_uuid = _app_uuid
self.uuid = _uuid
self.name = None
@@ -129,6 +150,7 @@ class VM(object):
self.host = None  # where this vm is placed

def get_json_info(self):
"""Return JSON info for VM object."""
survgroup_id = None
if self.survgroup is None:
survgroup_id = "none"
@@ -172,8 +194,15 @@ class VM(object):


class Volume(object):
"""Volume Class.

This class represents a volume, containing an app id and name, as well as
a list of links to VMs and the groups it belongs to. This also contains
data about the resources needed such as size, bandwidth and weight.
"""

def __init__(self, _app_uuid, _uuid):
"""Init Volume Class."""
self.app_uuid = _app_uuid
self.uuid = _uuid
self.name = None
@@ -198,6 +227,7 @@ class Volume(object):
self.storage_host = None

def get_json_info(self):
"""Return JSON info for a Volume."""
survgroup_id = None
if self.survgroup is None:
survgroup_id = "none"
@@ -229,35 +259,53 @@ class Volume(object):


class VGroupLink(object):
"""VGroup Link Class.

This class represents a link between VGroups.
"""

def __init__(self, _n):
"""Init VGroup Link."""
self.node = _n  # target VM or Volume
self.nw_bandwidth = 0
self.io_bandwidth = 0

def get_json_info(self):
"""Return JSON info of VGroup Link Object."""
return {'target': self.node.uuid,
'nw_bandwidth': self.nw_bandwidth,
'io_bandwidth': self.io_bandwidth}


class VMLink(object):
"""VM Link Class.

This class represents a link between VMs.
"""

def __init__(self, _n):
"""Init VM Link."""
self.node = _n  # target VM
self.nw_bandwidth = 0  # Mbps

def get_json_info(self):
"""Return JSON info of VM Link Object."""
return {'target': self.node.uuid,
'nw_bandwidth': self.nw_bandwidth}


class VolumeLink(object):
"""Volume Link Class.

This class represents a link between volumes.
"""

def __init__(self, _n):
"""Init Volume Link."""
self.node = _n  # target Volume
self.io_bandwidth = 0  # Mbps

def get_json_info(self):
"""Return JSON info of Volume Link Object."""
return {'target': self.node.uuid,
'io_bandwidth': self.io_bandwidth}
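The `LEVELS` list above is ordered from narrowest to widest scope, and the parser (next file) relies on `LEVELS.index()` comparisons to reject a nested group that spans a wider scope than its parent. A self-contained illustration of that rule, with the helper name being ours, not the project's:

```python
LEVELS = ["host", "rack", "cluster"]

def is_narrower_or_equal(child_level, parent_level):
    # a nested group may not span a wider scope than its nesting group
    return LEVELS.index(child_level) <= LEVELS.index(parent_level)

assert is_narrower_or_equal("host", "rack")        # host group inside a rack group: ok
assert not is_narrower_or_equal("cluster", "rack")  # cluster group inside a rack group: rejected
```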
@@ -1,22 +1,20 @@
#
# Copyright 2014-2017 AT&T Intellectual Property
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from valet.engine.optimizer.app_manager.app_topology_base import VGroup, VGroupLink, VM, VMLink, LEVELS
"""App Topology Parser.


'''
- Restrictions of nested groups: EX in EX, EX in DIV, DIV in EX, DIV in DIV
- VM/group cannot exist in multiple EX groups
- Nested group's level cannot be higher than nesting group
@@ -26,12 +24,21 @@ from valet.engine.optimizer.app_manager.app_topology_base import VGroup, VGroupL
OS::Heat::Stack
OS::Heat::ResourceGroup
OS::Heat::ResourceGroup
'''
"""

from valet.engine.optimizer.app_manager.app_topology_base \
import VGroup, VGroupLink, VM, VMLink, LEVELS


class Parser(object):
"""Parser Class.

This class handles parsing out the data related to the desired
topology from a template.
"""

def __init__(self, _high_level_allowed, _logger):
"""Init Parser Class."""
self.logger = _logger

self.high_level_allowed = _high_level_allowed
@@ -44,6 +51,7 @@ class Parser(object):
self.status = "success"

def set_topology(self, _graph):
"""Return result of set_topology which parses input to get topology."""
if "version" in _graph.keys():
self.format_version = _graph["version"]
else:
@@ -71,7 +79,7 @@ class Parser(object):
vgroup_captured = False
vms = {}

''' empty at this version '''
""" empty at this version """
volumes = {}

for rk, r in _elements.iteritems():
@@ -96,7 +104,8 @@ class Parser(object):
self.logger.debug("Parser: get a vm = " + vm.name)

elif r["type"] == "OS::Cinder::Volume":
self.logger.warn("Parser: do nothing for volume at this version")
self.logger.warn("Parser: do nothing for volume at this "
"version")

elif r["type"] == "ATT::Valet::GroupAssignment":
vgroup = VGroup(self.stack_id, rk)
@@ -110,7 +119,8 @@ class Parser(object):
elif r["properties"]["group_type"] == "exclusivity":
vgroup.vgroup_type = "EX"
else:
self.status = "unknown group = " + r["properties"]["group_type"]
self.status = "unknown group = " + \
r["properties"]["group_type"]
return {}, {}, {}
else:
self.status = "no group type"
@@ -129,8 +139,9 @@ class Parser(object):
vgroup.level = r["properties"]["level"]
if vgroup.level != "host":
if self.high_level_allowed is False:
self.status = "only host level of affinity group allowed " + \
"due to the mis-match of host naming convention"
self.status = "only host level of affinity group " \
"allowed due to the mis-match of " \
"host naming convention"
return {}, {}, {}
else:
self.status = "no grouping level"
@@ -150,16 +161,19 @@ class Parser(object):

self.logger.debug("Parser: all vms parsed")

if self._merge_diversity_groups(_elements, vgroups, vms, volumes) is False:
if self._merge_diversity_groups(_elements, vgroups, vms, volumes) \
is False:
return {}, {}, {}

if self._merge_exclusivity_groups(_elements, vgroups, vms, volumes) is False:
if self._merge_exclusivity_groups(_elements, vgroups, vms, volumes) \
is False:
return {}, {}, {}

if self._merge_affinity_groups(_elements, vgroups, vms, volumes) is False:
if self._merge_affinity_groups(_elements, vgroups, vms, volumes) \
is False:
return {}, {}, {}

''' delete all EX and DIV vgroups after merging '''
""" delete all EX and DIV vgroups after merging """
for vgk in vgroups.keys():
vg = vgroups[vgk]
if vg.vgroup_type == "DIV" or vg.vgroup_type == "EX":
@@ -186,13 +200,15 @@ class Parser(object):
if vk2 in _vms.keys():
link = VMLink(_vms[vk2])
if "bandwidth" in r["properties"].keys():
link.nw_bandwidth = r["properties"]["bandwidth"]["min"]
link.nw_bandwidth = \
r["properties"]["bandwidth"]["min"]
vm.vm_list.append(link)

def _set_volume_links(self, _elements, _vms, _volumes):
for rk, r in _elements.iteritems():
if r["type"] == "OS::Cinder::VolumeAttachment":
self.logger.warn("Parser: do nothing for volume attachment at this version")
self.logger.warn("Parser: do nothing for volume attachment at "
"this version")

return True

@@ -219,23 +235,31 @@ class Parser(object):
for vk in r["properties"]["resources"]:
if vk in _vms.keys():
vgroup.subvgroups[vk] = _vms[vk]
_vms[vk].diversity_groups[rk] = vgroup.level + ":" + vgroup.name
_vms[vk].diversity_groups[rk] = \
vgroup.level + ":" + vgroup.name
elif vk in _volumes.keys():
vgroup.subvgroups[vk] = _volumes[vk]
_volumes[vk].diversity_groups[rk] = vgroup.level + ":" + vgroup.name
_volumes[vk].diversity_groups[rk] = \
vgroup.level + ":" + vgroup.name
elif vk in _vgroups.keys():
vg = _vgroups[vk]

if LEVELS.index(vg.level) > LEVELS.index(level):
self.status = "grouping scope: nested group's level is higher"
self.status = "grouping scope: nested " \
"group's level is higher"
return False

if vg.vgroup_type == "DIV" or vg.vgroup_type == "EX":
self.status = "group type (" + vg.vgroup_type + ") not allowed to be nested in diversity group at this version"
if vg.vgroup_type == "DIV" or \
vg.vgroup_type == "EX":
self.status = "group type (" + \
vg.vgroup_type + ") not allowed " \
"to be nested in diversity " \
"group at this version"
return False

vgroup.subvgroups[vk] = vg
vg.diversity_groups[rk] = vgroup.level + ":" + vgroup.name
vg.diversity_groups[rk] = vgroup.level + ":" + \
vgroup.name
else:
self.status = "invalid resource = " + vk
return False
@@ -254,23 +278,34 @@ class Parser(object):
for vk in r["properties"]["resources"]:
if vk in _vms.keys():
vgroup.subvgroups[vk] = _vms[vk]
_vms[vk].exclusivity_groups[rk] = vgroup.level + ":" + vgroup.name
_vms[vk].exclusivity_groups[rk] = \
vgroup.level + ":" + vgroup.name
elif vk in _volumes.keys():
vgroup.subvgroups[vk] = _volumes[vk]
_volumes[vk].exclusivity_groups[rk] = vgroup.level + ":" + vgroup.name
_volumes[vk].exclusivity_groups[rk] = \
vgroup.level + ":" + vgroup.name
elif vk in _vgroups.keys():
vg = _vgroups[vk]

if LEVELS.index(vg.level) > LEVELS.index(level):
self.status = "grouping scope: nested group's level is higher"
self.status = "grouping scope: nested " \
"group's level is higher"
return False

if vg.vgroup_type == "DIV" or vg.vgroup_type == "EX":
self.status = "group type (" + vg.vgroup_type + ") not allowed to be nested in exclusivity group at this version"
if vg.vgroup_type == "DIV" or \
vg.vgroup_type == "EX":
self.status = "group type (" + \
vg.vgroup_type + ") not allowed " \
"to be nested " \
"in " \
"exclusivity " \
"group at " \
"this version"
return False

vgroup.subvgroups[vk] = vg
vg.exclusivity_groups[rk] = vgroup.level + ":" + vgroup.name
vg.exclusivity_groups[rk] = vgroup.level + ":" + \
vgroup.name
else:
self.status = "invalid resource = " + vk
return False
@@ -278,7 +313,8 @@ class Parser(object):
return True

def _merge_affinity_groups(self, _elements, _vgroups, _vms, _volumes):
affinity_map = {}  # key is uuid of vm, volume, or vgroup & value is its parent vgroup
# key is uuid of vm, volume, or vgroup & value is its parent vgroup
affinity_map = {}

for level in LEVELS:
for rk, r in _elements.iteritems():
@@ -292,7 +328,8 @@ class Parser(object):
else:
continue

self.logger.debug("Parser: merge for affinity = " + vgroup.name)
self.logger.debug("Parser: merge for affinity = " +
vgroup.name)

for vk in r["properties"]["resources"]:

@@ -302,8 +339,10 @@ class Parser(object):

affinity_map[vk] = vgroup

self._add_implicit_diversity_groups(vgroup, _vms[vk].diversity_groups)
self._add_implicit_exclusivity_groups(vgroup, _vms[vk].exclusivity_groups)
self._add_implicit_diversity_groups(
vgroup, _vms[vk].diversity_groups)
self._add_implicit_exclusivity_groups(
vgroup, _vms[vk].exclusivity_groups)
self._add_memberships(vgroup, _vms[vk])

del _vms[vk]
@@ -314,8 +353,10 @@ class Parser(object):

affinity_map[vk] = vgroup

self._add_implicit_diversity_groups(vgroup, _volumes[vk].diversity_groups)
self._add_implicit_exclusivity_groups(vgroup, _volumes[vk].exclusivity_groups)
self._add_implicit_diversity_groups(
vgroup, _volumes[vk].diversity_groups)
self._add_implicit_exclusivity_groups(
vgroup, _volumes[vk].exclusivity_groups)
self._add_memberships(vgroup, _volumes[vk])

del _volumes[vk]
@@ -324,19 +365,23 @@ class Parser(object):
vg = _vgroups[vk]

if LEVELS.index(vg.level) > LEVELS.index(level):
self.status = "grouping scope: nested group's level is higher"
self.status = "grouping scope: nested " \
"group's level is higher"
return False

if vg.vgroup_type == "DIV" or vg.vgroup_type == "EX":
if self._merge_subgroups(vgroup, vg.subvgroups, _vms, _volumes, _vgroups,
_elements, affinity_map) is False:
if self._merge_subgroups(
vgroup, vg.subvgroups, _vms, _volumes,
_vgroups, _elements, affinity_map) \
is False:
return False
del _vgroups[vk]
else:
if self._exist_in_subgroups(vk, vgroup) is None:
if self._get_subgroups(vg, _elements,
_vgroups, _vms, _volumes,
affinity_map) is False:
if self._get_subgroups(
vg, _elements, _vgroups, _vms,
_volumes, affinity_map) \
is False:
return False

vgroup.subvgroups[vk] = vg
@@ -344,24 +389,29 @@ class Parser(object):

affinity_map[vk] = vgroup

self._add_implicit_diversity_groups(vgroup, vg.diversity_groups)
self._add_implicit_exclusivity_groups(vgroup, vg.exclusivity_groups)
self._add_implicit_diversity_groups(
vgroup, vg.diversity_groups)
self._add_implicit_exclusivity_groups(
vgroup, vg.exclusivity_groups)
self._add_memberships(vgroup, vg)

del _vgroups[vk]

else:  # vk belongs to the other vgroup already or refer to invalid resource
else:
# vk belongs to the other vgroup already
# or refer to invalid resource
if vk not in affinity_map.keys():
self.status = "invalid resource = " + vk
return False

if affinity_map[vk].uuid != vgroup.uuid:
if self._exist_in_subgroups(vk, vgroup) is None:
self._set_implicit_grouping(vk, vgroup, affinity_map, _vgroups)
self._set_implicit_grouping(
vk, vgroup, affinity_map, _vgroups)

return True

def _merge_subgroups(self, _vgroup, _subgroups, _vms, _volumes, _vgroups, _elements, _affinity_map):
def _merge_subgroups(self, _vgroup, _subgroups, _vms, _volumes, _vgroups,
_elements, _affinity_map):
for vk, _ in _subgroups.iteritems():
if vk in _vms.keys():
_vgroup.subvgroups[vk] = _vms[vk]
@@ -369,8 +419,10 @@ class Parser(object):

_affinity_map[vk] = _vgroup

self._add_implicit_diversity_groups(_vgroup, _vms[vk].diversity_groups)
self._add_implicit_exclusivity_groups(_vgroup, _vms[vk].exclusivity_groups)
self._add_implicit_diversity_groups(
_vgroup, _vms[vk].diversity_groups)
self._add_implicit_exclusivity_groups(
_vgroup, _vms[vk].exclusivity_groups)
self._add_memberships(_vgroup, _vms[vk])

del _vms[vk]
@@ -381,8 +433,10 @@ class Parser(object):

_affinity_map[vk] = _vgroup

self._add_implicit_diversity_groups(_vgroup, _volumes[vk].diversity_groups)
self._add_implicit_exclusivity_groups(_vgroup, _volumes[vk].exclusivity_groups)
self._add_implicit_diversity_groups(
_vgroup, _volumes[vk].diversity_groups)
self._add_implicit_exclusivity_groups(
_vgroup, _volumes[vk].exclusivity_groups)
self._add_memberships(_vgroup, _volumes[vk])

del _volumes[vk]
@@ -391,7 +445,8 @@ class Parser(object):
vg = _vgroups[vk]

if LEVELS.index(vg.level) > LEVELS.index(_vgroup.level):
self.status = "grouping scope: nested group's level is higher"
self.status = "grouping scope: nested group's level is " \
"higher"
return False

if vg.vgroup_type == "DIV" or vg.vgroup_type == "EX":
@@ -402,7 +457,9 @@ class Parser(object):
del _vgroups[vk]
else:
if self._exist_in_subgroups(vk, _vgroup) is None:
if self._get_subgroups(vg, _elements, _vgroups, _vms, _volumes, _affinity_map) is False:
if self._get_subgroups(vg, _elements, _vgroups, _vms,
_volumes, _affinity_map) \
is False:
return False

_vgroup.subvgroups[vk] = vg
@@ -410,13 +467,16 @@ class Parser(object):

_affinity_map[vk] = _vgroup

self._add_implicit_diversity_groups(_vgroup, vg.diversity_groups)
self._add_implicit_exclusivity_groups(_vgroup, vg.exclusivity_groups)
self._add_implicit_diversity_groups(
_vgroup, vg.diversity_groups)
self._add_implicit_exclusivity_groups(
_vgroup, vg.exclusivity_groups)
self._add_memberships(_vgroup, vg)

del _vgroups[vk]

else:  # vk belongs to the other vgroup already or refer to invalid resource
else:
# vk belongs to the other vgroup already
# or refer to invalid resource
if vk not in _affinity_map.keys():
self.status = "invalid resource = " + vk
return False
@@ -427,7 +487,8 @@ class Parser(object):

return True

def _get_subgroups(self, _vgroup, _elements, _vgroups, _vms, _volumes, _affinity_map):
def _get_subgroups(self, _vgroup, _elements, _vgroups, _vms, _volumes,
_affinity_map):

for vk in _elements[_vgroup.uuid]["properties"]["resources"]:

@@ -437,8 +498,10 @@ class Parser(object):

_affinity_map[vk] = _vgroup

self._add_implicit_diversity_groups(_vgroup, _vms[vk].diversity_groups)
self._add_implicit_exclusivity_groups(_vgroup, _vms[vk].exclusivity_groups)
self._add_implicit_diversity_groups(
_vgroup, _vms[vk].diversity_groups)
self._add_implicit_exclusivity_groups(
_vgroup, _vms[vk].exclusivity_groups)
self._add_memberships(_vgroup, _vms[vk])

del _vms[vk]
@@ -449,8 +512,10 @@ class Parser(object):

_affinity_map[vk] = _vgroup

self._add_implicit_diversity_groups(_vgroup, _volumes[vk].diversity_groups)
self._add_implicit_exclusivity_groups(_vgroup, _volumes[vk].exclusivity_groups)
self._add_implicit_diversity_groups(
_vgroup, _volumes[vk].diversity_groups)
self._add_implicit_exclusivity_groups(
_vgroup, _volumes[vk].exclusivity_groups)
self._add_memberships(_vgroup, _volumes[vk])

del _volumes[vk]
@@ -459,7 +524,8 @@ class Parser(object):
vg = _vgroups[vk]

if LEVELS.index(vg.level) > LEVELS.index(_vgroup.level):
self.status = "grouping scope: nested group's level is higher"
self.status = "grouping scope: nested group's level is " \
"higher"
return False

if vg.vgroup_type == "DIV" or vg.vgroup_type == "EX":
@@ -470,7 +536,9 @@ class Parser(object):
del _vgroups[vk]
else:
if self._exist_in_subgroups(vk, _vgroup) is None:
if self._get_subgroups(vg, _elements, _vgroups, _vms, _volumes, _affinity_map) is False:
if self._get_subgroups(
vg, _elements, _vgroups, _vms, _volumes,
_affinity_map) is False:
return False

_vgroup.subvgroups[vk] = vg
@@ -478,8 +546,10 @@ class Parser(object):

_affinity_map[vk] = _vgroup

self._add_implicit_diversity_groups(_vgroup, vg.diversity_groups)
self._add_implicit_exclusivity_groups(_vgroup, vg.exclusivity_groups)
self._add_implicit_diversity_groups(
_vgroup, vg.diversity_groups)
self._add_implicit_exclusivity_groups(
_vgroup, vg.exclusivity_groups)
self._add_memberships(_vgroup, vg)

del _vgroups[vk]
@@ -490,7 +560,8 @@ class Parser(object):

if _affinity_map[vk].uuid != _vgroup.uuid:
if self._exist_in_subgroups(vk, _vgroup) is None:
self._set_implicit_grouping(vk, _vgroup, _affinity_map, _vgroups)
self._set_implicit_grouping(
vk, _vgroup, _affinity_map, _vgroups)

return True

@@ -529,26 +600,25 @@ class Parser(object):
def _set_implicit_grouping(self, _vk, _s_vg, _affinity_map, _vgroups):
t_vg = _affinity_map[_vk]  # where _vk currently belongs to

if t_vg.uuid in _affinity_map.keys():  # if the parent belongs to the other parent vgroup
self._set_implicit_grouping(t_vg.uuid, _s_vg, _affinity_map, _vgroups)
# if the parent belongs to the other parent vgroup
if t_vg.uuid in _affinity_map.keys():
self._set_implicit_grouping(
t_vg.uuid, _s_vg, _affinity_map, _vgroups)

else:
if LEVELS.index(t_vg.level) > LEVELS.index(_s_vg.level):
t_vg.level = _s_vg.level

'''
self.status = "Grouping scope: sub-group's level is larger"
return False
'''

if self._exist_in_subgroups(t_vg.uuid, _s_vg) is None:
_s_vg.subvgroups[t_vg.uuid] = t_vg
t_vg.survgroup = _s_vg

_affinity_map[t_vg.uuid] = _s_vg

self._add_implicit_diversity_groups(_s_vg, t_vg.diversity_groups)
self._add_implicit_exclusivity_groups(_s_vg, t_vg.exclusivity_groups)
self._add_implicit_diversity_groups(
_s_vg, t_vg.diversity_groups)
self._add_implicit_exclusivity_groups(
_s_vg, t_vg.exclusivity_groups)
self._add_memberships(_s_vg, t_vg)

del _vgroups[t_vg.uuid]
@@ -567,16 +637,19 @@ class Parser(object):
return containing_vg_uuid

def _set_vgroup_links(self, _vgroup, _vgroups, _vms, _volumes):
for _, svg in _vgroup.subvgroups.iteritems():  # currently, not define vgroup itself in pipe
for _, svg in _vgroup.subvgroups.iteritems():
# currently, not define vgroup itself in pipe
if isinstance(svg, VM):
for vml in svg.vm_list:
found = False
for _, tvgroup in _vgroups.iteritems():
containing_vg_uuid = self._exist_in_subgroups(vml.node.uuid, tvgroup)
containing_vg_uuid = self._exist_in_subgroups(
vml.node.uuid, tvgroup)
if containing_vg_uuid is not None:
found = True
if containing_vg_uuid != _vgroup.uuid and \
self._exist_in_subgroups(containing_vg_uuid, _vgroup) is None:
self._exist_in_subgroups(
containing_vg_uuid, _vgroup) is None:
self._add_nw_link(vml, _vgroup)
break
if found is False:
@@ -587,11 +660,13 @@ class Parser(object):
for voll in svg.volume_list:
found = False
for _, tvgroup in _vgroups.iteritems():
containing_vg_uuid = self._exist_in_subgroups(voll.node.uuid, tvgroup)
containing_vg_uuid = self._exist_in_subgroups(
voll.node.uuid, tvgroup)
if containing_vg_uuid is not None:
found = True
if containing_vg_uuid != _vgroup.uuid and \
self._exist_in_subgroups(containing_vg_uuid, _vgroup) is None:
self._exist_in_subgroups(
containing_vg_uuid, _vgroup) is None:
self._add_io_link(voll, _vgroup)
break
if found is False:
@@ -603,7 +678,8 @@ class Parser(object):
self._set_vgroup_links(svg, _vgroups, _vms, _volumes)

for svgl in svg.vgroup_list:  # svgl is a link to VM or Volume
if self._exist_in_subgroups(svgl.node.uuid, _vgroup) is None:
if self._exist_in_subgroups(svgl.node.uuid, _vgroup) \
is None:
self._add_nw_link(svgl, _vgroup)
self._add_io_link(svgl, _vgroup)
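The control flow in set_topology above merges group memberships in a fixed order: diversity, then exclusivity, then affinity, after which the DIV/EX groups themselves are discarded because their memberships have been pushed down onto the VMs and nested groups. A compact, hedged restatement of that sequence (the function and its signature are ours, not the parser's):

```python
def merge_all(elements, vgroups, vms, volumes, merges):
    """Apply the three merge passes in order; empty dicts signal failure.

    `merges` is the ordered list of merge callables, e.g. the parser's
    _merge_diversity_groups, _merge_exclusivity_groups and
    _merge_affinity_groups bound methods.
    """
    # order matters: diversity/exclusivity memberships must be recorded
    # before affinity groups are merged
    for merge in merges:
        if merge(elements, vgroups, vms, volumes) is False:
            return {}, {}, {}
    # EX/DIV groups only mark memberships; drop them once merged
    for vgk in list(vgroups):
        if vgroups[vgk].vgroup_type in ("DIV", "EX"):
            del vgroups[vgk]
    return vgroups, vms, volumes
```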
@@ -1,21 +1,30 @@
#
# Copyright 2014-2017 AT&T Intellectual Property
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""App."""


class App(object):
"""App Class.

This class represents an app object that consists of the name and id of
the app, as well as the status and vms/volumes/vgroups it belongs to.
"""

def __init__(self, _app_id, _app_name, _action):
"""Init App."""
self.app_id = _app_id
self.app_name = _app_name

@@ -30,21 +39,25 @@ class App(object):
self.status = 'requested'  # Moved to "scheduled" (and then "placed")

def add_vm(self, _vm, _host_name):
"""Add vm to app, set status to scheduled."""
self.vms[_vm.uuid] = _vm
self.vms[_vm.uuid].status = "scheduled"
self.vms[_vm.uuid].host = _host_name

def add_volume(self, _vol, _host_name):
"""Add volume to app, set status to scheduled."""
self.vms[_vol.uuid] = _vol
self.vms[_vol.uuid].status = "scheduled"
self.vms[_vol.uuid].storage_host = _host_name

def add_vgroup(self, _vg, _host_name):
"""Add vgroup to app, set status to scheduled."""
self.vgroups[_vg.uuid] = _vg
self.vgroups[_vg.uuid].status = "scheduled"
self.vgroups[_vg.uuid].host = _host_name

def get_json_info(self):
"""Return JSON info of App including vms, vols and vgs."""
vms = {}
for vmk, vm in self.vms.iteritems():
vms[vmk] = vm.get_json_info()
@@ -66,6 +79,7 @@ class App(object):
'VGroups': vgs}

def log_in_info(self):
"""Return in info related to login (time of login, app name, etc)."""
return {'action': self.request_type,
'timestamp': self.timestamp_scheduled,
'stack_id': self.app_id,
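For context, a tiny usage sketch of the App lifecycle above, using a stand-in object in place of a parsed VM (and assuming, as the hunks suggest, that App initializes an empty `vms` dict in the elided part of `__init__`):

```python
class _StubVM(object):
    # minimal stand-in for a parsed VM object
    def __init__(self, uuid):
        self.uuid = uuid
        self.status = None
        self.host = None

vm = _StubVM("vm-1")
app = App("stack-123", "demo-app", "create")  # ids here are illustrative
app.add_vm(vm, "host-01")
assert app.vms["vm-1"].status == "scheduled"
assert app.vms["vm-1"].host == "host-01"
```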
@@ -1,4 +1,4 @@
# Version 2.0.2: Feb. 9, 2016
# Version 2.0.2:

# Set database keyspace
db_keyspace=valet_test
@@ -12,6 +12,3 @@ db_app_table=app
db_uuid_table=uuid_map

#replication_factor=3
@@ -1,24 +1,32 @@
#
# Copyright 2014-2017 AT&T Intellectual Property
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Configuration."""

import sys


class Config(object):
"""Config Class.

This class consists of one function that reads client config options
from a file and sets the corresponding config variables of this class.
"""

def __init__(self):
"""Init Config class."""
self.mode = None

self.db_keyspace = None
@@ -32,6 +40,7 @@ class Config(object):
self.db_uuid_table = None

def configure(self):
"""Read client config file for config options and return success."""
try:
f = open("./client.cfg", "r")
line = f.readline()
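The client.cfg shown two files above is a simple "key=value" file with "#" comments, and Config.configure reads it line by line. A minimal reader in that spirit, illustrative only (not the project's actual parsing code):

```python
def read_cfg(path="./client.cfg"):
    """Parse key=value lines, skipping blanks and # comments."""
    opts = {}
    with open(path) as f:
        for line in f:
            line = line.strip()
            if not line or line.startswith("#"):
                continue
            key, _, value = line.partition("=")
            opts[key.strip()] = value.strip()
    return opts

# e.g. read_cfg()["db_keyspace"] would yield "valet_test" for the file above
```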
@@ -1,24 +1,33 @@
#
# Copyright 2014-2017 AT&T Intellectual Property
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Event."""

import json


class Event(object):
"""Event Class.

This class represents an event and all the necessary metadata to
properly track it and set the data for the event. Handles object_action
events and build and run instance events.
"""

def __init__(self, _id):
"""Init Event Class."""
self.event_id = _id
self.exchange = None
self.method = None
@@ -56,6 +65,13 @@ class Event(object):
self.uuid = None

def set_data(self):
"""Set event data depending on method (action) performed.

- If object_action, change data and calculate correct
compute resources for instance or Compute Node.
- If build_and_run_instance, get data from scheduler
and set heat values.
"""
if self.method == 'object_action':
self.change_list = self.args['objinst']['nova_object.changes']
self.change_data = self.args['objinst']['nova_object.data']
@@ -103,33 +119,43 @@ class Event(object):
if 'host' in self.change_data.keys():
self.host = self.change_data['host']

if 'deleted' in self.change_list and 'deleted' in self.change_data.keys():
if self.change_data['deleted'] == "true" or self.change_data['deleted'] is True:
if 'deleted' in self.change_list and 'deleted' in \
self.change_data.keys():
if self.change_data['deleted'] == "true" or \
self.change_data['deleted'] is True:
self.status = "disabled"

if 'vcpus' in self.change_list and 'vcpus' in self.change_data.keys():
if 'vcpus' in self.change_list and 'vcpus' in \
self.change_data.keys():
self.vcpus = self.change_data['vcpus']

if 'vcpus_used' in self.change_list and 'vcpus_used' in self.change_data.keys():
if 'vcpus_used' in self.change_list and 'vcpus_used' in \
self.change_data.keys():
self.vcpus_used = self.change_data['vcpus_used']

if 'memory_mb' in self.change_list and 'memory_mb' in self.change_data.keys():
if 'memory_mb' in self.change_list and 'memory_mb' in \
self.change_data.keys():
self.mem = self.change_data['memory_mb']

if 'free_ram_mb' in self.change_list and 'free_ram_mb' in self.change_data.keys():
if 'free_ram_mb' in self.change_list and 'free_ram_mb' in \
self.change_data.keys():
self.free_mem = self.change_data['free_ram_mb']

if 'local_gb' in self.change_list and 'local_gb' in self.change_data.keys():
if 'local_gb' in self.change_list and 'local_gb' in \
self.change_data.keys():
self.local_disk = self.change_data['local_gb']

if 'free_disk_gb' in self.change_list and 'free_disk_gb' in self.change_data.keys():
if 'free_disk_gb' in self.change_list and 'free_disk_gb' in \
self.change_data.keys():
self.free_local_disk = self.change_data['free_disk_gb']

if 'disk_available_least' in self.change_list and \
'disk_available_least' in self.change_data.keys():
self.disk_available_least = self.change_data['disk_available_least']
self.disk_available_least = \
self.change_data['disk_available_least']

if 'numa_topology' in self.change_list and 'numa_topology' in self.change_data.keys():
if 'numa_topology' in self.change_list and 'numa_topology' in \
self.change_data.keys():
str_numa_topology = self.change_data['numa_topology']
try:
numa_topology = json.loads(str_numa_topology)
@@ -137,7 +163,10 @@ class Event(object):

if 'nova_object.data' in numa_topology.keys():
if 'cells' in numa_topology['nova_object.data']:
for cell in numa_topology['nova_object.data']['cells']:
for cell in \
numa_topology[
'nova_object.data'
]['cells']:
self.numa_cell_list.append(cell)

except (ValueError, KeyError, TypeError):
@@ -146,13 +175,18 @@ class Event(object):

elif self.method == 'build_and_run_instance':
if 'scheduler_hints' in self.args['filter_properties'].keys():
scheduler_hints = self.args['filter_properties']['scheduler_hints']
scheduler_hints = self.args[
'filter_properties'
]['scheduler_hints']
if 'heat_resource_name' in scheduler_hints.keys():
self.heat_resource_name = scheduler_hints['heat_resource_name']
self.heat_resource_name = \
scheduler_hints['heat_resource_name']
if 'heat_resource_uuid' in scheduler_hints.keys():
self.heat_resource_uuid = scheduler_hints['heat_resource_uuid']
self.heat_resource_uuid = \
scheduler_hints['heat_resource_uuid']
if 'heat_root_stack_id' in scheduler_hints.keys():
self.heat_root_stack_id = scheduler_hints['heat_root_stack_id']
self.heat_root_stack_id = \
scheduler_hints['heat_root_stack_id']
if 'heat_stack_name' in scheduler_hints.keys():
self.heat_stack_name = scheduler_hints['heat_stack_name']
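Event.set_data repeats one guard over and over: a field is consumed only if it is both listed in `nova_object.changes` and present in `nova_object.data`. The same check can be factored into a helper; a sketch under that assumption (the helper is ours, not the project's):

```python
def changed(change_list, change_data, key, default=None):
    # the field must be both listed as changed and present in the payload
    if key in change_list and key in change_data:
        return change_data[key]
    return default

# illustrative payload
change_list = ['vcpus', 'memory_mb']
change_data = {'vcpus': 8, 'memory_mb': 16384}
vcpus = changed(change_list, change_data, 'vcpus', -1)           # -> 8
free_mem = changed(change_list, change_data, 'free_ram_mb', -1)  # -> -1
```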
@ -1,18 +1,20 @@
|
||||
#
|
||||
# Copyright 2014-2017 AT&T Intellectual Property
|
||||
#
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Music Handler."""
|
||||
|
||||
import json
|
||||
import operator
|
||||
from valet.api.db.models.music import Music
|
||||
@ -20,8 +22,14 @@ from valet.engine.optimizer.db_connect.event import Event
|
||||
|
||||
|
||||
class MusicHandler(object):
|
||||
"""Music Handler Class.
|
||||
|
||||
This Class consists of functions that interact with the music
|
||||
database for valet and returns/deletes/updates objects within it.
|
||||
"""
|
||||
|
||||
def __init__(self, _config, _logger):
|
||||
"""Init Music Handler."""
|
||||
self.config = _config
|
||||
self.logger = _logger
|
||||
|
||||
@ -32,9 +40,17 @@ class MusicHandler(object):
|
||||
if self.config.mode.startswith("sim"):
|
||||
self.music = Music()
|
||||
elif self.config.mode.startswith("live"):
|
||||
self.music = Music(hosts=self.config.db_hosts, replication_factor=self.config.replication_factor)
|
||||
self.music = Music(
|
||||
hosts=self.config.db_hosts,
|
||||
replication_factor=self.config.replication_factor)
|
||||
|
||||
def init_db(self):
|
||||
"""Init Database.
|
||||
|
||||
This function initializes a database in Music by creating all the
|
||||
necessary tables with the proper schemas in Music using API calls.
|
||||
Return True if no exceptions are caught.
|
||||
"""
|
||||
self.logger.info("MusicHandler.init_db: create table")
|
||||
|
||||
try:
|
||||
@ -51,7 +67,8 @@ class MusicHandler(object):
|
||||
'PRIMARY KEY': '(stack_id)'
|
||||
}
|
||||
try:
|
||||
self.music.create_table(self.config.db_keyspace, self.config.db_request_table, schema)
|
||||
self.music.create_table(self.config.db_keyspace,
|
||||
self.config.db_request_table, schema)
|
||||
except Exception as e:
|
||||
self.logger.error("MUSIC error: " + str(e))
|
||||
return False
|
||||
@ -62,7 +79,8 @@ class MusicHandler(object):
|
||||
'PRIMARY KEY': '(stack_id)'
|
||||
}
|
||||
try:
|
||||
self.music.create_table(self.config.db_keyspace, self.config.db_response_table, schema)
|
||||
self.music.create_table(self.config.db_keyspace,
|
||||
self.config.db_response_table, schema)
|
||||
except Exception as e:
|
||||
self.logger.error("MUSIC error: " + str(e))
|
||||
return False
|
||||
@ -75,7 +93,8 @@ class MusicHandler(object):
|
||||
'PRIMARY KEY': '(timestamp)'
|
||||
}
|
||||
try:
|
||||
self.music.create_table(self.config.db_keyspace, self.config.db_event_table, schema)
|
||||
self.music.create_table(self.config.db_keyspace,
|
||||
self.config.db_event_table, schema)
|
||||
except Exception as e:
|
||||
self.logger.error("MUSIC error: " + str(e))
|
||||
return False
|
||||
@ -86,7 +105,8 @@ class MusicHandler(object):
|
||||
'PRIMARY KEY': '(site_name)'
|
||||
}
|
||||
try:
|
||||
self.music.create_table(self.config.db_keyspace, self.config.db_resource_table, schema)
|
||||
self.music.create_table(self.config.db_keyspace,
|
||||
self.config.db_resource_table, schema)
|
||||
except Exception as e:
|
||||
self.logger.error("MUSIC error: " + str(e))
|
||||
return False
|
||||
@ -97,7 +117,8 @@ class MusicHandler(object):
|
||||
'PRIMARY KEY': '(stack_id)'
|
||||
}
|
||||
try:
|
||||
self.music.create_table(self.config.db_keyspace, self.config.db_app_table, schema)
|
||||
self.music.create_table(self.config.db_keyspace,
|
||||
self.config.db_app_table, schema)
|
||||
except Exception as e:
|
||||
self.logger.error("MUSIC error: " + str(e))
|
||||
return False
|
||||
@ -108,7 +129,8 @@ class MusicHandler(object):
|
||||
'PRIMARY KEY': '(site_name)'
|
||||
}
|
||||
try:
|
||||
self.music.create_table(self.config.db_keyspace, self.config.db_app_index_table, schema)
|
||||
self.music.create_table(self.config.db_keyspace,
|
||||
self.config.db_app_index_table, schema)
|
||||
except Exception as e:
|
||||
self.logger.error("MUSIC error: " + str(e))
|
||||
return False
|
||||
@ -119,7 +141,8 @@ class MusicHandler(object):
|
||||
'PRIMARY KEY': '(site_name)'
|
||||
}
|
||||
try:
|
||||
self.music.create_table(self.config.db_keyspace, self.config.db_resource_index_table, schema)
|
||||
self.music.create_table(self.config.db_keyspace,
|
||||
self.config.db_resource_index_table, schema)
|
||||
except Exception as e:
|
||||
self.logger.error("MUSIC error: " + str(e))
|
||||
return False
|
||||
@ -131,7 +154,8 @@ class MusicHandler(object):
|
||||
'PRIMARY KEY': '(uuid)'
|
||||
}
|
||||
try:
|
||||
self.music.create_table(self.config.db_keyspace, self.config.db_uuid_table, schema)
|
||||
self.music.create_table(self.config.db_keyspace,
|
||||
self.config.db_uuid_table, schema)
|
||||
except Exception as e:
|
||||
self.logger.error("MUSIC error: " + str(e))
|
||||
return False
|
||||
@ -139,11 +163,18 @@ class MusicHandler(object):
|
||||
return True
|
||||
|
||||
def get_events(self):
|
||||
"""Get Events.
|
||||
|
||||
This function obtains all events from the database and then
|
||||
iterates through all of them to check the method and perform the
|
||||
corresponding action on them. Return Event list.
|
||||
"""
|
||||
event_list = []
|
||||
|
||||
events = {}
|
||||
try:
|
||||
events = self.music.read_all_rows(self.config.db_keyspace, self.config.db_event_table)
|
||||
events = self.music.read_all_rows(self.config.db_keyspace,
|
||||
self.config.db_event_table)
|
||||
except Exception as e:
|
||||
self.logger.error("MUSIC error while reading events: " + str(e))
|
||||
return None
|
||||
@ -155,30 +186,37 @@ class MusicHandler(object):
|
||||
method = row['method']
|
||||
args_data = row['args']
|
||||
|
||||
self.logger.debug("MusicHandler.get_events: event (" + event_id + ") is entered")
|
||||
self.logger.debug("MusicHandler.get_events: event (" +
|
||||
event_id + ") is entered")
|
||||
|
||||
if exchange != "nova":
|
||||
if self.delete_event(event_id) is False:
|
||||
return None
|
||||
self.logger.debug("MusicHandler.get_events: event exchange (" + exchange + ") is not supported")
|
||||
self.logger.debug("MusicHandler.get_events: event exchange "
|
||||
"(" + exchange + ") is not supported")
|
||||
continue
|
||||
|
||||
if method != 'object_action' and method != 'build_and_run_instance':
|
||||
if method != 'object_action' and method != 'build_and_run_' \
|
||||
'instance':
|
||||
if self.delete_event(event_id) is False:
|
||||
return None
|
||||
self.logger.debug("MusicHandler.get_events: event method (" + method + ") is not considered")
|
||||
self.logger.debug("MusicHandler.get_events: event method "
|
||||
"(" + method + ") is not considered")
|
||||
continue
|
||||
|
||||
if len(args_data) == 0:
|
||||
if self.delete_event(event_id) is False:
|
||||
return None
|
||||
self.logger.debug("MusicHandler.get_events: event does not have args")
|
||||
self.logger.debug("MusicHandler.get_events: event does not "
|
||||
"have args")
|
||||
continue
|
||||
|
||||
try:
|
||||
args = json.loads(args_data)
|
||||
except (ValueError, KeyError, TypeError):
|
||||
self.logger.warn("MusicHandler.get_events: error while decoding to JSON event = " + method + ":" + event_id)
|
||||
self.logger.warn("MusicHandler.get_events: error while "
|
||||
"decoding to JSON event = " + method +
|
||||
":" + event_id)
|
||||
continue
|
||||
|
||||
if method == 'object_action':
|
||||
@ -193,15 +231,19 @@ class MusicHandler(object):
|
||||
change_data = objinst['nova_object.data']
|
||||
if 'vm_state' in change_list and \
|
||||
'vm_state' in change_data.keys():
|
||||
if change_data['vm_state'] == 'deleted' or \
|
||||
change_data['vm_state'] == 'active':
|
||||
if change_data['vm_state'] == \
|
||||
'deleted' \
|
||||
or change_data[
|
||||
'vm_state'
|
||||
] == 'active':
|
||||
e = Event(event_id)
|
||||
e.exchange = exchange
|
||||
e.method = method
|
||||
e.args = args
|
||||
event_list.append(e)
|
||||
else:
|
||||
if self.delete_event(event_id) is False:
|
||||
if self.delete_event(event_id) \
|
||||
is False:
|
||||
return None
|
||||
else:
|
||||
if self.delete_event(event_id) is False:
|
||||
@ -257,7 +299,8 @@ class MusicHandler(object):
|
||||
for e in event_list:
|
||||
e.set_data()
|
||||
|
||||
self.logger.debug("MusicHandler.get_events: event (" + e.event_id + ") is parsed")
|
||||
self.logger.debug("MusicHandler.get_events: event (" +
|
||||
e.event_id + ") is parsed")
|
||||
|
||||
if e.method == "object_action":
|
||||
if e.object_name == 'Instance':
|
||||
@ -265,17 +308,20 @@ class MusicHandler(object):
|
||||
e.host is None or e.host == "none" or \
|
||||
e.vcpus == -1 or e.mem == -1:
|
||||
error_event_list.append(e)
|
||||
self.logger.warn("MusicHandler.get_events: data missing in instance object event")
|
||||
self.logger.warn("MusicHandler.get_events: data "
|
||||
"missing in instance object event")
|
||||
|
||||
elif e.object_name == 'ComputeNode':
|
||||
if e.host is None or e.host == "none":
|
||||
error_event_list.append(e)
|
||||
self.logger.warn("MusicHandler.get_events: data missing in compute object event")
|
||||
self.logger.warn("MusicHandler.get_events: data "
|
||||
"missing in compute object event")
|
||||
|
||||
elif e.method == "build_and_run_instance":
|
||||
if e.uuid is None or e.uuid == "none":
|
||||
error_event_list.append(e)
|
||||
self.logger.warn("MusicHandler.get_events: data missing in build event")
|
||||
self.logger.warn("MusicHandler.get_events: data missing "
|
||||
"in build event")
|
||||
|
||||
if len(error_event_list) > 0:
|
||||
event_list[:] = [e for e in event_list if e not in error_event_list]
|
||||
@ -286,6 +332,7 @@ class MusicHandler(object):
|
||||
return event_list
|
||||
|
||||
def delete_event(self, _event_id):
|
||||
"""Return True after deleting corresponding event row in db."""
|
||||
try:
|
||||
self.music.delete_row_eventually(self.config.db_keyspace,
|
||||
self.config.db_event_table,
|
||||
@ -297,12 +344,14 @@ class MusicHandler(object):
|
||||
return True
|
||||
|
||||
def get_uuid(self, _uuid):
"""Return h_uuid and s_uuid from matching _uuid row in music db."""
h_uuid = "none"
s_uuid = "none"

row = {}
try:
row = self.music.read_row(self.config.db_keyspace, self.config.db_uuid_table, 'uuid', _uuid)
row = self.music.read_row(self.config.db_keyspace,
self.config.db_uuid_table, 'uuid', _uuid)
except Exception as e:
self.logger.error("MUSIC error while reading uuid: " + str(e))
return None
@ -311,18 +360,22 @@ class MusicHandler(object):
h_uuid = row[row.keys()[0]]['h_uuid']
s_uuid = row[row.keys()[0]]['s_uuid']

self.logger.info("MusicHandler.get_uuid: get heat uuid (" + h_uuid + ") for uuid = " + _uuid)
self.logger.info("MusicHandler.get_uuid: get heat uuid (" +
h_uuid + ") for uuid = " + _uuid)
else:
self.logger.debug("MusicHandler.get_uuid: heat uuid not found")

return h_uuid, s_uuid

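The read-and-fall-back idiom above recurs throughout MusicHandler. A condensed sketch of the pattern, assuming only the Music client's read_row(keyspace, table, key, value) call as used in this diff; the helper itself is hypothetical:

    def read_one(music, config, logger, table, key, value):
        """Read a single row, logging and returning None on a MUSIC error."""
        try:
            return music.read_row(config.db_keyspace, table, key, value)
        except Exception as e:
            logger.error("MUSIC error while reading " + table + ": " + str(e))
            return None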
def put_uuid(self, _e):
"""Insert uuid, h_uuid and s_uuid from event into new row in db."""
heat_resource_uuid = "none"
heat_root_stack_id = "none"
if _e.heat_resource_uuid is not None and _e.heat_resource_uuid != "none":
if _e.heat_resource_uuid is not None and \
_e.heat_resource_uuid != "none":
heat_resource_uuid = _e.heat_resource_uuid
if _e.heat_root_stack_id is not None and _e.heat_root_stack_id != "none":
if _e.heat_root_stack_id is not None and \
_e.heat_root_stack_id != "none":
heat_root_stack_id = _e.heat_root_stack_id

data = {
@ -332,7 +385,8 @@ class MusicHandler(object):
}

try:
self.music.create_row(self.config.db_keyspace, self.config.db_uuid_table, data)
self.music.create_row(self.config.db_keyspace,
self.config.db_uuid_table, data)
except Exception as e:
self.logger.error("MUSIC error while inserting uuid: " + str(e))
return False
@ -342,8 +396,11 @@ class MusicHandler(object):
return True

def delete_uuid(self, _k):
"""Return True after deleting row corresponding to event uuid."""
try:
self.music.delete_row_eventually(self.config.db_keyspace, self.config.db_uuid_table, 'uuid', _k)
self.music.delete_row_eventually(self.config.db_keyspace,
self.config.db_uuid_table, 'uuid',
_k)
except Exception as e:
self.logger.error("MUSIC error while deleting uuid: " + str(e))
return False
@ -351,17 +408,20 @@ class MusicHandler(object):
return True

def get_requests(self):
"""Return list of requests that consists of all rows in a db table."""
request_list = []

requests = {}
try:
requests = self.music.read_all_rows(self.config.db_keyspace, self.config.db_request_table)
requests = self.music.read_all_rows(self.config.db_keyspace,
self.config.db_request_table)
except Exception as e:
self.logger.error("MUSIC error while reading requests: " + str(e))
return None

if len(requests) > 0:
self.logger.info("MusicHandler.get_requests: placement request arrived")
self.logger.info("MusicHandler.get_requests: placement request "
"arrived")

for _, row in requests.iteritems():
self.logger.info(" request_id = " + row['stack_id'])
@ -373,6 +433,7 @@ class MusicHandler(object):
return request_list

def put_result(self, _result):
"""Return True after putting result in db(create and delete rows)."""
for appk, app_placement in _result.iteritems():
data = {
'stack_id': appk,
@ -380,12 +441,15 @@ class MusicHandler(object):
}

try:
self.music.create_row(self.config.db_keyspace, self.config.db_response_table, data)
self.music.create_row(self.config.db_keyspace,
self.config.db_response_table, data)
except Exception as e:
self.logger.error("MUSIC error while putting placement result: " + str(e))
self.logger.error("MUSIC error while putting placement "
"result: " + str(e))
return False

self.logger.info("MusicHandler.put_result: " + appk + " placement result added")
self.logger.info("MusicHandler.put_result: " + appk +
" placement result added")

for appk in _result.keys():
try:
@ -393,37 +457,48 @@ class MusicHandler(object):
self.config.db_request_table,
'stack_id', appk)
except Exception as e:
self.logger.error("MUSIC error while deleting handled request: " + str(e))
self.logger.error("MUSIC error while deleting handled "
"request: " + str(e))
return False

self.logger.info("MusicHandler.put_result: " + appk + " placement request deleted")
self.logger.info("MusicHandler.put_result: " +
appk + " placement request deleted")

return True

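put_result pairs each write to the response table with a delete of the handled request row. A condensed sketch of that flow under the same Music API; the 'placement' column name is an assumption, since the diff elides the full data dict:

    def publish_results(music, config, results):
        """Store placement results and clear the handled requests (sketch)."""
        for stack_id, placement in results.iteritems():
            music.create_row(config.db_keyspace, config.db_response_table,
                             {'stack_id': stack_id, 'placement': placement})
            music.delete_row_eventually(config.db_keyspace,
                                        config.db_request_table,
                                        'stack_id', stack_id)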
def get_resource_status(self, _k):
"""Get Row of resource related to '_k' and return resource as json."""
json_resource = {}

row = {}
try:
row = self.music.read_row(self.config.db_keyspace, self.config.db_resource_table, 'site_name', _k, self.logger)
row = self.music.read_row(self.config.db_keyspace,
self.config.db_resource_table,
'site_name', _k, self.logger)
except Exception as e:
self.logger.error("MUSIC error while reading resource status: " + str(e))
self.logger.error("MUSIC error while reading resource status: " +
str(e))
return None

if len(row) > 0:
str_resource = row[row.keys()[0]]['resource']
json_resource = json.loads(str_resource)

self.logger.info("MusicHandler.get_resource_status: get resource status")
self.logger.info("MusicHandler.get_resource_status: get resource "
"status")

return json_resource

def update_resource_status(self, _k, _status):
"""Update resource _k to the new _status (flavors, lgs, hosts, etc)."""
row = {}
try:
row = self.music.read_row(self.config.db_keyspace, self.config.db_resource_table, 'site_name', _k)
row = self.music.read_row(self.config.db_keyspace,
self.config.db_resource_table,
'site_name', _k)
except Exception as e:
self.logger.error("MUSIC error while reading resource status: " + str(e))
self.logger.error("MUSIC error while reading resource status: " +
str(e))
return False

json_resource = {}
@ -485,7 +560,8 @@ class MusicHandler(object):
self.config.db_resource_table,
'site_name', _k)
except Exception as e:
self.logger.error("MUSIC error while deleting resource status: " + str(e))
self.logger.error("MUSIC error while deleting resource "
"status: " + str(e))
return False

else:
@ -497,34 +573,40 @@ class MusicHandler(object):
}

try:
self.music.create_row(self.config.db_keyspace, self.config.db_resource_table, data)
self.music.create_row(self.config.db_keyspace,
self.config.db_resource_table, data)
except Exception as e:
self.logger.error("MUSIC error: " + str(e))
return False

self.logger.info("MusicHandler.update_resource_status: resource status updated")
self.logger.info("MusicHandler.update_resource_status: resource status "
"updated")

return True

def update_resource_log_index(self, _k, _index):
"""Update resource log index in database and return True."""
data = {
'site_name': _k,
'resource_log_index': str(_index)
}

try:
self.music.update_row_eventually(self.config.db_keyspace,
self.config.db_resource_index_table,
'site_name', _k, data)
self.music.update_row_eventually(
self.config.db_keyspace, self.config.db_resource_index_table,
'site_name', _k, data)
except Exception as e:
self.logger.error("MUSIC error while updating resource log index: " + str(e))
self.logger.error("MUSIC error while updating resource log "
"index: " + str(e))
return False

self.logger.info("MusicHandler.update_resource_log_index: resource log index updated")
self.logger.info("MusicHandler.update_resource_log_index: resource log "
"index updated")

return True

def update_app_log_index(self, _k, _index):
"""Update app log index in database and return True."""
data = {
'site_name': _k,
'app_log_index': str(_index)
@ -535,16 +617,21 @@ class MusicHandler(object):
self.config.db_app_index_table,
'site_name', _k, data)
except Exception as e:
self.logger.error("MUSIC error while updating app log index: " + str(e))
self.logger.error("MUSIC error while updating app log index: " +
str(e))
return False

self.logger.info("MusicHandler.update_app_log_index: app log index updated")
self.logger.info("MusicHandler.update_app_log_index: app log index "
"updated")

return True

def add_app(self, _k, _app_data):
"""Add app to database in music and return True."""
try:
self.music.delete_row_eventually(self.config.db_keyspace, self.config.db_app_table, 'stack_id', _k)
self.music.delete_row_eventually(
self.config.db_keyspace, self.config.db_app_table,
'stack_id', _k)
except Exception as e:
self.logger.error("MUSIC error while deleting app: " + str(e))
return False
@ -558,7 +645,8 @@ class MusicHandler(object):
}

try:
self.music.create_row(self.config.db_keyspace, self.config.db_app_table, data)
self.music.create_row(self.config.db_keyspace,
self.config.db_app_table, data)
except Exception as e:
self.logger.error("MUSIC error while inserting app: " + str(e))
return False
@ -568,11 +656,14 @@ class MusicHandler(object):
return True

def get_app_info(self, _s_uuid):
"""Get app info for stack id and return as json object."""
json_app = {}

row = {}
try:
row = self.music.read_row(self.config.db_keyspace, self.config.db_app_table, 'stack_id', _s_uuid)
row = self.music.read_row(self.config.db_keyspace,
self.config.db_app_table, 'stack_id',
_s_uuid)
except Exception as e:
self.logger.error("MUSIC error while reading app info: " + str(e))
return None
@ -583,8 +674,9 @@ class MusicHandler(object):

return json_app

# TODO: get all other VMs related to this VM
# TODO(UNKNOWN): get all other VMs related to this VM
def get_vm_info(self, _s_uuid, _h_uuid, _host):
"""Return vm info connected with ids and host passed in."""
updated = False
json_app = {}

@ -592,7 +684,9 @@ class MusicHandler(object):

row = {}
try:
row = self.music.read_row(self.config.db_keyspace, self.config.db_app_table, 'stack_id', _s_uuid)
row = self.music.read_row(self.config.db_keyspace,
self.config.db_app_table, 'stack_id',
_s_uuid)
except Exception as e:
self.logger.error("MUSIC error: " + str(e))
return None
@ -608,8 +702,10 @@ class MusicHandler(object):
if vm["host"] != _host:
vm["planned_host"] = vm["host"]
vm["host"] = _host
self.logger.warn("db: conflicted placement decision from Ostro")
# TODO: affinity, diversity, exclusivity validation check
self.logger.warn("db: conflicted placement "
"decision from Ostro")
# TODO(UNKNOWN): affinity, diversity,
# exclusivity check
updated = True
else:
self.logger.debug("db: placement as expected")
@ -621,10 +717,12 @@ class MusicHandler(object):
vm_info = vm
break
else:
self.logger.error("MusicHandler.get_vm_info: vm is missing from stack")
self.logger.error("MusicHandler.get_vm_info: vm is missing "
"from stack")

else:
self.logger.warn("MusicHandler.get_vm_info: not found stack for update = " + _s_uuid)
self.logger.warn("MusicHandler.get_vm_info: not found stack for "
"update = " + _s_uuid)

if updated is True:
if self.add_app(_s_uuid, json_app) is False:
@ -633,12 +731,15 @@ class MusicHandler(object):
return vm_info

def update_vm_info(self, _s_uuid, _h_uuid):
"""Return true if vm's heat and heat stack ids are updated in db."""
updated = False
json_app = {}

row = {}
try:
row = self.music.read_row(self.config.db_keyspace, self.config.db_app_table, 'stack_id', _s_uuid)
row = self.music.read_row(self.config.db_keyspace,
self.config.db_app_table, 'stack_id',
_s_uuid)
except Exception as e:
self.logger.error("MUSIC error: " + str(e))
return False
@ -659,10 +760,12 @@ class MusicHandler(object):

break
else:
self.logger.error("MusicHandler.update_vm_info: vm is missing from stack")
self.logger.error("MusicHandler.update_vm_info: vm is missing "
"from stack")

else:
self.logger.warn("MusicHandler.update_vm_info: not found stack for update = " + _s_uuid)
self.logger.warn("MusicHandler.update_vm_info: not found stack for "
"update = " + _s_uuid)

if updated is True:
if self.add_app(_s_uuid, json_app) is False:
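Another recurring fix in this commit splits long boolean conditions with a trailing backslash. A minimal sketch of the two equivalent wrapping styles; PEP 8 generally prefers the parenthesized form, but both behave the same:

    value = "abc"
    result = None

    # Backslash continuation, as used throughout this commit:
    if value is not None and \
            value != "none":
        result = value

    # Equivalent parenthesized form:
    if (value is not None and
            value != "none"):
        result = value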
@ -1,29 +1,37 @@
#
# Copyright 2014-2017 AT&T Intellectual Property
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from valet.engine.optimizer.app_manager.app_topology_base import VGroup, VM, LEVELS
from valet.engine.optimizer.ostro.openstack_filters import AggregateInstanceExtraSpecsFilter
from valet.engine.optimizer.ostro.openstack_filters import AvailabilityZoneFilter
"""ConstraintSolver."""

from valet.engine.optimizer.app_manager.app_topology_base \
import VGroup, VM, LEVELS
from valet.engine.optimizer.ostro.openstack_filters \
import AggregateInstanceExtraSpecsFilter
from valet.engine.optimizer.ostro.openstack_filters \
import AvailabilityZoneFilter
from valet.engine.optimizer.ostro.openstack_filters import CoreFilter
from valet.engine.optimizer.ostro.openstack_filters import DiskFilter
from valet.engine.optimizer.ostro.openstack_filters import RamFilter


class ConstraintSolver(object):
"""ConstraintSolver."""

def __init__(self, _logger):
"""Initialization."""
"""Instantiate filters to help enforce constraints."""
self.logger = _logger

self.openstack_AZ = AvailabilityZoneFilter(self.logger)
@ -34,12 +42,15 @@ class ConstraintSolver(object):

self.status = "success"

def compute_candidate_list(self, _level, _n, _node_placements, _avail_resources, _avail_logical_groups):
def compute_candidate_list(self, _level, _n, _node_placements,
_avail_resources, _avail_logical_groups):
"""Compute candidate list for the given VGroup or VM."""
candidate_list = []

''' when replanning '''
"""When replanning."""
if _n.node.host is not None and len(_n.node.host) > 0:
self.logger.debug("ConstraintSolver: reconsider with given candidates")
self.logger.debug("ConstraintSolver: reconsider with given "
"candidates")
for hk in _n.node.host:
for ark, ar in _avail_resources.iteritems():
if hk == ark:
@ -52,135 +63,166 @@ class ConstraintSolver(object):
self.logger.warn("ConstraintSolver: " + self.status)
return candidate_list
else:
self.logger.debug("ConstraintSolver: num of candidates = " + str(len(candidate_list)))
self.logger.debug("ConstraintSolver: num of candidates = " +
str(len(candidate_list)))

''' availability zone constraint '''
"""Availability zone constraint."""
if isinstance(_n.node, VGroup) or isinstance(_n.node, VM):
if (isinstance(_n.node, VM) and _n.node.availability_zone is not None) or \
(isinstance(_n.node, VGroup) and len(_n.node.availability_zone_list) > 0):
if (isinstance(_n.node, VM) and _n.node.availability_zone
is not None) or (isinstance(_n.node, VGroup) and
len(_n.node.availability_zone_list) > 0):
self._constrain_availability_zone(_level, _n, candidate_list)
if len(candidate_list) == 0:
self.status = "violate availability zone constraint for node = " + _n.node.name
self.status = "violate availability zone constraint for " \
"node = " + _n.node.name
self.logger.error("ConstraintSolver: " + self.status)
return candidate_list
else:
self.logger.debug("ConstraintSolver: done availability_zone constraint")
self.logger.debug("ConstraintSolver: done availability_"
"zone constraint")

''' host aggregate constraint '''
"""Host aggregate constraint."""
if isinstance(_n.node, VGroup) or isinstance(_n.node, VM):
if len(_n.node.extra_specs_list) > 0:
self._constrain_host_aggregates(_level, _n, candidate_list)
if len(candidate_list) == 0:
self.status = "violate host aggregate constraint for node = " + _n.node.name
self.status = "violate host aggregate constraint for " \
"node = " + _n.node.name
self.logger.error("ConstraintSolver: " + self.status)
return candidate_list
else:
self.logger.debug("ConstraintSolver: done host_aggregate constraint")
self.logger.debug("ConstraintSolver: done host_aggregate "
"constraint")

''' cpu capacity constraint '''
"""CPU capacity constraint."""
if isinstance(_n.node, VGroup) or isinstance(_n.node, VM):
self._constrain_cpu_capacity(_level, _n, candidate_list)
if len(candidate_list) == 0:
self.status = "violate cpu capacity constraint for node = " + _n.node.name
self.status = "violate cpu capacity constraint for " \
"node = " + _n.node.name
self.logger.error("ConstraintSolver: " + self.status)
return candidate_list
else:
self.logger.debug("ConstraintSolver: done cpu capacity constraint")
self.logger.debug("ConstraintSolver: done cpu capacity "
"constraint")

''' memory capacity constraint '''
"""Memory capacity constraint."""
if isinstance(_n.node, VGroup) or isinstance(_n.node, VM):
self._constrain_mem_capacity(_level, _n, candidate_list)
if len(candidate_list) == 0:
self.status = "violate memory capacity constraint for node = " + _n.node.name
self.status = "violate memory capacity constraint for " \
"node = " + _n.node.name
self.logger.error("ConstraintSolver: " + self.status)
return candidate_list
else:
self.logger.debug("ConstraintSolver: done memory capacity constraint")
self.logger.debug("ConstraintSolver: done memory capacity "
"constraint")

''' local disk capacity constraint '''
"""Local disk capacity constraint."""
if isinstance(_n.node, VGroup) or isinstance(_n.node, VM):
self._constrain_local_disk_capacity(_level, _n, candidate_list)
if len(candidate_list) == 0:
self.status = "violate local disk capacity constraint for node = " + _n.node.name
self.status = "violate local disk capacity constraint for " \
"node = " + _n.node.name
self.logger.error("ConstraintSolver: " + self.status)
return candidate_list
else:
self.logger.debug("ConstraintSolver: done local disk capacity constraint")
self.logger.debug("ConstraintSolver: done local disk capacity "
"constraint")

''' network bandwidth constraint '''
self._constrain_nw_bandwidth_capacity(_level, _n, _node_placements, candidate_list)
"""Network bandwidth constraint."""
self._constrain_nw_bandwidth_capacity(_level, _n, _node_placements,
candidate_list)
if len(candidate_list) == 0:
self.status = "violate nw bandwidth capacity constraint for node = " + _n.node.name
self.status = "violate nw bandwidth capacity constraint for " \
"node = " + _n.node.name
self.logger.error("ConstraintSolver: " + self.status)
return candidate_list
else:
self.logger.debug("ConstraintSolver: done bandwidth capacity constraint")
self.logger.debug("ConstraintSolver: done bandwidth capacity "
"constraint")

''' diversity constraint '''
"""Diversity constraint."""
if len(_n.node.diversity_groups) > 0:
for _, diversity_id in _n.node.diversity_groups.iteritems():
if diversity_id.split(":")[0] == _level:
if diversity_id in _avail_logical_groups.keys():
self._constrain_diversity_with_others(_level, diversity_id, candidate_list)
self._constrain_diversity_with_others(_level,
diversity_id,
candidate_list)
if len(candidate_list) == 0:
break
if len(candidate_list) == 0:
self.status = "violate diversity constraint for node = " + _n.node.name
self.status = "violate diversity constraint for " \
"node = " + _n.node.name
self.logger.error("ConstraintSolver: " + self.status)
return candidate_list
else:
self._constrain_diversity(_level, _n, _node_placements, candidate_list)
self._constrain_diversity(_level, _n, _node_placements,
candidate_list)
if len(candidate_list) == 0:
self.status = "violate diversity constraint for node = " + _n.node.name
self.status = "violate diversity constraint for " \
"node = " + _n.node.name
self.logger.error("ConstraintSolver: " + self.status)
return candidate_list
else:
self.logger.debug("ConstraintSolver: done diversity_group constraint")
self.logger.debug("ConstraintSolver: done diversity_group "
"constraint")

''' exclusivity constraint '''
exclusivities = self.get_exclusivities(_n.node.exclusivity_groups, _level)
"""Exclusivity constraint."""
exclusivities = self.get_exclusivities(_n.node.exclusivity_groups,
_level)
if len(exclusivities) > 1:
self.status = "violate exclusivity constraint (more than one exclusivity) for node = " + _n.node.name
self.status = "violate exclusivity constraint (more than one " \
"exclusivity) for node = " + _n.node.name
self.logger.error("ConstraintSolver: " + self.status)
return []
else:
if len(exclusivities) == 1:
exclusivity_id = exclusivities[exclusivities.keys()[0]]
if exclusivity_id.split(":")[0] == _level:
self._constrain_exclusivity(_level, exclusivity_id, candidate_list)
self._constrain_exclusivity(_level, exclusivity_id,
candidate_list)
if len(candidate_list) == 0:
self.status = "violate exclusivity constraint for node = " + _n.node.name
self.status = "violate exclusivity constraint for " \
"node = " + _n.node.name
self.logger.error("ConstraintSolver: " + self.status)
return candidate_list
else:
self.logger.debug("ConstraintSolver: done exclusivity_group constraint")
self.logger.debug("ConstraintSolver: done exclusivity "
"group constraint")
else:
self._constrain_non_exclusivity(_level, candidate_list)
if len(candidate_list) == 0:
self.status = "violate non-exclusivity constraint for node = " + _n.node.name
self.status = "violate non-exclusivity constraint for " \
"node = " + _n.node.name
self.logger.error("ConstraintSolver: " + self.status)
return candidate_list
else:
self.logger.debug("ConstraintSolver: done non-exclusivity_group constraint")
self.logger.debug("ConstraintSolver: done non-exclusivity_"
"group constraint")

''' affinity constraint '''
"""Affinity constraint."""
affinity_id = _n.get_affinity_id() # level:name, except name == "any"
if affinity_id is not None:
if affinity_id.split(":")[0] == _level:
if affinity_id in _avail_logical_groups.keys():
self._constrain_affinity(_level, affinity_id, candidate_list)
self._constrain_affinity(_level, affinity_id,
candidate_list)
if len(candidate_list) == 0:
self.status = "violate affinity constraint for node = " + _n.node.name
self.status = "violate affinity constraint for " \
"node = " + _n.node.name
self.logger.error("ConstraintSolver: " + self.status)
return candidate_list
else:
self.logger.debug("ConstraintSolver: done affinity_group constraint")
self.logger.debug("ConstraintSolver: done affinity_"
"group constraint")

return candidate_list
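compute_candidate_list applies one constraint after another and returns as soon as the candidate list empties. A generic sketch of that chain; the (name, filter_fn) interface is hypothetical, distilled from the method above:

    def apply_constraints(constraints, candidates, logger):
        """Apply each constraint in order; stop when no candidate survives."""
        for name, filter_fn in constraints:
            filter_fn(candidates)  # prunes the list in place
            if len(candidates) == 0:
                logger.error("violate " + name + " constraint")
                return candidates
            logger.debug("done " + name + " constraint")
        return candidates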

'''
constraint modules
'''
"""
Constraint modules.
"""

def _constrain_affinity(self, _level, _affinity_id, _candidate_list):
conflict_list = []
@ -191,11 +233,14 @@ class ConstraintSolver(object):
conflict_list.append(r)

debug_resource_name = r.get_resource_name(_level)
self.logger.debug("ConstraintSolver: not exist affinity in resource = " + debug_resource_name)
self.logger.debug("ConstraintSolver: not exist affinity "
"in resource = " + debug_resource_name)

_candidate_list[:] = [c for c in _candidate_list if c not in conflict_list]
_candidate_list[:] = [c for c in _candidate_list
if c not in conflict_list]

def _constrain_diversity_with_others(self, _level, _diversity_id, _candidate_list):
def _constrain_diversity_with_others(self, _level, _diversity_id,
_candidate_list):
conflict_list = []

for r in _candidate_list:
@ -204,11 +249,17 @@ class ConstraintSolver(object):
conflict_list.append(r)

debug_resource_name = r.get_resource_name(_level)
self.logger.debug("ConstraintSolver: conflict diversity in resource = " + debug_resource_name)
self.logger.debug("ConstraintSolver: conflict diversity "
"in resource = " + debug_resource_name)

_candidate_list[:] = [c for c in _candidate_list if c not in conflict_list]
_candidate_list[:] = [c for c in _candidate_list
if c not in conflict_list]

def exist_group(self, _level, _id, _group_type, _candidate):
"""Check if group esists."""
"""Return True if there exists a group within the candidate's
membership list that matches the provided id and group type.
"""
match = False

memberships = _candidate.get_memberships(_level)
@ -219,7 +270,8 @@ class ConstraintSolver(object):

return match

def _constrain_diversity(self, _level, _n, _node_placements, _candidate_list):
def _constrain_diversity(self, _level, _n, _node_placements,
_candidate_list):
conflict_list = []

for r in _candidate_list:
@ -228,29 +280,40 @@ class ConstraintSolver(object):
conflict_list.append(r)

resource_name = r.get_resource_name(_level)
self.logger.debug("ConstraintSolver: conflict the diversity in resource = " + resource_name)
self.logger.debug("ConstraintSolver: conflict the "
"diversity in resource = " +
resource_name)

_candidate_list[:] = [c for c in _candidate_list if c not in conflict_list]
_candidate_list[:] = [c for c in _candidate_list
if c not in conflict_list]

def conflict_diversity(self, _level, _n, _node_placements, _candidate):
"""Return True if the candidate has a placement conflict."""
conflict = False

for v in _node_placements.keys():
diversity_level = _n.get_common_diversity(v.diversity_groups)
if diversity_level != "ANY" and LEVELS.index(diversity_level) >= LEVELS.index(_level):
if diversity_level != "ANY" and \
LEVELS.index(diversity_level) >= \
LEVELS.index(_level):
if diversity_level == "host":
if _candidate.cluster_name == _node_placements[v].cluster_name and \
_candidate.rack_name == _node_placements[v].rack_name and \
_candidate.host_name == _node_placements[v].host_name:
if _candidate.cluster_name == \
_node_placements[v].cluster_name and \
_candidate.rack_name == \
_node_placements[v].rack_name and \
_candidate.host_name == \
_node_placements[v].host_name:
conflict = True
break
elif diversity_level == "rack":
if _candidate.cluster_name == _node_placements[v].cluster_name and \
if _candidate.cluster_name == \
_node_placements[v].cluster_name and \
_candidate.rack_name == _node_placements[v].rack_name:
conflict = True
break
elif diversity_level == "cluster":
if _candidate.cluster_name == _node_placements[v].cluster_name:
if _candidate.cluster_name == \
_node_placements[v].cluster_name:
conflict = True
break

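conflict_diversity compares scopes through their position in LEVELS, so a rule pinned at a wider level also binds searches at narrower levels. A sketch assuming LEVELS orders scopes from narrow to wide, as the comparisons above imply:

    LEVELS = ["host", "rack", "cluster"]  # assumed ordering

    def applies_at(diversity_level, search_level):
        """Return True if a rule at diversity_level binds search_level."""
        return (diversity_level != "ANY" and
                LEVELS.index(diversity_level) >= LEVELS.index(search_level))

    assert applies_at("rack", "host") is True   # rack rule constrains host search
    assert applies_at("host", "rack") is False  # host rule ignored at rack scope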
@ -265,21 +328,31 @@ class ConstraintSolver(object):
conflict_list.append(r)

debug_resource_name = r.get_resource_name(_level)
self.logger.debug("ConstraintSolver: exclusivity defined in resource = " + debug_resource_name)
self.logger.debug("ConstraintSolver: exclusivity defined "
"in resource = " + debug_resource_name)

_candidate_list[:] = [c for c in _candidate_list if c not in conflict_list]
_candidate_list[:] = [c for c in _candidate_list
if c not in conflict_list]

def conflict_exclusivity(self, _level, _candidate):
"""Check for an exculsivity conflict."""
"""Check if the candidate contains an exclusivity group within its
list of memberships."""
conflict = False

memberships = _candidate.get_memberships(_level)
for mk in memberships.keys():
if memberships[mk].group_type == "EX" and mk.split(":")[0] == _level:
if memberships[mk].group_type == "EX" and \
mk.split(":")[0] == _level:
conflict = True

return conflict

def get_exclusivities(self, _exclusivity_groups, _level):
"""Return a list of filtered exclusivities."""
"""Extract and return only those exclusivities that exist at the
specified level.
"""
exclusivities = {}

for exk, level in _exclusivity_groups.iteritems():
@ -289,15 +362,20 @@ class ConstraintSolver(object):
return exclusivities
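get_exclusivities keeps only the groups scoped to the requested level; group ids take the "level:name" form used throughout this file. A small self-contained illustration with hypothetical group names:

    exclusivity_groups = {'ex1': 'host:group_a', 'ex2': 'rack:group_b'}
    level = 'host'
    filtered = dict((k, v) for k, v in exclusivity_groups.items()
                    if v.split(":")[0] == level)
    assert filtered == {'ex1': 'host:group_a'}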

def _constrain_exclusivity(self, _level, _exclusivity_id, _candidate_list):
candidate_list = self._get_exclusive_candidates(_level, _exclusivity_id, _candidate_list)
candidate_list = self._get_exclusive_candidates(_level, _exclusivity_id,
_candidate_list)

if len(candidate_list) == 0:
candidate_list = self._get_hibernated_candidates(_level, _candidate_list)
_candidate_list[:] = [x for x in _candidate_list if x in candidate_list]
candidate_list = self._get_hibernated_candidates(_level,
_candidate_list)
_candidate_list[:] = [x for x in _candidate_list
if x in candidate_list]
else:
_candidate_list[:] = [x for x in _candidate_list if x in candidate_list]
_candidate_list[:] = [x for x in _candidate_list
if x in candidate_list]

def _get_exclusive_candidates(self, _level, _exclusivity_id, _candidate_list):
def _get_exclusive_candidates(self, _level, _exclusivity_id,
_candidate_list):
candidate_list = []

for r in _candidate_list:
@ -306,7 +384,8 @@ class ConstraintSolver(object):
candidate_list.append(r)
else:
debug_resource_name = r.get_resource_name(_level)
self.logger.debug("ConstraintSolver: exclusivity not exist in resource = " + debug_resource_name)
self.logger.debug("ConstraintSolver: exclusivity not exist in "
"resource = " + debug_resource_name)

return candidate_list

@ -319,11 +398,16 @@ class ConstraintSolver(object):
candidate_list.append(r)
else:
debug_resource_name = r.get_resource_name(_level)
self.logger.debug("ConstraintSolver: exclusivity not allowed in resource = " + debug_resource_name)
self.logger.debug("ConstraintSolver: exclusivity not allowed "
"in resource = " + debug_resource_name)

return candidate_list

def check_hibernated(self, _level, _candidate):
"""Check if the candidate is hibernated."""
"""Return True if the candidate has no placed VMs at the specified
level.
"""
match = False

num_of_placed_vms = _candidate.get_num_of_placed_vms(_level)
@ -341,11 +425,14 @@ class ConstraintSolver(object):
conflict_list.append(r)

debug_resource_name = r.get_resource_name(_level)
self.logger.debug("ConstraintSolver: not meet aggregate in resource = " + debug_resource_name)
self.logger.debug("ConstraintSolver: not meet aggregate "
"in resource = " + debug_resource_name)

_candidate_list[:] = [c for c in _candidate_list if c not in conflict_list]
_candidate_list[:] = [c for c in _candidate_list
if c not in conflict_list]

def check_host_aggregates(self, _level, _candidate, _v):
"""Check if the candidate passes the aggregate instance extra specs zone filter."""
return self.openstack_AIES.host_passes(_level, _candidate, _v)

def _constrain_availability_zone(self, _level, _n, _candidate_list):
@ -357,11 +444,14 @@ class ConstraintSolver(object):
conflict_list.append(r)

debug_resource_name = r.get_resource_name(_level)
self.logger.debug("ConstraintSolver: not meet az in resource = " + debug_resource_name)
self.logger.debug("ConstraintSolver: not meet az in "
"resource = " + debug_resource_name)

_candidate_list[:] = [c for c in _candidate_list if c not in conflict_list]
_candidate_list[:] = [c for c in _candidate_list
if c not in conflict_list]

def check_availability_zone(self, _level, _candidate, _v):
"""Check if the candidate passes the availability zone filter."""
return self.openstack_AZ.host_passes(_level, _candidate, _v)

def _constrain_cpu_capacity(self, _level, _n, _candidate_list):
@ -372,11 +462,14 @@ class ConstraintSolver(object):
conflict_list.append(ch)

debug_resource_name = ch.get_resource_name(_level)
self.logger.debug("ConstraintSolver: lack of cpu in " + debug_resource_name)
self.logger.debug("ConstraintSolver: lack of cpu in " +
debug_resource_name)

_candidate_list[:] = [c for c in _candidate_list if c not in conflict_list]
_candidate_list[:] = [c for c in _candidate_list
if c not in conflict_list]

def check_cpu_capacity(self, _level, _v, _candidate):
"""Check if the candidate passes the core filter."""
return self.openstack_C.host_passes(_level, _candidate, _v)

def _constrain_mem_capacity(self, _level, _n, _candidate_list):
@ -387,11 +480,14 @@ class ConstraintSolver(object):
conflict_list.append(ch)

debug_resource_name = ch.get_resource_name(_level)
self.logger.debug("ConstraintSolver: lack of mem in " + debug_resource_name)
self.logger.debug("ConstraintSolver: lack of mem in " +
debug_resource_name)

_candidate_list[:] = [c for c in _candidate_list if c not in conflict_list]
_candidate_list[:] = [c for c in _candidate_list
if c not in conflict_list]

def check_mem_capacity(self, _level, _v, _candidate):
"""Check if the candidate passes the RAM filter."""
return self.openstack_R.host_passes(_level, _candidate, _v)

def _constrain_local_disk_capacity(self, _level, _n, _candidate_list):
@ -402,11 +498,14 @@ class ConstraintSolver(object):
conflict_list.append(ch)

debug_resource_name = ch.get_resource_name(_level)
self.logger.debug("ConstraintSolver: lack of local disk in " + debug_resource_name)
self.logger.debug("ConstraintSolver: lack of local disk in " +
debug_resource_name)

_candidate_list[:] = [c for c in _candidate_list if c not in conflict_list]
_candidate_list[:] = [c for c in _candidate_list
if c not in conflict_list]

def check_local_disk_capacity(self, _level, _v, _candidate):
"""Check if the candidate passes the disk filter."""
return self.openstack_D.host_passes(_level, _candidate, _v)

def _constrain_storage_capacity(self, _level, _n, _candidate_list):
@ -434,11 +533,14 @@ class ConstraintSolver(object):
if vc == "any" or s.storage_class == vc:
avail_disks.append(s.storage_avail_disk)

self.logger.debug("ConstraintSolver: storage constrained in resource = " + debug_resource_name)
|
||||
self.logger.debug("ConstraintSolver: storage constrained in"
|
||||
"resource = " + debug_resource_name)
|
||||

_candidate_list[:] = [c for c in _candidate_list if c not in conflict_list]
_candidate_list[:] = [c for c in _candidate_list
if c not in conflict_list]

def check_storage_availability(self, _level, _v, _ch):
"""Return True if there is sufficient storage availability."""
available = False

volume_sizes = []
@ -462,21 +564,28 @@ class ConstraintSolver(object):

return available

def _constrain_nw_bandwidth_capacity(self, _level, _n, _node_placements, _candidate_list):
def _constrain_nw_bandwidth_capacity(self, _level, _n, _node_placements,
_candidate_list):
conflict_list = []

for cr in _candidate_list:
if self.check_nw_bandwidth_availability(_level, _n, _node_placements, cr) is False:
if self.check_nw_bandwidth_availability(
_level, _n, _node_placements, cr) is False:
if cr not in conflict_list:
conflict_list.append(cr)

debug_resource_name = cr.get_resource_name(_level)
self.logger.debug("ConstraintSolver: bw constrained in resource = " + debug_resource_name)
self.logger.debug("ConstraintSolver: bw constrained in "
"resource = " + debug_resource_name)

_candidate_list[:] = [c for c in _candidate_list if c not in conflict_list]
_candidate_list[:] = [c for c in _candidate_list
if c not in conflict_list]

def check_nw_bandwidth_availability(self, _level, _n, _node_placements, _cr):
# NOTE: 3rd entry for special node requiring bandwidth of out-going from spine switch
def check_nw_bandwidth_availability(self, _level, _n, _node_placements,
_cr):
"""Return True if there is sufficient network availability."""
# NOTE: 3rd entry for special node requiring bandwidth of out-going
# from spine switch
total_req_bandwidths = [0, 0, 0]

link_list = _n.get_all_links()
@ -486,26 +595,35 @@ class ConstraintSolver(object):

placement_level = None
if vl.node in _node_placements.keys(): # vl.node is VM or Volume
placement_level = _node_placements[vl.node].get_common_placement(_cr)
placement_level = \
_node_placements[vl.node].get_common_placement(_cr)
else: # in the open list
placement_level = _n.get_common_diversity(vl.node.diversity_groups)
placement_level = \
_n.get_common_diversity(vl.node.diversity_groups)
if placement_level == "ANY":
implicit_diversity = self.get_implicit_diversity(_n.node, link_list, vl.node, _level)
implicit_diversity = self.get_implicit_diversity(_n.node,
link_list,
vl.node,
_level)
if implicit_diversity[0] is not None:
placement_level = implicit_diversity[1]

self.get_req_bandwidths(_level, placement_level, bandwidth, total_req_bandwidths)
self.get_req_bandwidths(_level, placement_level, bandwidth,
total_req_bandwidths)

return self._check_nw_bandwidth_availability(_level, total_req_bandwidths, _cr)
return self._check_nw_bandwidth_availability(_level,
total_req_bandwidths, _cr)

# to find any implicit diversity relation caused by the other links of _v
# (i.e., intersection between _v and _target_v)
def get_implicit_diversity(self, _v, _link_list, _target_v, _level):
"""Get the maximum implicit diversity between _v and _target_v."""
max_implicit_diversity = (None, 0)

for vl in _link_list:
diversity_level = _v.get_common_diversity(vl.node.diversity_groups)
if diversity_level != "ANY" and LEVELS.index(diversity_level) >= LEVELS.index(_level):
if diversity_level != "ANY" \
and LEVELS.index(diversity_level) >= LEVELS.index(_level):
for dk, dl in vl.node.diversity_groups.iteritems():
if LEVELS.index(dl) > LEVELS.index(diversity_level):
if _target_v.uuid != vl.node.uuid:
@ -515,7 +633,9 @@ class ConstraintSolver(object):

return max_implicit_diversity

def get_req_bandwidths(self, _level, _placement_level, _bandwidth, _total_req_bandwidths):
def get_req_bandwidths(self, _level, _placement_level, _bandwidth,
_total_req_bandwidths):
"""Calculate and update total required bandwidths."""
if _level == "cluster" or _level == "rack":
if _placement_level == "cluster" or _placement_level == "rack":
_total_req_bandwidths[1] += _bandwidth
@ -526,7 +646,8 @@ class ConstraintSolver(object):
elif _placement_level == "host":
_total_req_bandwidths[0] += _bandwidth

def _check_nw_bandwidth_availability(self, _level, _req_bandwidths, _candidate_resource):
def _check_nw_bandwidth_availability(self, _level, _req_bandwidths,
_candidate_resource):
available = True

if _level == "cluster":
@ -557,7 +678,8 @@ class ConstraintSolver(object):
for _, sr in _candidate_resource.rack_avail_switches.iteritems():
rack_avail_bandwidths.append(max(sr.avail_bandwidths))

avail_bandwidth = min(max(host_avail_bandwidths), max(rack_avail_bandwidths))
avail_bandwidth = min(max(host_avail_bandwidths),
max(rack_avail_bandwidths))
if avail_bandwidth < _req_bandwidths[1]:
available = False

@ -1,22 +1,24 @@
#
# Copyright 2014-2017 AT&T Intellectual Property
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""AggregateInstanceExtraSpecsFilter."""

import six

from valet.engine.optimizer.app_manager.app_topology_base import VM
import valet.engine.optimizer.ostro.openstack_utils
from valet.engine.optimizer.ostro import openstack_utils

_SCOPE = 'aggregate_instance_extra_specs'

@ -28,14 +30,13 @@ class AggregateInstanceExtraSpecsFilter(object):
run_filter_once_per_request = True

def __init__(self, _logger):
"""Initialization."""
self.logger = _logger

def host_passes(self, _level, _host, _v):
"""Return a list of hosts that can create instance_type

Check that the extra specs associated with the instance type match
the metadata provided by aggregates. If not present return False.
"""
"""Return a list of hosts that can create instance_type."""
"""Check that the extra specs associated with the instance type match
the metadata provided by aggregates. If not present return False."""

# If 'extra_specs' is not present or extra_specs are empty then we
# need not proceed further
@ -47,12 +48,14 @@ class AggregateInstanceExtraSpecsFilter(object):
if len(extra_specs_list) == 0:
return True

metadatas = openstack_utils.aggregate_metadata_get_by_host(_level, _host)
metadatas = openstack_utils.aggregate_metadata_get_by_host(_level,
_host)

matched_logical_group_list = []
for extra_specs in extra_specs_list:
for lgk, metadata in metadatas.iteritems():
if self._match_metadata(_host.get_resource_name(_level), lgk, extra_specs, metadata) is True:
if self._match_metadata(_host.get_resource_name(_level), lgk,
extra_specs, metadata) is True:
matched_logical_group_list.append(lgk)
break
else:
@ -64,7 +67,8 @@ class AggregateInstanceExtraSpecsFilter(object):
break
else:
host_aggregate_extra_specs = {}
host_aggregate_extra_specs["host_aggregates"] = matched_logical_group_list
host_aggregate_extra_specs["host_aggregates"] = \
matched_logical_group_list
_v.extra_specs_list.append(host_aggregate_extra_specs)

return True
@ -85,13 +89,17 @@ class AggregateInstanceExtraSpecsFilter(object):

aggregate_vals = _metadata.get(key, None)
if not aggregate_vals:
self.logger.debug("key (" + key + ") not exists in logical_group (" + _lg_name + ") " + " of host (" + _h_name + ")")
self.logger.debug("key (" + key + ") not exists in logical_"
"group (" + _lg_name + ") " +
" of host (" + _h_name + ")")
return False
for aggregate_val in aggregate_vals:
if openstack_utils.match(aggregate_val, req):
break
else:
self.logger.debug("key (" + key + ")'s value (" + req + ") not exists in logical_group " + "(" + _lg_name + ") " + " of host (" + _h_name + ")")
self.logger.debug("key (" + key + ")'s value (" + req + ") not "
"exists in logical_group " + "(" + _lg_name +
") " + " of host (" + _h_name + ")")
return False

return True
@ -99,9 +107,9 @@ class AggregateInstanceExtraSpecsFilter(object):

# NOTE: originally, OpenStack used the metadata of host_aggregate
class AvailabilityZoneFilter(object):
""" Filters Hosts by availability zone.
"""AvailabilityZoneFilter filters Hosts by availability zone."""

Works with aggregate metadata availability zones, using the key
"""Work with aggregate metadata availability zones, using the key
'availability_zone'
Note: in theory a compute node can be part of multiple availability_zones
"""
@ -110,9 +118,11 @@ class AvailabilityZoneFilter(object):
run_filter_once_per_request = True

def __init__(self, _logger):
"""Initialization."""
self.logger = _logger

def host_passes(self, _level, _host, _v):
"""Return True if all availalibility zones in _v exist in the host."""
|
||||
az_request_list = []
if isinstance(_v, VM):
az_request_list.append(_v.availability_zone)
@ -123,43 +133,54 @@ class AvailabilityZoneFilter(object):
if len(az_request_list) == 0:
return True

availability_zone_list = openstack_utils.availability_zone_get_by_host(_level, _host)
availability_zone_list = \
openstack_utils.availability_zone_get_by_host(_level, _host)

for azr in az_request_list:
if azr not in availability_zone_list:
self.logger.debug("AZ (" + azr + ") not exists in host " + "(" + _host.get_resource_name(_level) + ")")
self.logger.debug("AZ (" + azr + ") not exists in host " + "(" +
_host.get_resource_name(_level) + ")")
return False

return True


class RamFilter(object):
"""RamFilter."""

def __init__(self, _logger):
"""Initialization."""
self.logger = _logger

def host_passes(self, _level, _host, _v):
"""Only return hosts with sufficient available RAM."""
"""Return True if host has sufficient available RAM."""
requested_ram = _v.mem # MB
(total_ram, usable_ram) = _host.get_mem(_level)

# Do not allow an instance to overcommit against itself, only against other instances.
# Do not allow an instance to overcommit against itself, only against
# other instances.
if not total_ram >= requested_ram:
self.logger.debug("requested mem (" + str(requested_ram) + ") more than total mem (" +
str(total_ram) + ") in host (" + _host.get_resource_name(_level) + ")")
self.logger.debug("requested mem (" + str(requested_ram) +
") more than total mem (" +
str(total_ram) + ") in host (" +
_host.get_resource_name(_level) + ")")
return False

if not usable_ram >= requested_ram:
self.logger.debug("requested mem (" + str(requested_ram) + ") more than avail mem (" +
str(usable_ram) + ") in host (" + _host.get_resource_name(_level) + ")")
self.logger.debug("requested mem (" + str(requested_ram) +
") more than avail mem (" +
str(usable_ram) + ") in host (" +
_host.get_resource_name(_level) + ")")
return False

return True


class CoreFilter(object):
"""CoreFilter."""

def __init__(self, _logger):
"""Initialization."""
self.logger = _logger

def host_passes(self, _level, _host, _v):
@ -168,33 +189,42 @@ class CoreFilter(object):

instance_vCPUs = _v.vCPUs

# Do not allow an instance to overcommit against itself, only against other instances.
# Do not allow an instance to overcommit against itself, only against
# other instances.
if instance_vCPUs > vCPUs:
self.logger.debug("requested vCPUs (" + str(instance_vCPUs) + ") more than total vCPUs (" +
str(vCPUs) + ") in host (" + _host.get_resource_name(_level) + ")")
self.logger.debug("requested vCPUs (" + str(instance_vCPUs) +
") more than total vCPUs (" +
str(vCPUs) + ") in host (" +
_host.get_resource_name(_level) + ")")
return False

if avail_vCPUs < instance_vCPUs:
self.logger.debug("requested vCPUs (" + str(instance_vCPUs) + ") more than avail vCPUs (" +
str(avail_vCPUs) + ") in host (" + _host.get_resource_name(_level) + ")")
self.logger.debug("requested vCPUs (" + str(instance_vCPUs) +
") more than avail vCPUs (" +
str(avail_vCPUs) + ") in host (" +
_host.get_resource_name(_level) + ")")
return False

return True


class DiskFilter(object):
"""DiskFilter."""

def __init__(self, _logger):
"""Initialization."""
self.logger = _logger

def host_passes(self, _level, _host, _v):
"""Filter based on disk usage."""
"""Return True if the requested disk is less than the available disk."""
requested_disk = _v.local_volume_size
(_, usable_disk) = _host.get_local_disk(_level)

if not usable_disk >= requested_disk:
self.logger.debug("requested disk (" + str(requested_disk) + ") more than avail disk (" +
str(usable_disk) + ") in host (" + _host.get_resource_name(_level) + ")")
self.logger.debug("requested disk (" + str(requested_disk) +
") more than avail disk (" +
str(usable_disk) + ") in host (" +
_host.get_resource_name(_level) + ")")
return False

return True
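Every filter in this module shares the same shape: a constructor taking a logger and a host_passes(_level, _host, _v) predicate, mirroring the Nova scheduler filters they are modeled on. A hypothetical new filter in that convention (not part of this commit), using only the host accessors that appear above:

    class MetadataKeyFilter(object):
        """Hypothetical filter: pass hosts whose memberships include a key."""

        def __init__(self, _logger):
            """Initialization."""
            self.logger = _logger

        def host_passes(self, _level, _host, _v):
            """Return True if the host carries the required membership key."""
            required = getattr(_v, 'required_key', None)  # assumed attribute
            if required is None:
                return True
            if required not in _host.get_memberships(_level):
                self.logger.debug("key (" + required + ") not in host (" +
                                  _host.get_resource_name(_level) + ")")
                return False
            return True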
@ -1,27 +1,29 @@
#
# Copyright 2014-2017 AT&T Intellectual Property
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Openstack utlity functions."""
|
||||

import collections
import operator


# 1. The following operations are supported:
# =, s==, s!=, s>=, s>, s<=, s<, <in>, <all-in>, <or>, ==, !=, >=, <=
# =, s==, s!=, s>=, s>, s<=, s<, <in>, <all-in>, <or>, ==, !=, >=, <=
# 2. Note that <or> is handled in a different way below.
# 3. If the first word in the extra_specs is not one of the operators,
# it is ignored.
# it is ignored.
op_methods = {'=': lambda x, y: float(x) >= float(y),
'<in>': lambda x, y: y in x,
'<all-in>': lambda x, y: all(val in x for val in y),
@ -38,6 +40,7 @@ op_methods = {'=': lambda x, y: float(x) >= float(y),


def match(value, req):
"""Return True if value matches request."""
words = req.split()

op = method = None
@ -70,7 +73,10 @@
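op_methods drives match(): the first token of an extra-specs requirement selects an operator and the remaining tokens are operands. The diff elides most of the table, so this illustration uses only the three entries visible above:

    op_methods = {'=': lambda x, y: float(x) >= float(y),
                  '<in>': lambda x, y: y in x,
                  '<all-in>': lambda x, y: all(val in x for val in y)}

    assert op_methods['=']('8', '4') is True                 # "= 4": numeric at-least
    assert op_methods['<in>']('local_ssd', 'ssd') is True    # substring match
    assert op_methods['<all-in>'](['a', 'b', 'c'], ['a', 'b']) is True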


def aggregate_metadata_get_by_host(_level, _host, _key=None):
"""Returns a dict of all metadata based on a metadata key for a specific host. If the key is not provided, returns a dict of all metadata."""
"""Return a dict of metadata for a specific host."""
"""Base dict on a metadata key. If the key is not provided,
return a dict of all metadata.
"""

metadatas = {}

@ -90,6 +96,7 @@ def aggregate_metadata_get_by_host(_level, _host, _key=None):

# NOTE: this function not exist in OpenStack
def availability_zone_get_by_host(_level, _host):
"""Return a list of availability zones for a specific host."""
availability_zone_list = []

logical_groups = _host.get_memberships(_level)

@ -1,27 +1,32 @@
#
# Copyright 2014-2017 AT&T Intellectual Property
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Optimizer."""

import time

from valet.engine.optimizer.app_manager.app_topology_base import VGroup, VM, Volume
from valet.engine.optimizer.app_manager.app_topology_base \
import VGroup, VM, Volume
from valet.engine.optimizer.ostro.search import Search


class Optimizer(object):
"""Optimizer."""

def __init__(self, _resource, _logger):
"""Initialization."""
self.resource = _resource
self.logger = _logger

@ -30,6 +35,8 @@ class Optimizer(object):
self.status = "success"

def place(self, _app_topology):
"""Perform a replan, migration, or create operation."""
"""Return a placement map for VMs, Volumes, and VGroups."""
success = False

uuid_map = None
@ -59,7 +66,8 @@ class Optimizer(object):
uuid_map = self._delete_old_vms(_app_topology.old_vm_map)
self.resource.update_topology(store=False)

self.logger.debug("Optimizer: remove old placements for replan")
self.logger.debug("Optimizer: remove old placements for "
"replan")
else:
success = self.search.place_nodes(_app_topology, self.resource)

@ -67,26 +75,35 @@ class Optimizer(object):

if success is True:

self.logger.debug("Optimizer: search running time = " + str(end_ts - start_ts) + " sec")
self.logger.debug("Optimizer: total bandwidth = " + str(self.search.bandwidth_usage))
self.logger.debug("Optimizer: total number of hosts = " + str(self.search.num_of_hosts))
self.logger.debug("Optimizer: search running time = " +
str(end_ts - start_ts) + " sec")
self.logger.debug("Optimizer: total bandwidth = " +
str(self.search.bandwidth_usage))
self.logger.debug("Optimizer: total number of hosts = " +
str(self.search.num_of_hosts))

placement_map = {}
for v in self.search.node_placements.keys():
if isinstance(v, VM):
placement_map[v] = self.search.node_placements[v].host_name
elif isinstance(v, Volume):
placement_map[v] = self.search.node_placements[v].host_name + "@"
placement_map[v] += self.search.node_placements[v].storage.storage_name
placement_map[v] = \
self.search.node_placements[v].host_name + "@"
placement_map[v] += \
self.search.node_placements[v].storage.storage_name
elif isinstance(v, VGroup):
if v.level == "host":
placement_map[v] = self.search.node_placements[v].host_name
placement_map[v] = \
self.search.node_placements[v].host_name
elif v.level == "rack":
placement_map[v] = self.search.node_placements[v].rack_name
placement_map[v] = \
self.search.node_placements[v].rack_name
elif v.level == "cluster":
placement_map[v] = self.search.node_placements[v].cluster_name
placement_map[v] = \
self.search.node_placements[v].cluster_name

self.logger.debug(" " + v.name + " placed in " + placement_map[v])
self.logger.debug(" " + v.name + " placed in " +
placement_map[v])

self._update_resource_status(uuid_map)

@ -104,7 +121,8 @@ class Optimizer(object):
|
||||
if uuid is not None:
|
||||
uuid_map[h_uuid] = uuid
|
||||
|
||||
self.resource.remove_vm_by_h_uuid_from_host(info[0], h_uuid, info[1], info[2], info[3])
|
||||
self.resource.remove_vm_by_h_uuid_from_host(
|
||||
info[0], h_uuid, info[1], info[2], info[3])
|
||||
self.resource.update_host_time(info[0])
|
||||
|
||||
host = self.resource.hosts[info[0]]
|
||||
@ -123,58 +141,75 @@ class Optimizer(object):
|
||||
|
||||
self.resource.add_vm_to_host(np.host_name,
|
||||
(v.uuid, v.name, uuid),
|
||||
v.vCPUs, v.mem, v.local_volume_size)
|
||||
v.vCPUs, v.mem,
|
||||
v.local_volume_size)
|
||||
|
||||
for vl in v.vm_list:
|
||||
tnp = self.search.node_placements[vl.node]
|
||||
placement_level = np.get_common_placement(tnp)
|
||||
self.resource.deduct_bandwidth(np.host_name, placement_level, vl.nw_bandwidth)
|
||||
self.resource.deduct_bandwidth(np.host_name,
|
||||
placement_level,
|
||||
vl.nw_bandwidth)
|
||||
|
||||
for voll in v.volume_list:
|
||||
tnp = self.search.node_placements[voll.node]
|
||||
placement_level = np.get_common_placement(tnp)
|
||||
self.resource.deduct_bandwidth(np.host_name, placement_level, voll.io_bandwidth)
|
||||
self.resource.deduct_bandwidth(np.host_name,
|
||||
placement_level,
|
||||
voll.io_bandwidth)
|
||||
|
||||
self._update_logical_grouping(v, self.search.avail_hosts[np.host_name], uuid)
|
||||
self._update_logical_grouping(
|
||||
v, self.search.avail_hosts[np.host_name], uuid)
|
||||
|
||||
self.resource.update_host_time(np.host_name)
|
||||
|
||||
elif isinstance(v, Volume):
|
||||
self.resource.add_vol_to_host(np.host_name, np.storage.storage_name, v.name, v.volume_size)
|
||||
self.resource.add_vol_to_host(np.host_name,
|
||||
np.storage.storage_name, v.name,
|
||||
v.volume_size)
|
||||
|
||||
for vl in v.vm_list:
|
||||
tnp = self.search.node_placements[vl.node]
|
||||
placement_level = np.get_common_placement(tnp)
|
||||
self.resource.deduct_bandwidth(np.host_name, placement_level, vl.io_bandwidth)
|
||||
self.resource.deduct_bandwidth(np.host_name,
|
||||
placement_level,
|
||||
vl.io_bandwidth)
|
||||
|
||||
self.resource.update_storage_time(np.storage.storage_name)
|
||||
|
||||
def _update_logical_grouping(self, _v, _avail_host, _uuid):
|
||||
for lgk, lg in _avail_host.host_memberships.iteritems():
|
||||
if lg.group_type == "EX" or lg.group_type == "AFF" or lg.group_type == "DIV":
|
||||
if lg.group_type == "EX" or lg.group_type == "AFF" or \
|
||||
lg.group_type == "DIV":
|
||||
lg_name = lgk.split(":")
|
||||
if lg_name[0] == "host" and lg_name[1] != "any":
|
||||
self.resource.add_logical_group(_avail_host.host_name, lgk, lg.group_type)
|
||||
self.resource.add_logical_group(_avail_host.host_name,
|
||||
lgk, lg.group_type)
|
||||
|
||||
if _avail_host.rack_name != "any":
|
||||
for lgk, lg in _avail_host.rack_memberships.iteritems():
|
||||
if lg.group_type == "EX" or lg.group_type == "AFF" or lg.group_type == "DIV":
|
||||
if lg.group_type == "EX" or lg.group_type == "AFF" or \
|
||||
lg.group_type == "DIV":
|
||||
lg_name = lgk.split(":")
|
||||
if lg_name[0] == "rack" and lg_name[1] != "any":
|
||||
self.resource.add_logical_group(_avail_host.rack_name, lgk, lg.group_type)
|
||||
self.resource.add_logical_group(_avail_host.rack_name,
|
||||
lgk, lg.group_type)
|
||||
|
||||
if _avail_host.cluster_name != "any":
|
||||
for lgk, lg in _avail_host.cluster_memberships.iteritems():
|
||||
if lg.group_type == "EX" or lg.group_type == "AFF" or lg.group_type == "DIV":
|
||||
if lg.group_type == "EX" or lg.group_type == "AFF" or \
|
||||
lg.group_type == "DIV":
|
||||
lg_name = lgk.split(":")
|
||||
if lg_name[0] == "cluster" and lg_name[1] != "any":
|
||||
self.resource.add_logical_group(_avail_host.cluster_name, lgk, lg.group_type)
|
||||
self.resource.add_logical_group(
|
||||
_avail_host.cluster_name, lgk, lg.group_type)
|
||||
|
||||
vm_logical_groups = []
|
||||
self._collect_logical_groups_of_vm(_v, vm_logical_groups)
|
||||
|
||||
host = self.resource.hosts[_avail_host.host_name]
|
||||
self.resource.add_vm_to_logical_groups(host, (_v.uuid, _v.name, _uuid), vm_logical_groups)
|
||||
self.resource.add_vm_to_logical_groups(host, (_v.uuid, _v.name, _uuid),
|
||||
vm_logical_groups)
|
||||
|
||||
def _collect_logical_groups_of_vm(self, _v, _vm_logical_groups):
|
||||
if isinstance(_v, VM):
|
||||
|
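The placement map returned by Optimizer.place() is keyed by the VM, Volume, and VGroup objects themselves; a hedged sketch of a consumer, with the optimizer instance and app_topology assumed to come from the surrounding engine code:

placement_map = optimizer.place(app_topology)
if placement_map is not None:
    for node, location in placement_map.items():
        # VMs map to a host name, Volumes to "host@storage",
        # VGroups to a host, rack, or cluster name depending on their level.
        print(node.name, "->", location)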
@ -1,18 +1,20 @@
|
||||
#
|
||||
# Copyright 2014-2017 AT&T Intellectual Property
|
||||
#
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Valet Engine."""
|
||||
|
||||
from oslo_config import cfg
|
||||
import threading
|
||||
import time
|
||||
@ -30,21 +32,25 @@ CONF = cfg.CONF
|
||||
|
||||
|
||||
class Ostro(object):
|
||||
"""Valet Engine."""
|
||||
|
||||
def __init__(self, _config, _logger):
|
||||
"""Initialization."""
|
||||
self.config = _config
|
||||
self.logger = _logger
|
||||
|
||||
self.db = MusicHandler(self.config, self.logger)
|
||||
if self.db.init_db() is False:
|
||||
self.logger.error("Ostro.__init__: error while initializing MUSIC database")
|
||||
self.logger.error("Ostro.__init__: error while initializing MUSIC "
|
||||
"database")
|
||||
else:
|
||||
self.logger.debug("Ostro.__init__: done init music")
|
||||
|
||||
self.resource = Resource(self.db, self.config, self.logger)
|
||||
self.logger.debug("done init resource")
|
||||
|
||||
self.app_handler = AppHandler(self.resource, self.db, self.config, self.logger)
|
||||
self.app_handler = AppHandler(self.resource, self.db, self.config,
|
||||
self.logger)
|
||||
self.logger.debug("done init apphandler")
|
||||
|
||||
self.optimizer = Optimizer(self.resource, self.logger)
|
||||
@ -53,10 +59,13 @@ class Ostro(object):
|
||||
self.data_lock = threading.Lock()
|
||||
self.thread_list = []
|
||||
|
||||
self.topology = TopologyManager(1, "Topology", self.resource, self.data_lock, self.config, self.logger)
|
||||
self.topology = TopologyManager(1, "Topology", self.resource,
|
||||
self.data_lock, self.config,
|
||||
self.logger)
|
||||
self.logger.debug("done init topology")
|
||||
|
||||
self.compute = ComputeManager(2, "Compute", self.resource, self.data_lock, self.config, self.logger)
|
||||
self.compute = ComputeManager(2, "Compute", self.resource,
|
||||
self.data_lock, self.config, self.logger)
|
||||
self.logger.debug("done init compute")
|
||||
|
||||
self.listener = ListenerManager(3, "Listener", CONF)
|
||||
@ -66,6 +75,10 @@ class Ostro(object):
|
||||
self.end_of_process = False
|
||||
|
||||
def run_ostro(self):
|
||||
"""Start main engine process."""
|
||||
"""Start topology, compute, and listener processes. Start process of
|
||||
retrieving and handling events and requests from the db every 1 second.
|
||||
"""
|
||||
self.logger.info("Ostro.run_ostro: start Ostro ......")
|
||||
|
||||
self.topology.start()
|
||||
@ -102,6 +115,10 @@ class Ostro(object):
|
||||
self.logger.info("Ostro.run_ostro: exit Ostro")
|
||||
|
||||
def stop_ostro(self):
|
||||
"""Stop main engine process."""
|
||||
"""Stop process of retrieving and handling events and requests from
|
||||
the db. Stop topology and compute processes.
|
||||
"""
|
||||
self.end_of_process = True
|
||||
|
||||
while len(self.thread_list) > 0:
|
||||
@ -111,10 +128,12 @@ class Ostro(object):
|
||||
self.thread_list.remove(t)
|
||||
|
||||
def bootstrap(self):
|
||||
"""Start bootstrap and update the engine's resource topology."""
|
||||
self.logger.info("Ostro.bootstrap: start bootstrap")
|
||||
|
||||
try:
|
||||
resource_status = self.db.get_resource_status(self.resource.datacenter.name)
|
||||
resource_status = self.db.get_resource_status(
|
||||
self.resource.datacenter.name)
|
||||
if resource_status is None:
|
||||
return False
|
||||
|
||||
@ -140,7 +159,8 @@ class Ostro(object):
|
||||
self.resource.update_topology()
|
||||
|
||||
except Exception:
|
||||
self.logger.critical("Ostro.bootstrap failed: " + traceback.format_exc())
|
||||
self.logger.critical("Ostro.bootstrap failed: " +
|
||||
traceback.format_exc())
|
||||
|
||||
self.logger.info("Ostro.bootstrap: done bootstrap")
|
||||
|
||||
@ -173,6 +193,7 @@ class Ostro(object):
|
||||
return True
|
||||
|
||||
def place_app(self, _app_data):
|
||||
"""Place results of query and placement requests in the db."""
|
||||
self.data_lock.acquire()
|
||||
|
||||
start_time = time.time()
|
||||
@ -190,7 +211,8 @@ class Ostro(object):
|
||||
|
||||
query_results = self._query(query_request_list)
|
||||
|
||||
result = self._get_json_results("query", "ok", self.status, query_results)
|
||||
result = self._get_json_results("query", "ok", self.status,
|
||||
query_results)
|
||||
|
||||
if self.db.put_result(result) is False:
|
||||
self.data_lock.release()
|
||||
@ -207,9 +229,11 @@ class Ostro(object):
|
||||
placement_map = self._place_app(placement_request_list)
|
||||
|
||||
if placement_map is None:
|
||||
result = self._get_json_results("placement", "error", self.status, placement_map)
|
||||
result = self._get_json_results("placement", "error",
|
||||
self.status, placement_map)
|
||||
else:
|
||||
result = self._get_json_results("placement", "ok", "success", placement_map)
|
||||
result = self._get_json_results("placement", "ok", "success",
|
||||
placement_map)
|
||||
|
||||
if self.db.put_result(result) is False:
|
||||
self.data_lock.release()
|
||||
@ -219,7 +243,8 @@ class Ostro(object):
|
||||
|
||||
end_time = time.time()
|
||||
|
||||
self.logger.info("Ostro.place_app: total decision delay of request = " + str(end_time - start_time) + " sec")
|
||||
self.logger.info("Ostro.place_app: total decision delay of request = " +
|
||||
str(end_time - start_time) + " sec")
|
||||
|
||||
self.data_lock.release()
|
||||
return True
|
||||
@ -233,7 +258,8 @@ class Ostro(object):
|
||||
if "parameters" in q.keys():
|
||||
params = q["parameters"]
|
||||
if "group_name" in params.keys():
|
||||
vm_list = self._get_vms_from_logical_group(params["group_name"])
|
||||
vm_list = self._get_vms_from_logical_group(
|
||||
params["group_name"])
|
||||
query_results[q["stack_id"]] = vm_list
|
||||
else:
|
||||
self.status = "unknown paramenter in query"
|
||||
@ -261,7 +287,8 @@ class Ostro(object):
|
||||
|
||||
vm_id_list = []
|
||||
for lgk, lg in self.resource.logical_groups.iteritems():
|
||||
if lg.group_type == "EX" or lg.group_type == "AFF" or lg.group_type == "DIV":
|
||||
if lg.group_type == "EX" or lg.group_type == "AFF" or \
|
||||
lg.group_type == "DIV":
|
||||
lg_id = lgk.split(":")
|
||||
if lg_id[1] == _group_name:
|
||||
vm_id_list = lg.vm_list
|
||||
@ -282,14 +309,15 @@ class Ostro(object):
|
||||
return logical_groups
|
||||
|
||||
def _place_app(self, _app_data):
|
||||
''' set application topology '''
|
||||
"""Set application topology."""
|
||||
app_topology = self.app_handler.add_app(_app_data)
|
||||
if app_topology is None:
|
||||
self.status = self.app_handler.status
|
||||
self.logger.debug("Ostro._place_app: error while register requested apps: " + self.status)
|
||||
self.logger.debug("Ostro._place_app: error while register "
|
||||
"requested apps: " + self.status)
|
||||
return None
|
||||
|
||||
''' check and set vm flavor information '''
|
||||
"""Check and set vm flavor information."""
|
||||
for _, vm in app_topology.vms.iteritems():
|
||||
if self._set_vm_flavor_information(vm) is False:
|
||||
self.status = "fail to set flavor information"
|
||||
@ -301,22 +329,25 @@ class Ostro(object):
|
||||
self.logger.error("Ostro._place_app: " + self.status)
|
||||
return None
|
||||
|
||||
''' set weights for optimization '''
|
||||
"""Set weights for optimization."""
|
||||
app_topology.set_weight()
|
||||
app_topology.set_optimization_priority()
|
||||
|
||||
''' perform search for optimal placement of app topology '''
|
||||
"""Perform search for optimal placement of app topology."""
|
||||
placement_map = self.optimizer.place(app_topology)
|
||||
if placement_map is None:
|
||||
self.status = self.optimizer.status
|
||||
self.logger.debug("Ostro._place_app: error while optimizing app placement: " + self.status)
|
||||
self.logger.debug("Ostro._place_app: error while optimizing app "
|
||||
"placement: " + self.status)
|
||||
return None
|
||||
|
||||
''' update resource and app information '''
|
||||
"""Update resource and app information."""
|
||||
if len(placement_map) > 0:
|
||||
self.resource.update_topology()
|
||||
self.app_handler.add_placement(placement_map, self.resource.current_timestamp)
|
||||
if len(app_topology.exclusion_list_map) > 0 and len(app_topology.planned_vm_map) > 0:
|
||||
self.app_handler.add_placement(placement_map,
|
||||
self.resource.current_timestamp)
|
||||
if len(app_topology.exclusion_list_map) > 0 and \
|
||||
len(app_topology.planned_vm_map) > 0:
|
||||
for vk in app_topology.planned_vm_map.keys():
|
||||
if vk in placement_map.keys():
|
||||
del placement_map[vk]
|
||||
@ -336,9 +367,10 @@ class Ostro(object):
|
||||
flavor = self.resource.get_flavor(_vm.flavor)
|
||||
|
||||
if flavor is None:
|
||||
self.logger.warn("Ostro._set_vm_flavor_properties: does not exist flavor (" + _vm.flavor + ") and try to refetch")
|
||||
self.logger.warn("Ostro._set_vm_flavor_properties: does not exist "
|
||||
"flavor (" + _vm.flavor + ") and try to refetch")
|
||||
|
||||
''' reset flavor resource and try again '''
|
||||
"""Reset flavor resource and try again."""
|
||||
if self._set_flavors() is False:
|
||||
return False
|
||||
self.resource.update_topology()
|
||||
@ -359,6 +391,10 @@ class Ostro(object):
|
||||
return True
|
||||
|
||||
def handle_events(self, _event_list):
|
||||
"""Handle events in the event list."""
|
||||
"""Update the engine's resource topology based on the properties of
|
||||
each event in the event list.
|
||||
"""
|
||||
self.data_lock.acquire()
|
||||
|
||||
resource_updated = False
|
||||
@ -366,101 +402,131 @@ class Ostro(object):
|
||||
for e in _event_list:
|
||||
if e.host is not None and e.host != "none":
|
||||
if self._check_host(e.host) is False:
|
||||
self.logger.warn("Ostro.handle_events: host (" + e.host + ") related to this event not exists")
|
||||
self.logger.warn("Ostro.handle_events: host (" + e.host +
|
||||
") related to this event not exists")
|
||||
continue
|
||||
|
||||
if e.method == "build_and_run_instance": # VM is created (from stack)
|
||||
if e.method == "build_and_run_instance":
|
||||
# VM is created (from stack)
|
||||
self.logger.debug("Ostro.handle_events: got build_and_run event")
|
||||
if self.db.put_uuid(e) is False:
|
||||
self.data_lock.release()
|
||||
return False
|
||||
|
||||
elif e.method == "object_action":
|
||||
if e.object_name == 'Instance': # VM became active or deleted
|
||||
if e.object_name == 'Instance':
|
||||
# VM became active or deleted
|
||||
orch_id = self.db.get_uuid(e.uuid)
|
||||
if orch_id is None:
|
||||
self.data_lock.release()
|
||||
return False
|
||||
|
||||
if e.vm_state == "active":
|
||||
self.logger.debug("Ostro.handle_events: got instance_active event")
|
||||
self.logger.debug("Ostro.handle_events: got instance_"
|
||||
"active event")
|
||||
vm_info = self.app_handler.get_vm_info(orch_id[1], orch_id[0], e.host)
|
||||
if vm_info is None:
|
||||
self.logger.error("Ostro.handle_events: error while getting app info from MUSIC")
|
||||
self.logger.error("Ostro.handle_events: error "
|
||||
"while getting app info from MUSIC")
|
||||
self.data_lock.release()
|
||||
return False
|
||||
|
||||
if len(vm_info) == 0:
|
||||
'''
|
||||
h_uuid is None or "none" because vm is not created by stack
|
||||
or, stack not found because vm is created by the other stack
|
||||
'''
|
||||
self.logger.warn("Ostro.handle_events: no vm_info found in app placement record")
|
||||
self._add_vm_to_host(e.uuid, orch_id[0], e.host, e.vcpus, e.mem, e.local_disk)
|
||||
"""
|
||||
h_uuid is None or "none" because vm is not created
|
||||
by stack or, stack not found because vm is created
|
||||
by the other stack
|
||||
"""
|
||||
self.logger.warn("Ostro.handle_events: no vm_info "
|
||||
"found in app placement record")
|
||||
self._add_vm_to_host(e.uuid, orch_id[0], e.host,
|
||||
e.vcpus, e.mem, e.local_disk)
|
||||
else:
|
||||
if "planned_host" in vm_info.keys() and vm_info["planned_host"] != e.host:
|
||||
'''
|
||||
if "planned_host" in vm_info.keys() and \
|
||||
vm_info["planned_host"] != e.host:
|
||||
"""
|
||||
vm is activated in the different host
|
||||
'''
|
||||
self.logger.warn("Ostro.handle_events: vm activated in the different host")
|
||||
self._add_vm_to_host(e.uuid, orch_id[0], e.host, e.vcpus, e.mem, e.local_disk)
|
||||
"""
|
||||
self.logger.warn("Ostro.handle_events: vm "
|
||||
"activated in the different "
|
||||
"host")
|
||||
self._add_vm_to_host(
|
||||
e.uuid, orch_id[0], e.host, e.vcpus, e.mem,
|
||||
e.local_disk)
|
||||
|
||||
self._remove_vm_from_host(e.uuid, orch_id[0],
|
||||
vm_info["planned_host"],
|
||||
float(vm_info["cpus"]),
|
||||
float(vm_info["mem"]),
|
||||
float(vm_info["local_volume"]))
|
||||
self._remove_vm_from_host(
|
||||
e.uuid, orch_id[0], vm_info["planned_host"],
|
||||
float(vm_info["cpus"]),
|
||||
float(vm_info["mem"]),
|
||||
float(vm_info["local_volume"]))
|
||||
|
||||
self._remove_vm_from_logical_groups(e.uuid, orch_id[0], vm_info["planned_host"])
|
||||
self._remove_vm_from_logical_groups(
|
||||
e.uuid, orch_id[0], vm_info["planned_host"])
|
||||
else:
|
||||
'''
|
||||
"""
|
||||
found vm in the planned host,
|
||||
possibly the vm deleted in the host while batch cleanup
|
||||
'''
|
||||
if self._check_h_uuid(orch_id[0], e.host) is False:
|
||||
self.logger.debug("Ostro.handle_events: planned vm was deleted")
|
||||
"""
|
||||
if self._check_h_uuid(orch_id[0], e.host) \
|
||||
is False:
|
||||
self.logger.debug("Ostro.handle_events: "
|
||||
"planned vm was deleted")
|
||||
if self._check_uuid(e.uuid, e.host) is True:
|
||||
self._update_h_uuid_in_host(orch_id[0], e.uuid, e.host)
|
||||
self._update_h_uuid_in_logical_groups(orch_id[0], e.uuid, e.host)
|
||||
self._update_h_uuid_in_host(orch_id[0],
|
||||
e.uuid,
|
||||
e.host)
|
||||
self._update_h_uuid_in_logical_groups(
|
||||
orch_id[0], e.uuid, e.host)
|
||||
else:
|
||||
self.logger.debug("Ostro.handle_events: vm activated as planned")
|
||||
self._update_uuid_in_host(orch_id[0], e.uuid, e.host)
|
||||
self._update_uuid_in_logical_groups(orch_id[0], e.uuid, e.host)
|
||||
self.logger.debug("Ostro.handle_events: vm "
|
||||
"activated as planned")
|
||||
self._update_uuid_in_host(orch_id[0],
|
||||
e.uuid, e.host)
|
||||
self._update_uuid_in_logical_groups(
|
||||
orch_id[0], e.uuid, e.host)
|
||||
|
||||
resource_updated = True
|
||||
|
||||
elif e.vm_state == "deleted":
|
||||
self.logger.debug("Ostro.handle_events: got instance_delete event")
|
||||
self.logger.debug("Ostro.handle_events: got instance_"
|
||||
"delete event")
|
||||
|
||||
self._remove_vm_from_host(e.uuid, orch_id[0], e.host, e.vcpus, e.mem, e.local_disk)
|
||||
self._remove_vm_from_logical_groups(e.uuid, orch_id[0], e.host)
|
||||
self._remove_vm_from_host(e.uuid, orch_id[0], e.host,
|
||||
e.vcpus, e.mem, e.local_disk)
|
||||
self._remove_vm_from_logical_groups(e.uuid, orch_id[0],
|
||||
e.host)
|
||||
|
||||
if self.app_handler.update_vm_info(orch_id[1], orch_id[0]) is False:
|
||||
self.logger.error("Ostro.handle_events: error while updating app in MUSIC")
|
||||
if self.app_handler.update_vm_info(orch_id[1],
|
||||
orch_id[0]) is False:
|
||||
self.logger.error("Ostro.handle_events: error "
|
||||
"while updating app in MUSIC")
|
||||
self.data_lock.release()
|
||||
return False
|
||||
|
||||
resource_updated = True
|
||||
|
||||
else:
|
||||
self.logger.warn("Ostro.handle_events: unknown vm_state = " + e.vm_state)
|
||||
self.logger.warn("Ostro.handle_events: unknown vm_"
|
||||
"state = " + e.vm_state)
|
||||
|
||||
elif e.object_name == 'ComputeNode': # Host resource is updated
|
||||
elif e.object_name == 'ComputeNode':
|
||||
# Host resource is updated
|
||||
self.logger.debug("Ostro.handle_events: got compute event")
|
||||
# NOTE: what if host is disabled?
|
||||
if self.resource.update_host_resources(e.host, e.status,
|
||||
e.vcpus, e.vcpus_used,
|
||||
e.mem, e.free_mem,
|
||||
e.local_disk, e.free_local_disk,
|
||||
e.disk_available_least) is True:
|
||||
if self.resource.update_host_resources(
|
||||
e.host, e.status, e.vcpus, e.vcpus_used, e.mem,
|
||||
e.free_mem, e.local_disk, e.free_local_disk,
|
||||
e.disk_available_least) is True:
|
||||
self.resource.update_host_time(e.host)
|
||||
|
||||
resource_updated = True
|
||||
|
||||
else:
|
||||
self.logger.warn("Ostro.handle_events: unknown object_name = " + e.object_name)
|
||||
self.logger.warn("Ostro.handle_events: unknown object_"
|
||||
"name = " + e.object_name)
|
||||
else:
|
||||
self.logger.warn("Ostro.handle_events: unknown event method = " + e.method)
|
||||
self.logger.warn("Ostro.handle_events: unknown event "
|
||||
"method = " + e.method)
|
||||
|
||||
if resource_updated is True:
|
||||
self.resource.update_topology()
|
||||
@ -480,23 +546,30 @@ class Ostro(object):
|
||||
|
||||
return True
|
||||
|
||||
def _add_vm_to_host(self, _uuid, _h_uuid, _host_name, _vcpus, _mem, _local_disk):
|
||||
def _add_vm_to_host(self, _uuid, _h_uuid, _host_name, _vcpus, _mem,
|
||||
_local_disk):
|
||||
vm_id = None
|
||||
if _h_uuid is None:
|
||||
vm_id = ("none", "none", _uuid)
|
||||
else:
|
||||
vm_id = (_h_uuid, "none", _uuid)
|
||||
|
||||
self.resource.add_vm_to_host(_host_name, vm_id, _vcpus, _mem, _local_disk)
|
||||
self.resource.add_vm_to_host(_host_name, vm_id, _vcpus, _mem,
|
||||
_local_disk)
|
||||
self.resource.update_host_time(_host_name)
|
||||
|
||||
def _remove_vm_from_host(self, _uuid, _h_uuid, _host_name, _vcpus, _mem, _local_disk):
|
||||
def _remove_vm_from_host(self, _uuid, _h_uuid, _host_name, _vcpus, _mem,
|
||||
_local_disk):
|
||||
if self._check_h_uuid(_h_uuid, _host_name) is True:
|
||||
self.resource.remove_vm_by_h_uuid_from_host(_host_name, _h_uuid, _vcpus, _mem, _local_disk)
|
||||
self.resource.remove_vm_by_h_uuid_from_host(_host_name, _h_uuid,
|
||||
_vcpus, _mem,
|
||||
_local_disk)
|
||||
self.resource.update_host_time(_host_name)
|
||||
else:
|
||||
if self._check_uuid(_uuid, _host_name) is True:
|
||||
self.resource.remove_vm_by_uuid_from_host(_host_name, _uuid, _vcpus, _mem, _local_disk)
|
||||
self.resource.remove_vm_by_uuid_from_host(_host_name, _uuid,
|
||||
_vcpus, _mem,
|
||||
_local_disk)
|
||||
self.resource.update_host_time(_host_name)
|
||||
|
||||
def _remove_vm_from_logical_groups(self, _uuid, _h_uuid, _host_name):
|
||||
@ -537,7 +610,8 @@ class Ostro(object):
|
||||
if host.update_uuid(_h_uuid, _uuid) is True:
|
||||
self.resource.update_host_time(_host_name)
|
||||
else:
|
||||
self.logger.warn("Ostro._update_uuid_in_host: fail to update uuid in host = " + host.name)
|
||||
self.logger.warn("Ostro._update_uuid_in_host: fail to update uuid "
|
||||
"in host = " + host.name)
|
||||
|
||||
def _update_h_uuid_in_host(self, _h_uuid, _uuid, _host_name):
|
||||
host = self.resource.hosts[_host_name]
|
||||
@ -554,7 +628,8 @@ class Ostro(object):
|
||||
|
||||
self.resource.update_h_uuid_in_logical_groups(_h_uuid, _uuid, host)
|
||||
|
||||
def _get_json_results(self, _request_type, _status_type, _status_message, _map):
|
||||
def _get_json_results(self, _request_type, _status_type, _status_message,
|
||||
_map):
|
||||
result = {}
|
||||
|
||||
if _request_type == "query":
|
||||
|
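Taken together, the methods above imply the following engine lifecycle; a hedged sketch, assuming bootstrap() reports success with a truthy return and that config and logger are built by the daemon code further down:

engine = Ostro(config, logger)
if engine.bootstrap():     # load resource status and flavors from the db
    engine.run_ostro()     # start topology/compute/listener threads, then poll for events/requests
else:
    logger.error("Ostro bootstrap failed")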
File diff suppressed because it is too large
@ -1,39 +1,67 @@
|
||||
#
|
||||
# Copyright 2014-2017 AT&T Intellectual Property
|
||||
#
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from valet.engine.optimizer.app_manager.app_topology_base import VGroup, VM, Volume, LEVELS
|
||||
"""Resources utlized by search engine."""
|
||||
|
||||
from valet.engine.optimizer.app_manager.app_topology_base \
|
||||
import VGroup, VM, Volume, LEVELS
|
||||
|
||||
|
||||
class Resource(object):
|
||||
"""Resource."""
|
||||
|
||||
def __init__(self):
|
||||
self.level = None # level of placement
|
||||
"""Initialization."""
|
||||
# level of placement
|
||||
self.level = None
|
||||
|
||||
self.host_name = None
|
||||
self.host_memberships = {} # all mapped logical groups to host
|
||||
self.host_vCPUs = 0 # original total vCPUs before overcommit
|
||||
self.host_avail_vCPUs = 0 # remaining vCPUs after overcommit
|
||||
self.host_mem = 0 # original total mem cap before overcommit
|
||||
self.host_avail_mem = 0 # remaining mem cap after
|
||||
self.host_local_disk = 0 # original total local disk cap before overcommit
|
||||
self.host_avail_local_disk = 0 # remaining local disk cap after overcommit
|
||||
self.host_avail_switches = {} # all mapped switches to host
|
||||
self.host_avail_storages = {} # all mapped storage_resources to host
|
||||
self.host_num_of_placed_vms = 0 # the number of vms currently placed in this host
|
||||
|
||||
self.rack_name = None # where this host is located
|
||||
# all mapped logical groups to host
|
||||
self.host_memberships = {}
|
||||
|
||||
# original total vCPUs before overcommit
|
||||
self.host_vCPUs = 0
|
||||
|
||||
# remaining vCPUs after overcommit
|
||||
self.host_avail_vCPUs = 0
|
||||
|
||||
# original total mem cap before overcommit
|
||||
self.host_mem = 0
|
||||
|
||||
# remaining mem cap after
|
||||
self.host_avail_mem = 0
|
||||
|
||||
# original total local disk cap before overcommit
|
||||
self.host_local_disk = 0
|
||||
|
||||
# remaining local disk cap after overcommit
|
||||
self.host_avail_local_disk = 0
|
||||
|
||||
# all mapped switches to host
|
||||
self.host_avail_switches = {}
|
||||
|
||||
# all mapped storage_resources to host
|
||||
self.host_avail_storages = {}
|
||||
|
||||
# the number of vms currently placed in this host
|
||||
self.host_num_of_placed_vms = 0
|
||||
|
||||
# where this host is located
|
||||
self.rack_name = None
|
||||
|
||||
self.rack_memberships = {}
|
||||
self.rack_vCPUs = 0
|
||||
self.rack_avail_vCPUs = 0
|
||||
@ -41,11 +69,18 @@ class Resource(object):
|
||||
self.rack_avail_mem = 0
|
||||
self.rack_local_disk = 0
|
||||
self.rack_avail_local_disk = 0
|
||||
self.rack_avail_switches = {} # all mapped switches to rack
|
||||
self.rack_avail_storages = {} # all mapped storage_resources to rack
|
||||
|
||||
# all mapped switches to rack
|
||||
self.rack_avail_switches = {}
|
||||
|
||||
# all mapped storage_resources to rack
|
||||
self.rack_avail_storages = {}
|
||||
|
||||
self.rack_num_of_placed_vms = 0
|
||||
|
||||
self.cluster_name = None # where this host and rack are located
|
||||
# where this host and rack are located
|
||||
self.cluster_name = None
|
||||
|
||||
self.cluster_memberships = {}
|
||||
self.cluster_vCPUs = 0
|
||||
self.cluster_avail_vCPUs = 0
|
||||
@ -53,15 +88,24 @@ class Resource(object):
|
||||
self.cluster_avail_mem = 0
|
||||
self.cluster_local_disk = 0
|
||||
self.cluster_avail_local_disk = 0
|
||||
self.cluster_avail_switches = {} # all mapped switches to cluster
|
||||
self.cluster_avail_storages = {} # all mapped storage_resources to cluster
|
||||
|
||||
# all mapped switches to cluster
|
||||
self.cluster_avail_switches = {}
|
||||
|
||||
# all mapped storage_resources to cluster
|
||||
self.cluster_avail_storages = {}
|
||||
self.cluster_num_of_placed_vms = 0
|
||||
|
||||
self.storage = None # selected best storage for volume among host_avail_storages
|
||||
# selected best storage for volume among host_avail_storages
|
||||
self.storage = None
|
||||
|
||||
self.sort_base = 0 # order to place
|
||||
# order to place
|
||||
self.sort_base = 0
|
||||
|
||||
def get_common_placement(self, _resource):
|
||||
"""Get common placement level."""
|
||||
"""Get the common level between this resource and the one
|
||||
provided."""
|
||||
level = None
|
||||
|
||||
if self.cluster_name != _resource.cluster_name:
|
||||
@ -78,6 +122,7 @@ class Resource(object):
|
||||
return level
|
||||
|
||||
def get_resource_name(self, _level):
|
||||
"""Get the name of this resource at the specified level."""
|
||||
name = "unknown"
|
||||
|
||||
if _level == "cluster":
|
||||
@ -90,6 +135,7 @@ class Resource(object):
|
||||
return name
|
||||
|
||||
def get_memberships(self, _level):
|
||||
"""Get the memberships of this resource at the specified level."""
|
||||
memberships = None
|
||||
|
||||
if _level == "cluster":
|
||||
@ -102,6 +148,7 @@ class Resource(object):
|
||||
return memberships
|
||||
|
||||
def get_num_of_placed_vms(self, _level):
|
||||
"""Get the number of placed vms of this resource at the specified level."""
|
||||
num_of_vms = 0
|
||||
|
||||
if _level == "cluster":
|
||||
@ -114,6 +161,7 @@ class Resource(object):
|
||||
return num_of_vms
|
||||
|
||||
def get_avail_resources(self, _level):
|
||||
"""Get the available vCPUs, memory, local disk of this resource at the specified level."""
|
||||
avail_vCPUs = 0
|
||||
avail_mem = 0
|
||||
avail_local_disk = 0
|
||||
@ -134,6 +182,7 @@ class Resource(object):
|
||||
return (avail_vCPUs, avail_mem, avail_local_disk)
|
||||
|
||||
def get_local_disk(self, _level):
|
||||
"""Get the local disk and available local disk of this resource at the specified level."""
|
||||
local_disk = 0
|
||||
avail_local_disk = 0
|
||||
|
||||
@ -150,6 +199,7 @@ class Resource(object):
|
||||
return (local_disk, avail_local_disk)
|
||||
|
||||
def get_vCPUs(self, _level):
|
||||
"""Get the vCPUs and available vCPUs of this resource at the specified level."""
|
||||
vCPUs = 0
|
||||
avail_vCPUs = 0
|
||||
|
||||
@ -166,6 +216,7 @@ class Resource(object):
|
||||
return (vCPUs, avail_vCPUs)
|
||||
|
||||
def get_mem(self, _level):
|
||||
"""Get the memory and available memory of this resource at the specified level."""
|
||||
mem = 0
|
||||
avail_mem = 0
|
||||
|
||||
@ -182,6 +233,7 @@ class Resource(object):
|
||||
return (mem, avail_mem)
|
||||
|
||||
def get_avail_storages(self, _level):
|
||||
"""Get the available storages of this resource at the specified level."""
|
||||
avail_storages = None
|
||||
|
||||
if _level == "cluster":
|
||||
@ -194,6 +246,7 @@ class Resource(object):
|
||||
return avail_storages
|
||||
|
||||
def get_avail_switches(self, _level):
|
||||
"""Get the available switches of this resource at the specified level."""
|
||||
avail_switches = None
|
||||
|
||||
if _level == "cluster":
|
||||
@ -207,20 +260,26 @@ class Resource(object):
|
||||
|
||||
|
||||
class LogicalGroupResource(object):
|
||||
"""LogicalGroupResource."""
|
||||
|
||||
def __init__(self):
|
||||
"""Initialization."""
|
||||
self.name = None
|
||||
self.group_type = "AGGR"
|
||||
|
||||
self.metadata = {}
|
||||
|
||||
self.num_of_placed_vms = 0
|
||||
self.num_of_placed_vms_per_host = {} # key = host (i.e., id of host or rack), value = num_of_placed_vms
|
||||
|
||||
# key = host (i.e., id of host or rack), value = num_of_placed_vms
|
||||
self.num_of_placed_vms_per_host = {}
|
||||
|
||||
|
||||
class StorageResource(object):
|
||||
"""StorageResource."""
|
||||
|
||||
def __init__(self):
|
||||
"""Initialization."""
|
||||
self.storage_name = None
|
||||
self.storage_class = None
|
||||
self.storage_avail_disk = 0
|
||||
@ -229,8 +288,10 @@ class StorageResource(object):
|
||||
|
||||
|
||||
class SwitchResource(object):
|
||||
"""SwitchResource."""
|
||||
|
||||
def __init__(self):
|
||||
"""Initialization."""
|
||||
self.switch_name = None
|
||||
self.switch_type = None
|
||||
self.avail_bandwidths = [] # out-bound bandwidths
|
||||
@ -239,13 +300,16 @@ class SwitchResource(object):
|
||||
|
||||
|
||||
class Node(object):
|
||||
"""Node."""
|
||||
|
||||
def __init__(self):
|
||||
"""Initialization."""
|
||||
self.node = None # VM, Volume, or VGroup
|
||||
|
||||
self.sort_base = -1
|
||||
|
||||
def get_all_links(self):
|
||||
"""Return a list of links for vms, volumes, and/or vgroups."""
|
||||
link_list = []
|
||||
|
||||
if isinstance(self.node, VM):
|
||||
@ -263,6 +327,7 @@ class Node(object):
|
||||
return link_list
|
||||
|
||||
def get_bandwidth_of_link(self, _link):
|
||||
"""Return bandwidth of link."""
|
||||
bandwidth = 0
|
||||
|
||||
if isinstance(self.node, VGroup) or isinstance(self.node, VM):
|
||||
@ -276,6 +341,7 @@ class Node(object):
|
||||
return bandwidth
|
||||
|
||||
def get_common_diversity(self, _diversity_groups):
|
||||
"""Return the common level of the given diversity groups."""
|
||||
common_level = "ANY"
|
||||
|
||||
for dk in self.node.diversity_groups.keys():
|
||||
@ -290,16 +356,19 @@ class Node(object):
|
||||
return common_level
|
||||
|
||||
def get_affinity_id(self):
|
||||
"""Return the affinity id."""
|
||||
aff_id = None
|
||||
|
||||
if isinstance(self.node, VGroup) and self.node.vgroup_type == "AFF" and \
|
||||
self.node.name != "any":
|
||||
if isinstance(self.node, VGroup) and \
|
||||
self.node.vgroup_type == "AFF" and \
|
||||
self.node.name != "any":
|
||||
aff_id = self.node.level + ":" + self.node.name
|
||||
|
||||
return aff_id
|
||||
|
||||
|
||||
def compute_reservation(_level, _placement_level, _bandwidth):
|
||||
"""Compute and return the reservation."""
|
||||
reservation = 0
|
||||
|
||||
if _placement_level != "ANY":
|
||||
|
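Each accessor on the Resource class above is parameterised by the placement level; a hedged sketch of how a search routine might query a candidate (the candidate object itself would come from the search):

name = candidate.get_resource_name("rack")
vcpus, mem, disk = candidate.get_avail_resources("rack")
memberships = candidate.get_memberships("rack")
placed_vms = candidate.get_num_of_placed_vms("rack")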
@ -1,12 +1,12 @@
|
||||
#
|
||||
# Copyright 2014-2017 AT&T Intellectual Property
|
||||
#
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
@ -15,6 +15,8 @@
|
||||
|
||||
# - Set all configurations to run Ostro
|
||||
|
||||
"""Valet Engine Server Configuration."""
|
||||
|
||||
import os
|
||||
from oslo_config import cfg
|
||||
from valet.engine.conf import register_conf
|
||||
@ -24,9 +26,10 @@ CONF = cfg.CONF
|
||||
|
||||
|
||||
class Config(object):
|
||||
"""Valet Engine Server Configuration."""
|
||||
|
||||
def __init__(self, *default_config_files):
|
||||
|
||||
"""Initialization."""
|
||||
register_conf()
|
||||
if default_config_files:
|
||||
CONF(default_config_files=default_config_files)
|
||||
@ -126,7 +129,7 @@ class Config(object):
|
||||
self.base_flavor_disk = 0
|
||||
|
||||
def configure(self):
|
||||
|
||||
"""Store config info extracted from oslo."""
|
||||
status = self._init_system()
|
||||
if status != "success":
|
||||
return status
|
||||
@ -181,17 +184,21 @@ class Config(object):
|
||||
|
||||
self.network_control_url = CONF.engine.network_control_url
|
||||
|
||||
self.default_cpu_allocation_ratio = CONF.engine.default_cpu_allocation_ratio
|
||||
self.default_cpu_allocation_ratio = \
|
||||
CONF.engine.default_cpu_allocation_ratio
|
||||
|
||||
self.default_ram_allocation_ratio = CONF.engine.default_ram_allocation_ratio
|
||||
self.default_ram_allocation_ratio = \
|
||||
CONF.engine.default_ram_allocation_ratio
|
||||
|
||||
self.default_disk_allocation_ratio = CONF.engine.default_disk_allocation_ratio
|
||||
self.default_disk_allocation_ratio = \
|
||||
CONF.engine.default_disk_allocation_ratio
|
||||
|
||||
self.static_cpu_standby_ratio = CONF.engine.static_cpu_standby_ratio
|
||||
|
||||
self.static_mem_standby_ratio = CONF.engine.static_mem_standby_ratio
|
||||
|
||||
self.static_local_disk_standby_ratio = CONF.engine.static_local_disk_standby_ratio
|
||||
self.static_local_disk_standby_ratio = \
|
||||
CONF.engine.static_local_disk_standby_ratio
|
||||
|
||||
self.topology_trigger_time = CONF.engine.topology_trigger_time
|
||||
|
||||
|
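After configure() succeeds, the oslo options above are exposed as plain attributes on the Config instance; a hedged sketch (the config file path is illustrative):

config = Config("/etc/valet/valet.conf")
if config.configure() == "success":
    print(config.default_cpu_allocation_ratio,
          config.default_ram_allocation_ratio,
          config.default_disk_allocation_ratio)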
@ -1,18 +1,20 @@
|
||||
#
|
||||
# Copyright 2014-2017 AT&T Intellectual Property
|
||||
#
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Generic Daemon."""
|
||||
|
||||
import atexit
|
||||
import os
|
||||
from signal import SIGTERM
|
||||
@ -21,12 +23,14 @@ import time
|
||||
|
||||
|
||||
class Daemon(object):
|
||||
""" A generic daemon class.
|
||||
"""A generic daemon class."""
|
||||
|
||||
Usage: subclass the Daemon class and override the run() method
|
||||
"""Usage: subclass the Daemon class and override the run() method
|
||||
"""
|
||||
|
||||
def __init__(self, priority, pidfile, logger, stdin='/dev/null', stdout='/dev/null', stderr='/dev/null'):
|
||||
def __init__(self, priority, pidfile, logger, stdin='/dev/null',
|
||||
stdout='/dev/null', stderr='/dev/null'):
|
||||
"""Initialization."""
|
||||
self.stdin = stdin
|
||||
self.stdout = stdout
|
||||
self.stderr = stderr
|
||||
@ -35,9 +39,9 @@ class Daemon(object):
|
||||
self.logger = logger
|
||||
|
||||
def daemonize(self):
|
||||
""" Do the UNIX double-fork magic, see Stevens' "Advanced
|
||||
|
||||
Programming in the UNIX Environment" for details (ISBN 0201563177)
|
||||
"""Do the UNIX double-fork magic."""
|
||||
"""See Stevens' "Advanced Programming in the UNIX Environment"
|
||||
for details. (ISBN 0201563177).
|
||||
http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
|
||||
"""
|
||||
try:
|
||||
@ -47,7 +51,8 @@ class Daemon(object):
|
||||
sys.exit(0)
|
||||
except OSError as e:
|
||||
self.logger.error("Daemon error at step1: " + e.strerror)
|
||||
sys.stderr.write("fork #1 failed: %d (%s)\n" % (e.errno, e.strerror))
|
||||
sys.stderr.write("fork #1 failed: %d (%s)\n" %
|
||||
(e.errno, e.strerror))
|
||||
sys.exit(1)
|
||||
|
||||
# decouple from parent environment
|
||||
@ -63,7 +68,8 @@ class Daemon(object):
|
||||
sys.exit(0)
|
||||
except OSError as e:
|
||||
self.logger.error("Daemon error at step2: " + e.strerror)
|
||||
sys.stderr.write("fork #2 failed: %d (%s)\n" % (e.errno, e.strerror))
|
||||
sys.stderr.write("fork #2 failed: %d (%s)\n" %
|
||||
(e.errno, e.strerror))
|
||||
sys.exit(1)
|
||||
|
||||
# redirect standard file descriptors
|
||||
@ -82,10 +88,11 @@ class Daemon(object):
|
||||
file(self.pidfile, 'w+').write("%s\n" % pid)
|
||||
|
||||
def delpid(self):
|
||||
"""Remove pidfile."""
|
||||
os.remove(self.pidfile)
|
||||
|
||||
def getpid(self):
|
||||
"""returns the content of pidfile or None."""
|
||||
"""Return the content of pidfile or None."""
|
||||
try:
|
||||
pf = file(self.pidfile, 'r')
|
||||
pid = int(pf.read().strip())
|
||||
@ -95,7 +102,7 @@ class Daemon(object):
|
||||
return pid
|
||||
|
||||
def checkpid(self, pid):
|
||||
""" Check For the existence of a unix pid. """
|
||||
"""Check for the existence of a UNIX pid."""
|
||||
if pid is None:
|
||||
return False
|
||||
|
||||
@ -108,7 +115,7 @@ class Daemon(object):
|
||||
return True
|
||||
|
||||
def start(self):
|
||||
"""Start the daemon"""
|
||||
"""Start thedaemon."""
|
||||
# Check for a pidfile to see if the daemon already runs
|
||||
pid = self.getpid()
|
||||
|
||||
@ -122,7 +129,7 @@ class Daemon(object):
|
||||
self.run()
|
||||
|
||||
def stop(self):
|
||||
"""Stop the daemon"""
|
||||
"""Stop the daemon."""
|
||||
# Get the pid from the pidfile
|
||||
pid = self.getpid()
|
||||
|
||||
@ -146,12 +153,12 @@ class Daemon(object):
|
||||
sys.exit(1)
|
||||
|
||||
def restart(self):
|
||||
"""Restart the daemon"""
|
||||
"""Restart the daemon."""
|
||||
self.stop()
|
||||
self.start()
|
||||
|
||||
def status(self):
|
||||
""" returns instance's priority """
|
||||
"""Return instance's priority."""
|
||||
# Check for a pidfile to see if the daemon already runs
|
||||
pid = self.getpid()
|
||||
|
||||
@ -161,13 +168,14 @@ class Daemon(object):
|
||||
message = "status: pidfile %s exist. Daemon is running\n"
|
||||
status = self.priority
|
||||
else:
|
||||
message = "status: pidfile %s does not exist. Daemon is not running\n"
|
||||
message = "status: pidfile %s does not exist. Daemon is not " \
|
||||
"running\n"
|
||||
|
||||
sys.stderr.write(message % self.pidfile)
|
||||
return status
|
||||
|
||||
def run(self):
|
||||
""" You should override this method when you subclass Daemon.
|
||||
|
||||
It will be called after the process has been daemonized by start() or restart().
|
||||
"""You should override this method when you subclass Daemon."""
|
||||
"""It will be called after the process has been daemonized by
|
||||
start() or restart().
|
||||
"""
|
||||
|
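Following the usage note in the class docstring, a minimal subclass sketch; the pidfile path and logger are placeholders, and time is already imported by this module:

class HeartbeatDaemon(Daemon):
    def run(self):
        while True:
            self.logger.info("heartbeat")
            time.sleep(60)

# HeartbeatDaemon(1, "/var/run/valet/heartbeat.pid", logger).start()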
@ -1,12 +1,12 @@
|
||||
#
|
||||
# Copyright 2014-2017 AT&T Intellectual Property
|
||||
#
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
@ -15,28 +15,38 @@
|
||||
|
||||
# - Handle user requests
|
||||
|
||||
import sys
|
||||
"""Database Cleaner."""
|
||||
|
||||
from configuration import Config
|
||||
|
||||
import sys
|
||||
from valet.api.db.models.music import Music
|
||||
|
||||
|
||||
class DBCleaner(object):
|
||||
"""Database Cleaner."""
|
||||
|
||||
def __init__(self, _config):
|
||||
"""Initialization."""
|
||||
self.config = _config
|
||||
|
||||
self.music = Music()
|
||||
|
||||
def clean_db_tables(self):
|
||||
results = self.music.read_all_rows(self.config.db_keyspace, self.config.db_resource_table)
|
||||
"""Clean tables in Music."""
|
||||
"""Clean resource, resource_index, request, response, event,
|
||||
app, app_index, and uuid tables.
|
||||
"""
|
||||
results = self.music.read_all_rows(self.config.db_keyspace,
|
||||
self.config.db_resource_table)
|
||||
if len(results) > 0:
|
||||
print("resource table result = ", len(results))
|
||||
for _, row in results.iteritems():
|
||||
self.music.delete_row_eventually(self.config.db_keyspace, self.config.db_resource_table, 'site_name', row['site_name'])
|
||||
self.music.delete_row_eventually(self.config.db_keyspace,
|
||||
self.config.db_resource_table,
|
||||
'site_name', row['site_name'])
|
||||
|
||||
results = self.music.read_all_rows(self.config.db_keyspace, self.config.db_request_table)
|
||||
results = self.music.read_all_rows(self.config.db_keyspace,
|
||||
self.config.db_request_table)
|
||||
if len(results) > 0:
|
||||
print("request table result = ", len(results))
|
||||
for _, row in results.iteritems():
|
||||
@ -44,7 +54,8 @@ class DBCleaner(object):
|
||||
self.config.db_request_table,
|
||||
'stack_id', row['stack_id'])
|
||||
|
||||
results = self.music.read_all_rows(self.config.db_keyspace, self.config.db_response_table)
|
||||
results = self.music.read_all_rows(self.config.db_keyspace,
|
||||
self.config.db_response_table)
|
||||
if len(results) > 0:
|
||||
print("response table result = ", len(results))
|
||||
for _, row in results.iteritems():
|
||||
@ -52,7 +63,8 @@ class DBCleaner(object):
|
||||
self.config.db_response_table,
|
||||
'stack_id', row['stack_id'])
|
||||
|
||||
results = self.music.read_all_rows(self.config.db_keyspace, self.config.db_event_table)
|
||||
results = self.music.read_all_rows(self.config.db_keyspace,
|
||||
self.config.db_event_table)
|
||||
if len(results) > 0:
|
||||
print("event table result = ", len(results))
|
||||
for _, row in results.iteritems():
|
||||
@ -60,15 +72,18 @@ class DBCleaner(object):
|
||||
self.config.db_event_table,
|
||||
'timestamp', row['timestamp'])
|
||||
|
||||
results = self.music.read_all_rows(self.config.db_keyspace, self.config.db_resource_index_table)
|
||||
results = self.music.read_all_rows(self.config.db_keyspace,
|
||||
self.config.db_resource_index_table)
|
||||
if len(results) > 0:
|
||||
print("resource_index table result = ", len(results))
|
||||
for _, row in results.iteritems():
|
||||
self.music.delete_row_eventually(self.config.db_keyspace,
|
||||
self.config.db_resource_index_table,
|
||||
'site_name', row['site_name'])
|
||||
self.music.delete_row_eventually(
|
||||
self.config.db_keyspace,
|
||||
self.config.db_resource_index_table,
|
||||
'site_name', row['site_name'])
|
||||
|
||||
results = self.music.read_all_rows(self.config.db_keyspace, self.config.db_app_index_table)
|
||||
results = self.music.read_all_rows(self.config.db_keyspace,
|
||||
self.config.db_app_index_table)
|
||||
if len(results) > 0:
|
||||
print("app_index table result = ", len(results))
|
||||
for _, row in results.iteritems():
|
||||
@ -76,7 +91,8 @@ class DBCleaner(object):
|
||||
self.config.db_app_index_table,
|
||||
'site_name', row['site_name'])
|
||||
|
||||
results = self.music.read_all_rows(self.config.db_keyspace, self.config.db_app_table)
|
||||
results = self.music.read_all_rows(self.config.db_keyspace,
|
||||
self.config.db_app_table)
|
||||
if len(results) > 0:
|
||||
print("app table result = ", len(results))
|
||||
for _, row in results.iteritems():
|
||||
@ -84,7 +100,8 @@ class DBCleaner(object):
|
||||
self.config.db_app_table,
|
||||
'stack_id', row['stack_id'])
|
||||
|
||||
results = self.music.read_all_rows(self.config.db_keyspace, self.config.db_uuid_table)
|
||||
results = self.music.read_all_rows(self.config.db_keyspace,
|
||||
self.config.db_uuid_table)
|
||||
if len(results) > 0:
|
||||
print("uuid table result = ", len(results))
|
||||
for _, row in results.iteritems():
|
||||
@ -93,49 +110,61 @@ class DBCleaner(object):
|
||||
'uuid', row['uuid'])
|
||||
|
||||
def check_db_tables(self):
|
||||
results = self.music.read_all_rows(self.config.db_keyspace, self.config.db_resource_table)
|
||||
"""Log whether tables in Music have been cleaned."""
|
||||
"""Check resource, resource_index, request, response, event,
|
||||
app, app_index, and uuid tables.
|
||||
"""
|
||||
results = self.music.read_all_rows(self.config.db_keyspace,
|
||||
self.config.db_resource_table)
|
||||
if len(results) > 0:
|
||||
print("resource table not cleaned ")
|
||||
else:
|
||||
print("resource table cleaned")
|
||||
|
||||
results = self.music.read_all_rows(self.config.db_keyspace, self.config.db_request_table)
|
||||
results = self.music.read_all_rows(self.config.db_keyspace,
|
||||
self.config.db_request_table)
|
||||
if len(results) > 0:
|
||||
print("request table not cleaned ")
|
||||
else:
|
||||
print("request table cleaned")
|
||||
|
||||
results = self.music.read_all_rows(self.config.db_keyspace, self.config.db_response_table)
|
||||
results = self.music.read_all_rows(self.config.db_keyspace,
|
||||
self.config.db_response_table)
|
||||
if len(results) > 0:
|
||||
print("response table not cleaned ")
|
||||
else:
|
||||
print("response table cleaned")
|
||||
|
||||
results = self.music.read_all_rows(self.config.db_keyspace, self.config.db_event_table)
|
||||
results = self.music.read_all_rows(self.config.db_keyspace,
|
||||
self.config.db_event_table)
|
||||
if len(results) > 0:
|
||||
print("event table not cleaned ")
|
||||
else:
|
||||
print("event table cleaned")
|
||||
|
||||
results = self.music.read_all_rows(self.config.db_keyspace, self.config.db_resource_index_table)
|
||||
results = self.music.read_all_rows(self.config.db_keyspace,
|
||||
self.config.db_resource_index_table)
|
||||
if len(results) > 0:
|
||||
print("resource log index table not cleaned ")
|
||||
else:
|
||||
print("resource log index table cleaned")
|
||||
|
||||
results = self.music.read_all_rows(self.config.db_keyspace, self.config.db_app_index_table)
|
||||
results = self.music.read_all_rows(self.config.db_keyspace,
|
||||
self.config.db_app_index_table)
|
||||
if len(results) > 0:
|
||||
print("app log index table not cleaned ")
|
||||
else:
|
||||
print("app log index table cleaned")
|
||||
|
||||
results = self.music.read_all_rows(self.config.db_keyspace, self.config.db_app_table)
|
||||
results = self.music.read_all_rows(self.config.db_keyspace,
|
||||
self.config.db_app_table)
|
||||
if len(results) > 0:
|
||||
print("app log table not cleaned ")
|
||||
else:
|
||||
print("app log table cleaned")
|
||||
|
||||
results = self.music.read_all_rows(self.config.db_keyspace, self.config.db_uuid_table)
|
||||
results = self.music.read_all_rows(self.config.db_keyspace,
|
||||
self.config.db_uuid_table)
|
||||
if len(results) > 0:
|
||||
print("uuid table not cleaned ")
|
||||
else:
|
||||
|
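A hedged sketch of driving the cleaner above, mirroring how the engine daemon builds its configuration ("success" is the status string the daemon code checks for):

config = Config()
if config.configure() != "success":
    sys.exit(2)
cleaner = DBCleaner(config)
cleaner.clean_db_tables()
cleaner.check_db_tables()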
@ -1,31 +1,34 @@
|
||||
#
|
||||
# Copyright 2014-2017 AT&T Intellectual Property
|
||||
#
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Daemon foe Valet Engine."""
|
||||
|
||||
import os
|
||||
import sys
|
||||
import traceback
|
||||
from valet.engine.optimizer.ostro.ostro import Ostro
|
||||
from valet.engine.optimizer.ostro_server.configuration import Config
|
||||
from valet.engine.optimizer.ostro_server.daemon import Daemon # implemented for Python v2.7
|
||||
from valet.engine.optimizer.ostro_server.daemon import Daemon
|
||||
from valet.engine.optimizer.util.util import init_logger
|
||||
|
||||
|
||||
class OstroDaemon(Daemon):
|
||||
"""Daemon foe Valet Engine."""
|
||||
|
||||
def run(self):
|
||||
|
||||
"""Run the daemon."""
|
||||
self.logger.info("##### Valet Engine is launched #####")
|
||||
try:
|
||||
ostro = Ostro(config, self.logger)
|
||||
@ -40,6 +43,7 @@ class OstroDaemon(Daemon):
|
||||
|
||||
|
||||
def verify_dirs(list_of_dirs):
|
||||
"""If a directory in the list does not exist, create it."""
|
||||
for d in list_of_dirs:
|
||||
try:
|
||||
if not os.path.exists(d):
|
||||
@ -50,7 +54,7 @@ def verify_dirs(list_of_dirs):
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
''' configuration '''
|
||||
""" configuration """
|
||||
# Configuration
|
||||
try:
|
||||
config = Config()
|
||||
@ -59,11 +63,12 @@ if __name__ == "__main__":
|
||||
print(config_status)
|
||||
sys.exit(2)
|
||||
|
||||
''' verify directories '''
|
||||
dirs_list = [config.logging_loc, config.resource_log_loc, config.app_log_loc, os.path.dirname(config.process)]
|
||||
""" verify directories """
|
||||
dirs_list = [config.logging_loc, config.resource_log_loc,
|
||||
config.app_log_loc, os.path.dirname(config.process)]
|
||||
verify_dirs(dirs_list)
|
||||
|
||||
''' logger '''
|
||||
""" logger """
|
||||
logger = init_logger(config)
|
||||
|
||||
# Start daemon process
|
||||
|
@ -1,4 +1,4 @@
|
||||
# Version 2.0.2: Feb. 9, 2016
|
||||
# Version 2.0.2
|
||||
|
||||
# Set simulation parameters
|
||||
num_of_spine_switches=0
|
||||
|
@ -1,18 +1,20 @@
|
||||
#
|
||||
# Copyright 2014-2017 AT&T Intellectual Property
|
||||
#
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Util."""
|
||||
|
||||
from os import listdir, stat
|
||||
from os.path import isfile, join
|
||||
import logging
|
||||
@ -20,6 +22,7 @@ from logging.handlers import RotatingFileHandler
|
||||
|
||||
|
||||
def get_logfile(_loc, _max_log_size, _name):
|
||||
"""Get logfile from location and return with file mode."""
|
||||
files = [f for f in listdir(_loc) if isfile(join(_loc, f))]
|
||||
|
||||
logfile_index = 0
|
||||
@ -50,7 +53,9 @@ def get_logfile(_loc, _max_log_size, _name):
|
||||
return (last_logfile, mode)
|
||||
|
||||
|
||||
def get_last_logfile(_loc, _max_log_size, _max_num_of_logs, _name, _last_index):
|
||||
def get_last_logfile(_loc, _max_log_size, _max_num_of_logs,
|
||||
_name, _last_index):
|
||||
"""Return last logfile from location with index and mode."""
|
||||
last_logfile = _name + "_" + str(_last_index) + ".log"
|
||||
mode = None
|
||||
|
||||
@ -74,6 +79,7 @@ def get_last_logfile(_loc, _max_log_size, _max_num_of_logs, _name, _last_index):
|
||||
|
||||
|
||||
def adjust_json_string(_data):
|
||||
"""Adjust data value formatting to be consistent and return."""
|
||||
_data = _data.replace("None", '"none"')
|
||||
_data = _data.replace("False", '"false"')
|
||||
_data = _data.replace("True", '"true"')
|
||||
@ -85,7 +91,9 @@ def adjust_json_string(_data):
|
||||
|
||||
|
||||
def init_logger(config):
|
||||
log_formatter = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
|
||||
"""Return an initialized logger."""
|
||||
log_formatter = logging.Formatter("%(asctime)s - %(levelname)s - "
|
||||
"%(message)s")
|
||||
log_handler = RotatingFileHandler(config.logging_loc + config.logger_name,
|
||||
mode='a',
|
||||
maxBytes=config.max_main_log_size,
|
||||
@ -94,7 +102,8 @@ def init_logger(config):
|
||||
delay=0)
|
||||
log_handler.setFormatter(log_formatter)
|
||||
logger = logging.getLogger(config.logger_name)
|
||||
logger.setLevel(logging.DEBUG if config.logging_level == "debug" else logging.INFO)
|
||||
logger.setLevel(logging.DEBUG if config.logging_level == "debug"
|
||||
else logging.INFO)
|
||||
logger.addHandler(log_handler)
|
||||
|
||||
return logger
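# --- Editor's illustrative sketch (not part of this commit) ---
# How the helpers above are typically combined when a resource snapshot is
# appended to the rotating resource log.  The config attribute names follow
# the code above; the "datacenter" name and the snapshot dict are made up.
logger = init_logger(config)
(logfile, last_index, mode) = get_last_logfile(
    config.resource_log_loc, config.max_log_size,
    config.max_num_of_logs, "datacenter", 0)
snapshot = str({"status": None, "enabled": True})
with open(config.resource_log_loc + logfile, mode) as f:
    f.write(adjust_json_string(snapshot) + "\n")  # None/True -> "none"/"true"
logger.info("resource status appended to " + logfile)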
|
||||
|
@ -1,18 +1,20 @@
|
||||
#
|
||||
# Copyright 2014-2017 AT&T Intellectual Property
|
||||
#
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Compute."""
|
||||
|
||||
from novaclient import client as nova_client
|
||||
from oslo_config import cfg
|
||||
from resource_base import Host, LogicalGroup, Flavor
|
||||
@ -25,12 +27,21 @@ CONF = cfg.CONF
|
||||
|
||||
|
||||
class Compute(object):
|
||||
"""Compute Class.
|
||||
|
||||
This class handles setting up hosts, availability zones,
|
||||
aggregates, placed vms, resources, flavors, etc.
|
||||
|
||||
Interacts with nova client to perform these actions.
|
||||
"""
|
||||
|
||||
def __init__(self, _logger):
|
||||
"""Compute init."""
|
||||
self.logger = _logger
|
||||
self.nova = None
|
||||
|
||||
def set_hosts(self, _hosts, _logical_groups):
|
||||
|
||||
"""Return success if az's, aggregates, vms, resources, all set."""
|
||||
self._get_nova_client()
|
||||
|
||||
status = self._set_availability_zones(_hosts, _logical_groups)
|
||||
@ -56,7 +67,7 @@ class Compute(object):
|
||||
return "success"
|
||||
|
||||
def _get_nova_client(self):
|
||||
'''Returns a nova client'''
|
||||
"""Return a nova client."""
|
||||
self.nova = nova_client.Client(VERSION,
|
||||
CONF.identity.username,
|
||||
CONF.identity.password,
|
||||
@ -86,7 +97,8 @@ class Compute(object):
|
||||
if host.name not in logical_group.vms_per_host.keys():
|
||||
logical_group.vms_per_host[host.name] = []
|
||||
|
||||
self.logger.info("adding Host LogicalGroup: " + str(host.__dict__))
|
||||
self.logger.info("adding Host LogicalGroup: " +
|
||||
str(host.__dict__))
|
||||
|
||||
_hosts[host.name] = host
|
||||
|
||||
@ -114,7 +126,8 @@ class Compute(object):
|
||||
metadata[mk] = a.metadata.get(mk)
|
||||
aggregate.metadata = metadata
|
||||
|
||||
self.logger.info("adding aggregate LogicalGroup: " + str(aggregate.__dict__))
|
||||
self.logger.info("adding aggregate LogicalGroup: " +
|
||||
str(aggregate.__dict__))
|
||||
|
||||
_logical_groups[aggregate.name] = aggregate
|
||||
|
||||
@ -141,7 +154,8 @@ class Compute(object):
|
||||
if result_status == "success":
|
||||
for vm_uuid in vm_uuid_list:
|
||||
vm_detail = [] # (vm_name, az, metadata, status)
|
||||
result_status_detail = self._get_vm_detail(vm_uuid, vm_detail)
|
||||
result_status_detail = self._get_vm_detail(vm_uuid,
|
||||
vm_detail)
|
||||
|
||||
if result_status_detail == "success":
|
||||
vm_id = ("none", vm_detail[0], vm_uuid)
|
||||
@ -162,7 +176,8 @@ class Compute(object):
|
||||
return error_status
|
||||
|
||||
def _get_vms_of_host(self, _hk, _vm_list):
|
||||
hypervisor_list = self.nova.hypervisors.search(hypervisor_match=_hk, servers=True)
|
||||
hypervisor_list = self.nova.hypervisors.search(hypervisor_match=_hk,
|
||||
servers=True)
|
||||
|
||||
try:
|
||||
for hv in hypervisor_list:
|
||||
@ -221,6 +236,7 @@ class Compute(object):
|
||||
return "success"
|
||||
|
||||
def set_flavors(self, _flavors):
|
||||
"""Set flavors."""
|
||||
error_status = None
|
||||
|
||||
self._get_nova_client()
|
||||
@ -260,7 +276,8 @@ class Compute(object):
|
||||
|
||||
ephemeral_gb = 0.0
|
||||
if hasattr(f, "OS-FLV-EXT-DATA:ephemeral"):
|
||||
ephemeral_gb = float(getattr(f, "OS-FLV-EXT-DATA:ephemeral"))
|
||||
ephemeral_gb = float(getattr(f,
|
||||
"OS-FLV-EXT-DATA:ephemeral"))
|
||||
|
||||
swap_mb = 0.0
|
||||
if hasattr(f, "swap"):
|
||||
|
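# --- Editor's illustrative sketch (not part of this commit) ---
# How the Compute helper above is typically driven by a caller such as
# ComputeManager (next file): empty dicts are passed in and filled in place,
# and both setters return the string "success" when everything is read.
compute = Compute(logger)
hosts, logical_groups, flavors = {}, {}, {}
if compute.set_hosts(hosts, logical_groups) != "success":
    logger.error("failed to load hosts from nova")
elif compute.set_flavors(flavors) != "success":
    logger.error("failed to load flavors from nova")
else:
    logger.info("loaded %d hosts and %d flavors" % (len(hosts), len(flavors)))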
@ -1,18 +1,20 @@
|
||||
#
|
||||
# Copyright 2014-2017 AT&T Intellectual Property
|
||||
#
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Compute Manager."""
|
||||
|
||||
import threading
|
||||
import time
|
||||
|
||||
@ -23,8 +25,14 @@ from valet.engine.resource_manager.resource_base import Host
|
||||
|
||||
|
||||
class ComputeManager(threading.Thread):
|
||||
"""Compute Manager Class.
|
||||
|
||||
Threaded class to set up and manage compute for resources, hosts,
|
||||
flavors, etc. Calls many functions from Resource.
|
||||
"""
|
||||
|
||||
def __init__(self, _t_id, _t_name, _rsc, _data_lock, _config, _logger):
|
||||
"""Init Compute Manager."""
|
||||
threading.Thread.__init__(self)
|
||||
|
||||
self.thread_id = _t_id
|
||||
@ -42,7 +50,9 @@ class ComputeManager(threading.Thread):
|
||||
self.project_token = None
|
||||
|
||||
def run(self):
|
||||
self.logger.info("ComputeManager: start " + self.thread_name + " ......")
|
||||
"""Start Compute Manager thread to run setup."""
|
||||
self.logger.info("ComputeManager: start " + self.thread_name +
|
||||
" ......")
|
||||
|
||||
if self.config.compute_trigger_freq > 0:
|
||||
period_end = time.time() + self.config.compute_trigger_freq
|
||||
@ -67,7 +77,10 @@ class ComputeManager(threading.Thread):
|
||||
time.sleep(60)
|
||||
|
||||
now = time.localtime()
|
||||
if now.tm_year > last_trigger_year or now.tm_mon > last_trigger_mon or now.tm_mday > last_trigger_mday:
|
||||
if now.tm_year > last_trigger_year or \
|
||||
now.tm_mon > last_trigger_mon or \
|
||||
now.tm_mday > last_trigger_mday:
|
||||
|
||||
timeout = False
|
||||
|
||||
if timeout is False and \
|
||||
@ -82,34 +95,39 @@ class ComputeManager(threading.Thread):
|
||||
self.logger.info("ComputeManager: exit " + self.thread_name)
|
||||
|
||||
def _run(self):
|
||||
self.logger.info("ComputeManager: --- start compute_nodes status update ---")
|
||||
self.logger.info("ComputeManager: --- start compute_nodes "
|
||||
"status update ---")
|
||||
|
||||
self.data_lock.acquire()
|
||||
try:
|
||||
triggered_host_updates = self.set_hosts()
|
||||
triggered_flavor_updates = self.set_flavors()
|
||||
|
||||
if triggered_host_updates is True and triggered_flavor_updates is True:
|
||||
if triggered_host_updates is True and \
|
||||
triggered_flavor_updates is True:
|
||||
if self.resource.update_topology() is False:
|
||||
# TODO: error in MUSIC. ignore?
|
||||
# TODO(UNKNOWN): error in MUSIC. ignore?
|
||||
pass
|
||||
else:
|
||||
# TODO: error handling, e.g., 3 times failure then stop Ostro?
|
||||
# TODO(UNKNOWN): error handling, e.g.,
|
||||
# 3 times failure then stop Ostro?
|
||||
pass
|
||||
finally:
|
||||
self.data_lock.release()
|
||||
|
||||
self.logger.info("ComputeManager: --- done compute_nodes status update ---")
|
||||
self.logger.info("ComputeManager: --- done compute_nodes "
|
||||
"status update ---")
|
||||
|
||||
return True
|
||||
|
||||
def set_hosts(self):
|
||||
"""Return True if hosts set, compute avail resources, checks update."""
|
||||
hosts = {}
|
||||
logical_groups = {}
|
||||
|
||||
compute = None
|
||||
if self.config.mode.startswith("sim") is True or \
|
||||
self.config.mode.startswith("test") is True:
|
||||
self.config.mode.startswith("test") is True:
|
||||
compute = SimCompute(self.config)
|
||||
else:
|
||||
compute = Compute(self.logger)
|
||||
@ -136,25 +154,30 @@ class ComputeManager(threading.Thread):
|
||||
self.resource.logical_groups[lk] = deepcopy(_logical_groups[lk])
|
||||
|
||||
self.resource.logical_groups[lk].last_update = time.time()
|
||||
self.logger.warn("ComputeManager: new logical group (" + lk + ") added")
|
||||
self.logger.warn("ComputeManager: new logical group (" +
|
||||
lk + ") added")
|
||||
|
||||
for rlk in self.resource.logical_groups.keys():
|
||||
rl = self.resource.logical_groups[rlk]
|
||||
if rl.group_type != "EX" and rl.group_type != "AFF" and rl.group_type != "DIV":
|
||||
if rl.group_type != "EX" and rl.group_type != "AFF" and \
|
||||
rl.group_type != "DIV":
|
||||
if rlk not in _logical_groups.keys():
|
||||
self.resource.logical_groups[rlk].status = "disabled"
|
||||
|
||||
self.resource.logical_groups[rlk].last_update = time.time()
|
||||
self.logger.warn("ComputeManager: logical group (" + rlk + ") removed")
|
||||
self.logger.warn("ComputeManager: logical group (" +
|
||||
rlk + ") removed")
|
||||
|
||||
for lk in _logical_groups.keys():
|
||||
lg = _logical_groups[lk]
|
||||
rlg = self.resource.logical_groups[lk]
|
||||
if lg.group_type != "EX" and lg.group_type != "AFF" and lg.group_type != "DIV":
|
||||
if lg.group_type != "EX" and lg.group_type != "AFF" and \
|
||||
lg.group_type != "DIV":
|
||||
if self._check_logical_group_metadata_update(lg, rlg) is True:
|
||||
|
||||
rlg.last_update = time.time()
|
||||
self.logger.warn("ComputeManager: logical group (" + lk + ") updated")
|
||||
self.logger.warn("ComputeManager: logical group (" +
|
||||
lk + ") updated")
|
||||
|
||||
def _check_logical_group_metadata_update(self, _lg, _rlg):
|
||||
if _lg.status != _rlg.status:
|
||||
@ -183,7 +206,8 @@ class ComputeManager(threading.Thread):
|
||||
self.resource.hosts[new_host.name] = new_host
|
||||
|
||||
new_host.last_update = time.time()
|
||||
self.logger.warn("ComputeManager: new host (" + new_host.name + ") added")
|
||||
self.logger.warn("ComputeManager: new host (" +
|
||||
new_host.name + ") added")
|
||||
|
||||
for rhk, rhost in self.resource.hosts.iteritems():
|
||||
if rhk not in _hosts.keys():
|
||||
@ -191,7 +215,8 @@ class ComputeManager(threading.Thread):
|
||||
rhost.tag.remove("nova")
|
||||
|
||||
rhost.last_update = time.time()
|
||||
self.logger.warn("ComputeManager: host (" + rhost.name + ") disabled")
|
||||
self.logger.warn("ComputeManager: host (" +
|
||||
rhost.name + ") disabled")
|
||||
|
||||
for hk in _hosts.keys():
|
||||
host = _hosts[hk]
|
||||
@ -202,7 +227,8 @@ class ComputeManager(threading.Thread):
|
||||
for hk, h in self.resource.hosts.iteritems():
|
||||
if h.clean_memberships() is True:
|
||||
h.last_update = time.time()
|
||||
self.logger.warn("ComputeManager: host (" + h.name + ") updated (delete EX/AFF/DIV membership)")
|
||||
self.logger.warn("ComputeManager: host (" + h.name +
|
||||
") updated (delete EX/AFF/DIV membership)")
|
||||
|
||||
for hk, host in self.resource.hosts.iteritems():
|
||||
if host.last_update > self.resource.current_timestamp:
|
||||
@ -224,17 +250,20 @@ class ComputeManager(threading.Thread):
|
||||
if "nova" not in _rhost.tag:
|
||||
_rhost.tag.append("nova")
|
||||
topology_updated = True
|
||||
self.logger.warn("ComputeManager: host (" + _rhost.name + ") updated (tag added)")
|
||||
self.logger.warn("ComputeManager: host (" + _rhost.name +
|
||||
") updated (tag added)")
|
||||
|
||||
if _host.status != _rhost.status:
|
||||
_rhost.status = _host.status
|
||||
topology_updated = True
|
||||
self.logger.warn("ComputeManager: host (" + _rhost.name + ") updated (status changed)")
|
||||
self.logger.warn("ComputeManager: host (" + _rhost.name +
|
||||
") updated (status changed)")
|
||||
|
||||
if _host.state != _rhost.state:
|
||||
_rhost.state = _host.state
|
||||
topology_updated = True
|
||||
self.logger.warn("ComputeManager: host (" + _rhost.name + ") updated (state changed)")
|
||||
self.logger.warn("ComputeManager: host (" + _rhost.name +
|
||||
") updated (state changed)")
|
||||
|
||||
return topology_updated
|
||||
|
||||
@ -248,7 +277,8 @@ class ComputeManager(threading.Thread):
|
||||
_rhost.original_vCPUs = _host.original_vCPUs
|
||||
_rhost.avail_vCPUs = _host.avail_vCPUs
|
||||
topology_updated = True
|
||||
self.logger.warn("ComputeManager: host (" + _rhost.name + ") updated (CPU updated)")
|
||||
self.logger.warn("ComputeManager: host (" + _rhost.name +
|
||||
") updated (CPU updated)")
|
||||
|
||||
if _host.mem_cap != _rhost.mem_cap or \
|
||||
_host.original_mem_cap != _rhost.original_mem_cap or \
|
||||
@ -257,7 +287,8 @@ class ComputeManager(threading.Thread):
|
||||
_rhost.original_mem_cap = _host.original_mem_cap
|
||||
_rhost.avail_mem_cap = _host.avail_mem_cap
|
||||
topology_updated = True
|
||||
self.logger.warn("ComputeManager: host (" + _rhost.name + ") updated (mem updated)")
|
||||
self.logger.warn("ComputeManager: host (" + _rhost.name +
|
||||
") updated (mem updated)")
|
||||
|
||||
if _host.local_disk_cap != _rhost.local_disk_cap or \
|
||||
_host.original_local_disk_cap != _rhost.original_local_disk_cap or \
|
||||
@ -266,7 +297,8 @@ class ComputeManager(threading.Thread):
|
||||
_rhost.original_local_disk_cap = _host.original_local_disk_cap
|
||||
_rhost.avail_local_disk_cap = _host.avail_local_disk_cap
|
||||
topology_updated = True
|
||||
self.logger.warn("ComputeManager: host (" + _rhost.name + ") updated (local disk space updated)")
|
||||
self.logger.warn("ComputeManager: host (" + _rhost.name +
|
||||
") updated (local disk space updated)")
|
||||
|
||||
if _host.vCPUs_used != _rhost.vCPUs_used or \
|
||||
_host.free_mem_mb != _rhost.free_mem_mb or \
|
||||
@ -277,7 +309,8 @@ class ComputeManager(threading.Thread):
|
||||
_rhost.free_disk_gb = _host.free_disk_gb
|
||||
_rhost.disk_available_least = _host.disk_available_least
|
||||
topology_updated = True
|
||||
self.logger.warn("ComputeManager: host (" + _rhost.name + ") updated (other resource numbers)")
|
||||
self.logger.warn("ComputeManager: host (" + _rhost.name +
|
||||
") updated (other resource numbers)")
|
||||
|
||||
return topology_updated
|
||||
|
||||
@ -288,15 +321,18 @@ class ComputeManager(threading.Thread):
|
||||
if mk not in _rhost.memberships.keys():
|
||||
_rhost.memberships[mk] = self.resource.logical_groups[mk]
|
||||
topology_updated = True
|
||||
self.logger.warn("ComputeManager: host (" + _rhost.name + ") updated (new membership)")
|
||||
self.logger.warn("ComputeManager: host (" + _rhost.name +
|
||||
") updated (new membership)")
|
||||
|
||||
for mk in _rhost.memberships.keys():
|
||||
m = _rhost.memberships[mk]
|
||||
if m.group_type != "EX" and m.group_type != "AFF" and m.group_type != "DIV":
|
||||
if m.group_type != "EX" and m.group_type != "AFF" and \
|
||||
m.group_type != "DIV":
|
||||
if mk not in _host.memberships.keys():
|
||||
del _rhost.memberships[mk]
|
||||
topology_updated = True
|
||||
self.logger.warn("ComputeManager: host (" + _rhost.name + ") updated (delete membership)")
|
||||
self.logger.warn("ComputeManager: host (" + _rhost.name +
|
||||
") updated (delete membership)")
|
||||
|
||||
return topology_updated
|
||||
|
||||
@ -309,7 +345,8 @@ class ComputeManager(threading.Thread):
|
||||
_rhost.vm_list.remove(rvm_id)
|
||||
|
||||
topology_updated = True
|
||||
self.logger.warn("ComputeManager: host (" + _rhost.name + ") updated (none vm removed)")
|
||||
self.logger.warn("ComputeManager: host (" + _rhost.name +
|
||||
") updated (none vm removed)")
|
||||
|
||||
self.resource.clean_none_vms_from_logical_groups(_rhost)
|
||||
|
||||
@ -318,25 +355,29 @@ class ComputeManager(threading.Thread):
|
||||
_rhost.vm_list.append(vm_id)
|
||||
|
||||
topology_updated = True
|
||||
self.logger.warn("ComputeManager: host (" + _rhost.name + ") updated (new vm placed)")
|
||||
self.logger.warn("ComputeManager: host (" + _rhost.name +
|
||||
") updated (new vm placed)")
|
||||
|
||||
for rvm_id in _rhost.vm_list:
|
||||
if _host.exist_vm_by_uuid(rvm_id[2]) is False:
|
||||
_rhost.vm_list.remove(rvm_id)
|
||||
|
||||
self.resource.remove_vm_by_uuid_from_logical_groups(_rhost, rvm_id[2])
|
||||
self.resource.remove_vm_by_uuid_from_logical_groups(_rhost,
|
||||
rvm_id[2])
|
||||
|
||||
topology_updated = True
|
||||
self.logger.warn("ComputeManager: host (" + _rhost.name + ") updated (vm removed)")
|
||||
self.logger.warn("ComputeManager: host (" + _rhost.name +
|
||||
") updated (vm removed)")
|
||||
|
||||
return topology_updated
|
||||
|
||||
def set_flavors(self):
|
||||
"""Return True if compute set flavors returns success."""
|
||||
flavors = {}
|
||||
|
||||
compute = None
|
||||
if self.config.mode.startswith("sim") is True or \
|
||||
self.config.mode.startswith("test") is True:
|
||||
self.config.mode.startswith("test") is True:
|
||||
compute = SimCompute(self.config)
|
||||
else:
|
||||
compute = Compute(self.logger)
|
||||
@ -356,14 +397,16 @@ class ComputeManager(threading.Thread):
|
||||
self.resource.flavors[fk] = deepcopy(_flavors[fk])
|
||||
|
||||
self.resource.flavors[fk].last_update = time.time()
|
||||
self.logger.warn("ComputeManager: new flavor (" + fk + ") added")
|
||||
self.logger.warn("ComputeManager: new flavor (" +
|
||||
fk + ") added")
|
||||
|
||||
for rfk in self.resource.flavors.keys():
|
||||
if rfk not in _flavors.keys():
|
||||
self.resource.flavors[rfk].status = "disabled"
|
||||
|
||||
self.resource.flavors[rfk].last_update = time.time()
|
||||
self.logger.warn("ComputeManager: flavor (" + rfk + ") removed")
|
||||
self.logger.warn("ComputeManager: flavor (" +
|
||||
rfk + ") removed")
|
||||
|
||||
for fk in _flavors.keys():
|
||||
f = _flavors[fk]
|
||||
@ -371,7 +414,8 @@ class ComputeManager(threading.Thread):
|
||||
|
||||
if self._check_flavor_spec_update(f, rf) is True:
|
||||
rf.last_update = time.time()
|
||||
self.logger.warn("ComputeManager: flavor (" + fk + ") spec updated")
|
||||
self.logger.warn("ComputeManager: flavor (" +
|
||||
fk + ") spec updated")
|
||||
|
||||
def _check_flavor_spec_update(self, _f, _rf):
|
||||
spec_updated = False
|
||||
@ -380,7 +424,8 @@ class ComputeManager(threading.Thread):
|
||||
_rf.status = _f.status
|
||||
spec_updated = True
|
||||
|
||||
if _f.vCPUs != _rf.vCPUs or _f.mem_cap != _rf.mem_cap or _f.disk_cap != _rf.disk_cap:
|
||||
if _f.vCPUs != _rf.vCPUs or _f.mem_cap != _rf.mem_cap or \
|
||||
_f.disk_cap != _rf.disk_cap:
|
||||
_rf.vCPUs = _f.vCPUs
|
||||
_rf.mem_cap = _f.mem_cap
|
||||
_rf.disk_cap = _f.disk_cap
|
||||
|
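# --- Editor's illustrative sketch (not part of this commit) ---
# ComputeManager runs as its own thread; a minimal wiring sketch based on
# the constructor above.  The thread id/name are sample values, `resource`
# is the Resource instance defined two files below, and the lock is shared
# with the other resource-manager threads.
data_lock = threading.Lock()
compute_mgr = ComputeManager(1, "Compute_Manager", resource,
                             data_lock, config, logger)
compute_mgr.start()  # enters ComputeManager.run() in a separate thread
compute_mgr.join()   # typically runs alongside the other manager threads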
@ -1,28 +1,37 @@
|
||||
#
|
||||
# Copyright 2014-2017 AT&T Intellectual Property
|
||||
#
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from valet.engine.resource_manager.resource_base import Host, LogicalGroup, Flavor
|
||||
"""Compute Simulator."""
|
||||
|
||||
from valet.engine.resource_manager.resource_base \
|
||||
import Host, LogicalGroup, Flavor
|
||||
|
||||
|
||||
class SimCompute(object):
|
||||
"""Sim Compute Class.
|
||||
|
||||
This class simulates a compute datacenter using classes from resource_base.
|
||||
"""
|
||||
|
||||
def __init__(self, _config):
|
||||
"""Init Sim Compute class (object)."""
|
||||
self.config = _config
|
||||
self.datacenter_name = "sim"
|
||||
|
||||
def set_hosts(self, _hosts, _logical_groups):
|
||||
"""Set hosts and logical groups using resource_base, return success."""
|
||||
self._set_availability_zones(_hosts, _logical_groups)
|
||||
|
||||
self._set_aggregates(_hosts, _logical_groups)
|
||||
@ -40,7 +49,8 @@ class SimCompute(object):
|
||||
|
||||
for r_num in range(0, self.config.num_of_racks):
|
||||
for h_num in range(0, self.config.num_of_hosts_per_rack):
|
||||
host = Host(self.datacenter_name + "0r" + str(r_num) + "c" + str(h_num))
|
||||
host = Host(self.datacenter_name + "0r" + str(r_num) +
|
||||
"c" + str(h_num))
|
||||
host.tag.append("nova")
|
||||
host.memberships["nova"] = logical_group
|
||||
|
||||
@ -63,9 +73,11 @@ class SimCompute(object):
|
||||
aggregate = _logical_groups["aggregate" + str(a_num)]
|
||||
for r_num in range(0, self.config.num_of_racks):
|
||||
for h_num in range(0, self.config.num_of_hosts_per_rack):
|
||||
host_name = self.datacenter_name + "0r" + str(r_num) + "c" + str(h_num)
|
||||
host_name = self.datacenter_name + "0r" + str(r_num) +\
|
||||
"c" + str(h_num)
|
||||
if host_name in _hosts.keys():
|
||||
if (h_num % (self.config.aggregated_ratio + a_num)) == 0:
|
||||
if (h_num %
|
||||
(self.config.aggregated_ratio + a_num)) == 0:
|
||||
host = _hosts[host_name]
|
||||
host.memberships[aggregate.name] = aggregate
|
||||
|
||||
@ -77,23 +89,28 @@ class SimCompute(object):
|
||||
def _set_resources(self, _hosts):
|
||||
for r_num in range(0, self.config.num_of_racks):
|
||||
for h_num in range(0, self.config.num_of_hosts_per_rack):
|
||||
host_name = self.datacenter_name + "0r" + str(r_num) + "c" + str(h_num)
|
||||
host_name = self.datacenter_name + "0r" + str(r_num) +\
|
||||
"c" + str(h_num)
|
||||
if host_name in _hosts.keys():
|
||||
host = _hosts[host_name]
|
||||
host.original_vCPUs = float(self.config.cpus_per_host)
|
||||
host.vCPUs_used = 0.0
|
||||
host.original_mem_cap = float(self.config.mem_per_host)
|
||||
host.free_mem_mb = host.original_mem_cap
|
||||
host.original_local_disk_cap = float(self.config.disk_per_host)
|
||||
host.original_local_disk_cap = \
|
||||
float(self.config.disk_per_host)
|
||||
host.free_disk_gb = host.original_local_disk_cap
|
||||
host.disk_available_least = host.original_local_disk_cap
|
||||
|
||||
def set_flavors(self, _flavors):
|
||||
"""Set flavors in compute sim, return success."""
|
||||
for f_num in range(0, self.config.num_of_basic_flavors):
|
||||
flavor = Flavor("bflavor" + str(f_num))
|
||||
flavor.vCPUs = float(self.config.base_flavor_cpus * (f_num + 1))
|
||||
flavor.mem_cap = float(self.config.base_flavor_mem * (f_num + 1))
|
||||
flavor.disk_cap = float(self.config.base_flavor_disk * (f_num + 1)) + 10.0 + 20.0 / 1024.0
|
||||
flavor.disk_cap = \
|
||||
float(self.config.base_flavor_disk * (f_num + 1)) + \
|
||||
10.0 + 20.0 / 1024.0
|
||||
|
||||
_flavors[flavor.name] = flavor
|
||||
|
||||
|
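# --- Editor's illustrative sketch (not part of this commit) ---
# Worked example of the host naming scheme used above (datacenter_name is
# "sim"); the rack and host counts are arbitrary sample values.
racks, hosts_per_rack = 2, 2
names = ["sim" + "0r" + str(r) + "c" + str(h)
         for r in range(racks) for h in range(hosts_per_rack)]
# -> ['sim0r0c0', 'sim0r0c1', 'sim0r1c0', 'sim0r1c1']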
@ -1,18 +1,20 @@
|
||||
#
|
||||
# Copyright 2014-2017 AT&T Intellectual Property
|
||||
#
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Resource - Handles data, metadata, status of resources."""
|
||||
|
||||
import json
|
||||
import sys
|
||||
import time
|
||||
@ -20,33 +22,42 @@ import traceback
|
||||
|
||||
from valet.engine.optimizer.app_manager.app_topology_base import LEVELS
|
||||
from valet.engine.optimizer.util import util as util
|
||||
from valet.engine.resource_manager.resource_base import Datacenter, HostGroup, Host, LogicalGroup
|
||||
from valet.engine.resource_manager.resource_base \
|
||||
import Datacenter, HostGroup, Host, LogicalGroup
|
||||
from valet.engine.resource_manager.resource_base import Flavor, Switch, Link
|
||||
|
||||
|
||||
class Resource(object):
|
||||
"""Resource Class.
|
||||
|
||||
This class bootstraps the resources from the database and initializes
|
||||
them using base resources (datacenter, host group, host, logical group).
|
||||
Also manages aggregate status of resources and metadata and handles
|
||||
updates to base resource types.
|
||||
"""
|
||||
|
||||
def __init__(self, _db, _config, _logger):
|
||||
"""Init Resource Class."""
|
||||
self.db = _db
|
||||
|
||||
self.config = _config
|
||||
self.logger = _logger
|
||||
|
||||
''' resource data '''
|
||||
""" resource data """
|
||||
self.datacenter = Datacenter(self.config.datacenter_name)
|
||||
self.host_groups = {}
|
||||
self.hosts = {}
|
||||
self.switches = {}
|
||||
self.storage_hosts = {}
|
||||
|
||||
''' metadata '''
|
||||
""" metadata """
|
||||
self.logical_groups = {}
|
||||
self.flavors = {}
|
||||
|
||||
self.current_timestamp = 0
|
||||
self.last_log_index = 0
|
||||
|
||||
''' resource status aggregation '''
|
||||
""" resource status aggregation """
|
||||
self.CPU_avail = 0
|
||||
self.mem_avail = 0
|
||||
self.local_disk_avail = 0
|
||||
@ -54,6 +65,7 @@ class Resource(object):
|
||||
self.nw_bandwidth_avail = 0
|
||||
|
||||
def bootstrap_from_db(self, _resource_status):
|
||||
"""Return True if bootsrap resource from database successful."""
|
||||
try:
|
||||
logical_groups = _resource_status.get("logical_groups")
|
||||
if logical_groups:
|
||||
@ -69,9 +81,11 @@ class Resource(object):
|
||||
self.logical_groups[lgk] = logical_group
|
||||
|
||||
if len(self.logical_groups) > 0:
|
||||
self.logger.debug("Resource.bootstrap_from_db: logical_groups loaded")
|
||||
self.logger.debug("Resource.bootstrap_from_db: logical_groups "
|
||||
"loaded")
|
||||
else:
|
||||
self.logger.warn("Resource.bootstrap_from_db: no logical_groups")
|
||||
self.logger.warn("Resource.bootstrap_from_db: no "
|
||||
"logical_groups")
|
||||
|
||||
flavors = _resource_status.get("flavors")
|
||||
if flavors:
|
||||
@ -89,7 +103,8 @@ class Resource(object):
|
||||
if len(self.flavors) > 0:
|
||||
self.logger.debug("Resource.bootstrap_from_db: flavors loaded")
|
||||
else:
|
||||
self.logger.error("Resource.bootstrap_from_db: fail loading flavors")
|
||||
self.logger.error("Resource.bootstrap_from_db: fail loading "
|
||||
"flavors")
|
||||
|
||||
switches = _resource_status.get("switches")
|
||||
if switches:
|
||||
@ -129,9 +144,11 @@ class Resource(object):
|
||||
|
||||
switch.peer_links = peer_links
|
||||
|
||||
self.logger.debug("Resource.bootstrap_from_db: switch links loaded")
|
||||
self.logger.debug("Resource.bootstrap_from_db: switch links "
|
||||
"loaded")
|
||||
else:
|
||||
self.logger.error("Resource.bootstrap_from_db: fail loading switches")
|
||||
self.logger.error("Resource.bootstrap_from_db: fail loading "
|
||||
"switches")
|
||||
|
||||
# storage_hosts
|
||||
hosts = _resource_status.get("hosts")
|
||||
@ -168,9 +185,11 @@ class Resource(object):
|
||||
self.hosts[hk] = host
|
||||
|
||||
if len(self.hosts) > 0:
|
||||
self.logger.debug("Resource.bootstrap_from_db: hosts loaded")
|
||||
self.logger.debug("Resource.bootstrap_from_db: hosts "
|
||||
"loaded")
|
||||
else:
|
||||
self.logger.error("Resource.bootstrap_from_db: fail loading hosts")
|
||||
self.logger.error("Resource.bootstrap_from_db: fail "
|
||||
"loading hosts")
|
||||
|
||||
host_groups = _resource_status.get("host_groups")
|
||||
if host_groups:
|
||||
@ -185,7 +204,8 @@ class Resource(object):
|
||||
host_group.original_mem_cap = hg.get("original_mem")
|
||||
host_group.avail_mem_cap = hg.get("avail_mem")
|
||||
host_group.local_disk_cap = hg.get("local_disk")
|
||||
host_group.original_local_disk_cap = hg.get("original_local_disk")
|
||||
host_group.original_local_disk_cap = \
|
||||
hg.get("original_local_disk")
|
||||
host_group.avail_local_disk_cap = hg.get("avail_local_disk")
|
||||
host_group.vm_list = hg.get("vm_list")
|
||||
host_group.volume_list = hg.get("volume_list", [])
|
||||
@ -201,9 +221,11 @@ class Resource(object):
|
||||
self.host_groups[hgk] = host_group
|
||||
|
||||
if len(self.host_groups) > 0:
|
||||
self.logger.debug("Resource.bootstrap_from_db: host_groups loaded")
|
||||
self.logger.debug("Resource.bootstrap_from_db: host_groups "
|
||||
"loaded")
|
||||
else:
|
||||
self.logger.error("Resource.bootstrap_from_db: fail loading host_groups")
|
||||
self.logger.error("Resource.bootstrap_from_db: fail "
|
||||
"loading host_groups")
|
||||
|
||||
dc = _resource_status.get("datacenter")
|
||||
if dc:
|
||||
@ -217,7 +239,8 @@ class Resource(object):
|
||||
self.datacenter.original_mem_cap = dc.get("original_mem")
|
||||
self.datacenter.avail_mem_cap = dc.get("avail_mem")
|
||||
self.datacenter.local_disk_cap = dc.get("local_disk")
|
||||
self.datacenter.original_local_disk_cap = dc.get("original_local_disk")
|
||||
self.datacenter.original_local_disk_cap = \
|
||||
dc.get("original_local_disk")
|
||||
self.datacenter.avail_local_disk_cap = dc.get("avail_local_disk")
|
||||
self.datacenter.vm_list = dc.get("vm_list")
|
||||
self.datacenter.volume_list = dc.get("volume_list", [])
|
||||
@ -237,9 +260,11 @@ class Resource(object):
|
||||
self.datacenter.resources[ck] = self.hosts[ck]
|
||||
|
||||
if len(self.datacenter.resources) > 0:
|
||||
self.logger.debug("Resource.bootstrap_from_db: datacenter loaded")
|
||||
self.logger.debug("Resource.bootstrap_from_db: datacenter "
|
||||
"loaded")
|
||||
else:
|
||||
self.logger.error("Resource.bootstrap_from_db: fail loading datacenter")
|
||||
self.logger.error("Resource.bootstrap_from_db: fail "
|
||||
"loading datacenter")
|
||||
|
||||
hgs = _resource_status.get("host_groups")
|
||||
if hgs:
|
||||
@ -258,7 +283,8 @@ class Resource(object):
|
||||
elif ck in self.host_groups.keys():
|
||||
host_group.child_resources[ck] = self.host_groups[ck]
|
||||
|
||||
self.logger.debug("Resource.bootstrap_from_db: host_groups'layout loaded")
|
||||
self.logger.debug("Resource.bootstrap_from_db: "
|
||||
"host_groups'layout loaded")
|
||||
|
||||
hs = _resource_status.get("hosts")
|
||||
if hs:
|
||||
@ -271,20 +297,24 @@ class Resource(object):
|
||||
elif pk in self.host_groups.keys():
|
||||
host.host_group = self.host_groups[pk]
|
||||
|
||||
self.logger.debug("Resource.bootstrap_from_db: hosts'layout loaded")
|
||||
self.logger.debug("Resource.bootstrap_from_db: "
|
||||
"hosts'layout loaded")
|
||||
|
||||
self._update_compute_avail()
|
||||
self._update_storage_avail()
|
||||
self._update_nw_bandwidth_avail()
|
||||
|
||||
self.logger.debug("Resource.bootstrap_from_db: resource availability updated")
|
||||
self.logger.debug("Resource.bootstrap_from_db: "
|
||||
"resource availability updated")
|
||||
|
||||
except Exception:
|
||||
self.logger.error("Resource.bootstrap_from_db - FAILED:" + traceback.format_exc())
|
||||
self.logger.error("Resource.bootstrap_from_db - "
|
||||
"FAILED:" + traceback.format_exc())
|
||||
|
||||
return True
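# --- Editor's illustrative sketch (not part of this commit) ---
# Calling pattern for bootstrap_from_db() above: the JSON status document
# kept in the backing store is handed in as a dict.  The db accessor name
# is an assumption; only the Resource-side calls are taken from this diff.
resource = Resource(db, config, logger)
status = db.get_resource_status(config.datacenter_name)  # assumed accessor
if status and resource.bootstrap_from_db(status):
    resource.update_topology(store=False)  # recompute aggregates, skip store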
|
||||
|
||||
def update_topology(self, store=True):
|
||||
"""Update Topology and return True, if store True then store update."""
|
||||
self._update_topology()
|
||||
|
||||
self._update_compute_avail()
|
||||
@ -304,7 +334,8 @@ class Resource(object):
|
||||
def _update_topology(self):
|
||||
for level in LEVELS:
|
||||
for _, host_group in self.host_groups.iteritems():
|
||||
if host_group.host_type == level and host_group.check_availability() is True:
|
||||
if host_group.host_type == level and \
|
||||
host_group.check_availability() is True:
|
||||
if host_group.last_update > self.current_timestamp:
|
||||
self._update_host_group_topology(host_group)
|
||||
|
||||
@ -326,7 +357,8 @@ class Resource(object):
|
||||
_host_group.original_mem_cap += host.original_mem_cap
|
||||
_host_group.avail_mem_cap += host.avail_mem_cap
|
||||
_host_group.local_disk_cap += host.local_disk_cap
|
||||
_host_group.original_local_disk_cap += host.original_local_disk_cap
|
||||
_host_group.original_local_disk_cap += \
|
||||
host.original_local_disk_cap
|
||||
_host_group.avail_local_disk_cap += host.avail_local_disk_cap
|
||||
|
||||
for shk, storage_host in host.storages.iteritems():
|
||||
@ -362,8 +394,10 @@ class Resource(object):
|
||||
self.datacenter.original_mem_cap += resource.original_mem_cap
|
||||
self.datacenter.avail_mem_cap += resource.avail_mem_cap
|
||||
self.datacenter.local_disk_cap += resource.local_disk_cap
|
||||
self.datacenter.original_local_disk_cap += resource.original_local_disk_cap
|
||||
self.datacenter.avail_local_disk_cap += resource.avail_local_disk_cap
|
||||
self.datacenter.original_local_disk_cap += \
|
||||
resource.original_local_disk_cap
|
||||
self.datacenter.avail_local_disk_cap += \
|
||||
resource.avail_local_disk_cap
|
||||
|
||||
for shk, storage_host in resource.storages.iteritems():
|
||||
if storage_host.status == "enabled":
|
||||
@ -413,7 +447,8 @@ class Resource(object):
|
||||
for sk, s in h.switches.iteritems():
|
||||
if s.status == "enabled":
|
||||
for ulk, ul in s.up_links.iteritems():
|
||||
avail_nw_bandwidth_list.append(ul.avail_nw_bandwidth)
|
||||
avail_nw_bandwidth_list.append(
|
||||
ul.avail_nw_bandwidth)
|
||||
self.nw_bandwidth_avail += min(avail_nw_bandwidth_list)
|
||||
elif level == "spine":
|
||||
for _, hg in self.host_groups.iteritems():
|
||||
@ -422,7 +457,8 @@ class Resource(object):
|
||||
for _, s in hg.switches.iteritems():
|
||||
if s.status == "enabled":
|
||||
for _, ul in s.up_links.iteritems():
|
||||
avail_nw_bandwidth_list.append(ul.avail_nw_bandwidth)
|
||||
avail_nw_bandwidth_list.append(
|
||||
ul.avail_nw_bandwidth)
|
||||
# NOTE: peer links?
|
||||
self.nw_bandwidth_avail += min(avail_nw_bandwidth_list)
|
||||
|
||||
@ -466,7 +502,8 @@ class Resource(object):
|
||||
last_update_time = s.last_update
|
||||
|
||||
for hk, host in self.hosts.iteritems():
|
||||
if host.last_update > self.current_timestamp or host.last_link_update > self.current_timestamp:
|
||||
if host.last_update > self.current_timestamp or \
|
||||
host.last_link_update > self.current_timestamp:
|
||||
host_updates[hk] = host.get_json_info()
|
||||
|
||||
if host.last_update > self.current_timestamp:
|
||||
@ -493,11 +530,10 @@ class Resource(object):
|
||||
if self.datacenter.last_link_update > self.current_timestamp:
|
||||
last_update_time = self.datacenter.last_link_update
|
||||
|
||||
(resource_logfile, last_index, mode) = util.get_last_logfile(self.config.resource_log_loc,
|
||||
self.config.max_log_size,
|
||||
self.config.max_num_of_logs,
|
||||
self.datacenter.name,
|
||||
self.last_log_index)
|
||||
(resource_logfile, last_index, mode) = util.get_last_logfile(
|
||||
self.config.resource_log_loc, self.config.max_log_size,
|
||||
self.config.max_num_of_logs, self.datacenter.name,
|
||||
self.last_log_index)
|
||||
self.last_log_index = last_index
|
||||
|
||||
logging = open(self.config.resource_log_loc + resource_logfile, mode)
|
||||
@ -527,17 +563,21 @@ class Resource(object):
|
||||
|
||||
logging.close()
|
||||
|
||||
self.logger.info("Resource._store_topology_updates: log resource status in " + resource_logfile)
|
||||
self.logger.info("Resource._store_topology_updates: log resource "
|
||||
"status in " + resource_logfile)
|
||||
|
||||
if self.db is not None:
|
||||
if self.db.update_resource_status(self.datacenter.name, json_logging) is False:
|
||||
if self.db.update_resource_status(self.datacenter.name,
|
||||
json_logging) is False:
|
||||
return None
|
||||
if self.db.update_resource_log_index(self.datacenter.name, self.last_log_index) is False:
|
||||
if self.db.update_resource_log_index(self.datacenter.name,
|
||||
self.last_log_index) is False:
|
||||
return None
|
||||
|
||||
return last_update_time
|
||||
|
||||
def update_rack_resource(self, _host):
|
||||
"""Update resources for rack (host), then update cluster."""
|
||||
rack = _host.host_group
|
||||
|
||||
if rack is not None:
|
||||
@ -547,6 +587,7 @@ class Resource(object):
|
||||
self.update_cluster_resource(rack)
|
||||
|
||||
def update_cluster_resource(self, _rack):
|
||||
"""Update cluster rack belonged to, then update datacenter."""
|
||||
cluster = _rack.parent_resource
|
||||
|
||||
if cluster is not None:
|
||||
@ -556,11 +597,13 @@ class Resource(object):
|
||||
self.datacenter.last_update = time.time()
|
||||
|
||||
def get_uuid(self, _h_uuid, _host_name):
|
||||
"""Return host uuid."""
|
||||
host = self.hosts[_host_name]
|
||||
|
||||
return host.get_uuid(_h_uuid)
|
||||
|
||||
def add_vm_to_host(self, _host_name, _vm_id, _vcpus, _mem, _ldisk):
|
||||
"""Add vm to host and adjust compute resources for host."""
|
||||
host = self.hosts[_host_name]
|
||||
|
||||
host.vm_list.append(_vm_id)
|
||||
@ -574,7 +617,9 @@ class Resource(object):
|
||||
host.free_disk_gb -= _ldisk
|
||||
host.disk_available_least -= _ldisk
|
||||
|
||||
def remove_vm_by_h_uuid_from_host(self, _host_name, _h_uuid, _vcpus, _mem, _ldisk):
|
||||
def remove_vm_by_h_uuid_from_host(self, _host_name, _h_uuid, _vcpus, _mem,
|
||||
_ldisk):
|
||||
"""Remove vm from host by h_uuid, adjust compute resources for host."""
|
||||
host = self.hosts[_host_name]
|
||||
|
||||
host.remove_vm_by_h_uuid(_h_uuid)
|
||||
@ -588,7 +633,9 @@ class Resource(object):
|
||||
host.free_disk_gb += _ldisk
|
||||
host.disk_available_least += _ldisk
|
||||
|
||||
def remove_vm_by_uuid_from_host(self, _host_name, _uuid, _vcpus, _mem, _ldisk):
|
||||
def remove_vm_by_uuid_from_host(self, _host_name, _uuid, _vcpus, _mem,
|
||||
_ldisk):
|
||||
"""Remove vm from host by uuid, adjust compute resources for host."""
|
||||
host = self.hosts[_host_name]
|
||||
|
||||
host.remove_vm_by_uuid(_uuid)
|
||||
@ -603,6 +650,7 @@ class Resource(object):
|
||||
host.disk_available_least += _ldisk
|
||||
|
||||
def add_vol_to_host(self, _host_name, _storage_name, _v_id, _disk):
|
||||
"""Add volume to host and adjust available disk on host."""
|
||||
host = self.hosts[_host_name]
|
||||
|
||||
host.volume_list.append(_v_id)
|
||||
@ -612,9 +660,11 @@ class Resource(object):
|
||||
|
||||
storage_host.avail_disk_cap -= _disk
|
||||
|
||||
# NOTE: Assume the up-link of spine switch is not used except out-going from datacenter
|
||||
# NOTE: Assume the up-link of spine switch is not used except out-going
|
||||
# from datacenter
|
||||
# NOTE: What about peer-switches?
|
||||
def deduct_bandwidth(self, _host_name, _placement_level, _bandwidth):
|
||||
"""Deduct bandwidth at appropriate placement level."""
|
||||
host = self.hosts[_host_name]
|
||||
|
||||
if _placement_level == "host":
|
||||
@ -648,26 +698,31 @@ class Resource(object):
|
||||
|
||||
hs.last_update = time.time()
|
||||
|
||||
def update_host_resources(self, _hn, _st, _vcpus, _vcpus_used, _mem, _fmem, _ldisk, _fldisk, _avail_least):
|
||||
def update_host_resources(self, _hn, _st, _vcpus, _vcpus_used, _mem, _fmem,
|
||||
_ldisk, _fldisk, _avail_least):
|
||||
"""Return True if status or compute resources avail on host changed."""
|
||||
updated = False
|
||||
|
||||
host = self.hosts[_hn]
|
||||
|
||||
if host.status != _st:
|
||||
host.status = _st
|
||||
self.logger.debug("Resource.update_host_resources: host status changed")
|
||||
self.logger.debug("Resource.update_host_resources: host status "
|
||||
"changed")
|
||||
updated = True
|
||||
|
||||
if host.original_vCPUs != _vcpus or \
|
||||
host.vCPUs_used != _vcpus_used:
|
||||
self.logger.debug("Resource.update_host_resources: host cpu changed")
|
||||
self.logger.debug("Resource.update_host_resources: host cpu "
|
||||
"changed")
|
||||
host.original_vCPUs = _vcpus
|
||||
host.vCPUs_used = _vcpus_used
|
||||
updated = True
|
||||
|
||||
if host.free_mem_mb != _fmem or \
|
||||
host.original_mem_cap != _mem:
|
||||
self.logger.debug("Resource.update_host_resources: host mem changed")
|
||||
self.logger.debug("Resource.update_host_resources: host mem "
|
||||
"changed")
|
||||
host.free_mem_mb = _fmem
|
||||
host.original_mem_cap = _mem
|
||||
updated = True
|
||||
@ -675,7 +730,8 @@ class Resource(object):
|
||||
if host.free_disk_gb != _fldisk or \
|
||||
host.original_local_disk_cap != _ldisk or \
|
||||
host.disk_available_least != _avail_least:
|
||||
self.logger.debug("Resource.update_host_resources: host disk changed")
|
||||
self.logger.debug("Resource.update_host_resources: host disk "
|
||||
"changed")
|
||||
host.free_disk_gb = _fldisk
|
||||
host.original_local_disk_cap = _ldisk
|
||||
host.disk_available_least = _avail_least
|
||||
@ -687,17 +743,20 @@ class Resource(object):
|
||||
return updated
|
||||
|
||||
def update_host_time(self, _host_name):
|
||||
"""Update last host update time."""
|
||||
host = self.hosts[_host_name]
|
||||
|
||||
host.last_update = time.time()
|
||||
self.update_rack_resource(host)
|
||||
|
||||
def update_storage_time(self, _storage_name):
|
||||
"""Update last storage update time."""
|
||||
storage_host = self.storage_hosts[_storage_name]
|
||||
|
||||
storage_host.last_cap_update = time.time()
|
||||
|
||||
def add_logical_group(self, _host_name, _lg_name, _lg_type):
|
||||
"""Add logical group to host memberships and update host resource."""
|
||||
host = None
|
||||
if _host_name in self.hosts.keys():
|
||||
host = self.hosts[_host_name]
|
||||
@ -720,6 +779,7 @@ class Resource(object):
|
||||
self.update_cluster_resource(host)
|
||||
|
||||
def add_vm_to_logical_groups(self, _host, _vm_id, _logical_groups_of_vm):
|
||||
"""Add vm to logical group and update corresponding lg."""
|
||||
for lgk in _host.memberships.keys():
|
||||
if lgk in _logical_groups_of_vm:
|
||||
lg = self.logical_groups[lgk]
|
||||
@ -728,17 +788,21 @@ class Resource(object):
|
||||
if lg.add_vm_by_h_uuid(_vm_id, _host.name) is True:
|
||||
lg.last_update = time.time()
|
||||
elif isinstance(_host, HostGroup):
|
||||
if lg.group_type == "EX" or lg.group_type == "AFF" or lg.group_type == "DIV":
|
||||
if lg.group_type == "EX" or lg.group_type == "AFF" or \
|
||||
lg.group_type == "DIV":
|
||||
if lgk.split(":")[0] == _host.host_type:
|
||||
if lg.add_vm_by_h_uuid(_vm_id, _host.name) is True:
|
||||
lg.last_update = time.time()
|
||||
|
||||
if isinstance(_host, Host) and _host.host_group is not None:
|
||||
self.add_vm_to_logical_groups(_host.host_group, _vm_id, _logical_groups_of_vm)
|
||||
self.add_vm_to_logical_groups(_host.host_group, _vm_id,
|
||||
_logical_groups_of_vm)
|
||||
elif isinstance(_host, HostGroup) and _host.parent_resource is not None:
|
||||
self.add_vm_to_logical_groups(_host.parent_resource, _vm_id, _logical_groups_of_vm)
|
||||
self.add_vm_to_logical_groups(_host.parent_resource, _vm_id,
|
||||
_logical_groups_of_vm)
|
||||
|
||||
def remove_vm_by_h_uuid_from_logical_groups(self, _host, _h_uuid):
|
||||
"""Remove vm by orchestration id from lgs. Update host and lgs."""
|
||||
for lgk in _host.memberships.keys():
|
||||
if lgk not in self.logical_groups.keys():
|
||||
continue
|
||||
@ -752,7 +816,8 @@ class Resource(object):
|
||||
_host.last_update = time.time()
|
||||
|
||||
elif isinstance(_host, HostGroup):
|
||||
if lg.group_type == "EX" or lg.group_type == "AFF" or lg.group_type == "DIV":
|
||||
if lg.group_type == "EX" or lg.group_type == "AFF" or \
|
||||
lg.group_type == "DIV":
|
||||
if lgk.split(":")[0] == _host.host_type:
|
||||
if lg.remove_vm_by_h_uuid(_h_uuid, _host.name) is True:
|
||||
lg.last_update = time.time()
|
||||
@ -760,16 +825,20 @@ class Resource(object):
|
||||
if _host.remove_membership(lg) is True:
|
||||
_host.last_update = time.time()
|
||||
|
||||
if lg.group_type == "EX" or lg.group_type == "AFF" or lg.group_type == "DIV":
|
||||
if lg.group_type == "EX" or lg.group_type == "AFF" or \
|
||||
lg.group_type == "DIV":
|
||||
if len(lg.vm_list) == 0:
|
||||
del self.logical_groups[lgk]
|
||||
|
||||
if isinstance(_host, Host) and _host.host_group is not None:
|
||||
self.remove_vm_by_h_uuid_from_logical_groups(_host.host_group, _h_uuid)
|
||||
self.remove_vm_by_h_uuid_from_logical_groups(_host.host_group,
|
||||
_h_uuid)
|
||||
elif isinstance(_host, HostGroup) and _host.parent_resource is not None:
|
||||
self.remove_vm_by_h_uuid_from_logical_groups(_host.parent_resource, _h_uuid)
|
||||
self.remove_vm_by_h_uuid_from_logical_groups(_host.parent_resource,
|
||||
_h_uuid)
|
||||
|
||||
def remove_vm_by_uuid_from_logical_groups(self, _host, _uuid):
|
||||
"""Remove vm by uuid from lgs and update proper host and lgs."""
|
||||
for lgk in _host.memberships.keys():
|
||||
if lgk not in self.logical_groups.keys():
|
||||
continue
|
||||
@ -783,7 +852,8 @@ class Resource(object):
|
||||
_host.last_update = time.time()
|
||||
|
||||
elif isinstance(_host, HostGroup):
|
||||
if lg.group_type == "EX" or lg.group_type == "AFF" or lg.group_type == "DIV":
|
||||
if lg.group_type == "EX" or lg.group_type == "AFF" or \
|
||||
lg.group_type == "DIV":
|
||||
if lgk.split(":")[0] == _host.host_type:
|
||||
if lg.remove_vm_by_uuid(_uuid, _host.name) is True:
|
||||
lg.last_update = time.time()
|
||||
@ -791,16 +861,19 @@ class Resource(object):
|
||||
if _host.remove_membership(lg) is True:
|
||||
_host.last_update = time.time()
|
||||
|
||||
if lg.group_type == "EX" or lg.group_type == "AFF" or lg.group_type == "DIV":
|
||||
if lg.group_type == "EX" or lg.group_type == "AFF" or \
|
||||
lg.group_type == "DIV":
|
||||
if len(lg.vm_list) == 0:
|
||||
del self.logical_groups[lgk]
|
||||
|
||||
if isinstance(_host, Host) and _host.host_group is not None:
|
||||
self.remove_vm_by_uuid_from_logical_groups(_host.host_group, _uuid)
|
||||
elif isinstance(_host, HostGroup) and _host.parent_resource is not None:
|
||||
self.remove_vm_by_uuid_from_logical_groups(_host.parent_resource, _uuid)
|
||||
self.remove_vm_by_uuid_from_logical_groups(_host.parent_resource,
|
||||
_uuid)
|
||||
|
||||
def clean_none_vms_from_logical_groups(self, _host):
|
||||
"""Clean vms with status none from logical groups."""
|
||||
for lgk in _host.memberships.keys():
|
||||
if lgk not in self.logical_groups.keys():
|
||||
continue
|
||||
@ -814,7 +887,8 @@ class Resource(object):
|
||||
_host.last_update = time.time()
|
||||
|
||||
elif isinstance(_host, HostGroup):
|
||||
if lg.group_type == "EX" or lg.group_type == "AFF" or lg.group_type == "DIV":
|
||||
if lg.group_type == "EX" or lg.group_type == "AFF" or \
|
||||
lg.group_type == "DIV":
|
||||
if lgk.split(":")[0] == _host.host_type:
|
||||
if lg.clean_none_vms(_host.name) is True:
|
||||
lg.last_update = time.time()
|
||||
@ -822,7 +896,8 @@ class Resource(object):
|
||||
if _host.remove_membership(lg) is True:
|
||||
_host.last_update = time.time()
|
||||
|
||||
if lg.group_type == "EX" or lg.group_type == "AFF" or lg.group_type == "DIV":
|
||||
if lg.group_type == "EX" or lg.group_type == "AFF" or \
|
||||
lg.group_type == "DIV":
|
||||
if len(lg.vm_list) == 0:
|
||||
del self.logical_groups[lgk]
|
||||
|
||||
@ -832,6 +907,7 @@ class Resource(object):
|
||||
self.clean_none_vms_from_logical_groups(_host.parent_resource)
|
||||
|
||||
def update_uuid_in_logical_groups(self, _h_uuid, _uuid, _host):
|
||||
"""Update uuid in lgs and update lg last update time."""
|
||||
for lgk in _host.memberships.keys():
|
||||
lg = self.logical_groups[lgk]
|
||||
|
||||
@ -839,7 +915,8 @@ class Resource(object):
|
||||
if lg.update_uuid(_h_uuid, _uuid, _host.name) is True:
|
||||
lg.last_update = time.time()
|
||||
elif isinstance(_host, HostGroup):
|
||||
if lg.group_type == "EX" or lg.group_type == "AFF" or lg.group_type == "DIV":
|
||||
if lg.group_type == "EX" or lg.group_type == "AFF" or \
|
||||
lg.group_type == "DIV":
|
||||
if lgk.split(":")[0] == _host.host_type:
|
||||
if lg.update_uuid(_h_uuid, _uuid, _host.name) is True:
|
||||
lg.last_update = time.time()
|
||||
@ -847,9 +924,11 @@ class Resource(object):
|
||||
if isinstance(_host, Host) and _host.host_group is not None:
|
||||
self.update_uuid_in_logical_groups(_h_uuid, _uuid, _host.host_group)
|
||||
elif isinstance(_host, HostGroup) and _host.parent_resource is not None:
|
||||
self.update_uuid_in_logical_groups(_h_uuid, _uuid, _host.parent_resource)
|
||||
self.update_uuid_in_logical_groups(_h_uuid, _uuid,
|
||||
_host.parent_resource)
|
||||
|
||||
def update_h_uuid_in_logical_groups(self, _h_uuid, _uuid, _host):
|
||||
"""Update orchestration id in lgs and update lg last update time."""
|
||||
for lgk in _host.memberships.keys():
|
||||
lg = self.logical_groups[lgk]
|
||||
|
||||
@ -857,17 +936,26 @@ class Resource(object):
|
||||
if lg.update_h_uuid(_h_uuid, _uuid, _host.name) is True:
|
||||
lg.last_update = time.time()
|
||||
elif isinstance(_host, HostGroup):
|
||||
if lg.group_type == "EX" or lg.group_type == "AFF" or lg.group_type == "DIV":
|
||||
if lg.group_type == "EX" or lg.group_type == "AFF" or \
|
||||
lg.group_type == "DIV":
|
||||
if lgk.split(":")[0] == _host.host_type:
|
||||
if lg.update_h_uuid(_h_uuid, _uuid, _host.name) is True:
|
||||
lg.last_update = time.time()
|
||||
|
||||
if isinstance(_host, Host) and _host.host_group is not None:
|
||||
self.update_h_uuid_in_logical_groups(_h_uuid, _uuid, _host.host_group)
|
||||
self.update_h_uuid_in_logical_groups(_h_uuid, _uuid,
|
||||
_host.host_group)
|
||||
elif isinstance(_host, HostGroup) and _host.parent_resource is not None:
|
||||
self.update_h_uuid_in_logical_groups(_h_uuid, _uuid, _host.parent_resource)
|
||||
self.update_h_uuid_in_logical_groups(_h_uuid, _uuid,
|
||||
_host.parent_resource)
|
||||
|
||||
def compute_avail_resources(self, hk, host):
|
||||
"""Compute avail resources for host.
|
||||
|
||||
This function computes ram, cpu and disk allocation ratios for
|
||||
the passed in host. Then uses data to compute avail memory, disk
|
||||
and vCPUs.
|
||||
"""
|
||||
ram_allocation_ratio_list = []
|
||||
cpu_allocation_ratio_list = []
|
||||
disk_allocation_ratio_list = []
|
||||
@ -875,11 +963,14 @@ class Resource(object):
|
||||
for _, lg in host.memberships.iteritems():
|
||||
if lg.group_type == "AGGR":
|
||||
if "ram_allocation_ratio" in lg.metadata.keys():
|
||||
ram_allocation_ratio_list.append(float(lg.metadata["ram_allocation_ratio"]))
|
||||
ram_allocation_ratio_list.append(
|
||||
float(lg.metadata["ram_allocation_ratio"]))
|
||||
if "cpu_allocation_ratio" in lg.metadata.keys():
|
||||
cpu_allocation_ratio_list.append(float(lg.metadata["cpu_allocation_ratio"]))
|
||||
cpu_allocation_ratio_list.append(
|
||||
float(lg.metadata["cpu_allocation_ratio"]))
|
||||
if "disk_allocation_ratio" in lg.metadata.keys():
|
||||
disk_allocation_ratio_list.append(float(lg.metadata["disk_allocation_ratio"]))
|
||||
disk_allocation_ratio_list.append(
|
||||
float(lg.metadata["disk_allocation_ratio"]))
|
||||
|
||||
ram_allocation_ratio = 1.0
|
||||
if len(ram_allocation_ratio_list) > 0:
|
||||
@ -890,12 +981,15 @@ class Resource(object):
|
||||
|
||||
static_ram_standby_ratio = 0
|
||||
if self.config.static_mem_standby_ratio > 0:
|
||||
static_ram_standby_ratio = float(self.config.static_mem_standby_ratio) / float(100)
|
||||
static_ram_standby_ratio = \
|
||||
float(self.config.static_mem_standby_ratio) / float(100)
|
||||
|
||||
host.compute_avail_mem(ram_allocation_ratio, static_ram_standby_ratio)
|
||||
|
||||
self.logger.debug("Resource.compute_avail_resources: host (" + hk + ")'s total_mem = " +
|
||||
str(host.mem_cap) + ", avail_mem = " + str(host.avail_mem_cap))
|
||||
self.logger.debug("Resource.compute_avail_resources: host (" +
|
||||
hk + ")'s total_mem = " +
|
||||
str(host.mem_cap) + ", avail_mem = " +
|
||||
str(host.avail_mem_cap))
|
||||
|
||||
cpu_allocation_ratio = 1.0
|
||||
if len(cpu_allocation_ratio_list) > 0:
|
||||
@ -906,30 +1000,39 @@ class Resource(object):
|
||||
|
||||
static_cpu_standby_ratio = 0
|
||||
if self.config.static_cpu_standby_ratio > 0:
|
||||
static_cpu_standby_ratio = float(self.config.static_cpu_standby_ratio) / float(100)
|
||||
static_cpu_standby_ratio = \
|
||||
float(self.config.static_cpu_standby_ratio) / float(100)
|
||||
|
||||
host.compute_avail_vCPUs(cpu_allocation_ratio, static_cpu_standby_ratio)
|
||||
|
||||
self.logger.debug("Resource.compute_avail_resources: host (" + hk + ")'s total_vCPUs = " +
|
||||
str(host.vCPUs) + ", avail_vCPUs = " + str(host.avail_vCPUs))
|
||||
self.logger.debug("Resource.compute_avail_resources: host (" +
|
||||
hk + ")'s total_vCPUs = " +
|
||||
str(host.vCPUs) + ", avail_vCPUs = " +
|
||||
str(host.avail_vCPUs))
|
||||
|
||||
disk_allocation_ratio = 1.0
|
||||
if len(disk_allocation_ratio_list) > 0:
|
||||
disk_allocation_ratio = min(disk_allocation_ratio_list)
|
||||
else:
|
||||
if self.config.default_disk_allocation_ratio > 0:
|
||||
disk_allocation_ratio = self.config.default_disk_allocation_ratio
|
||||
disk_allocation_ratio = \
|
||||
self.config.default_disk_allocation_ratio
|
||||
|
||||
static_disk_standby_ratio = 0
|
||||
if self.config.static_local_disk_standby_ratio > 0:
|
||||
static_disk_standby_ratio = float(self.config.static_local_disk_standby_ratio) / float(100)
|
||||
static_disk_standby_ratio = \
|
||||
float(self.config.static_local_disk_standby_ratio) / float(100)
|
||||
|
||||
host.compute_avail_disk(disk_allocation_ratio, static_disk_standby_ratio)
|
||||
host.compute_avail_disk(disk_allocation_ratio,
|
||||
static_disk_standby_ratio)
|
||||
|
||||
self.logger.debug("Resource.compute_avail_resources: host (" + hk + ")'s total_local_disk = " +
|
||||
str(host.local_disk_cap) + ", avail_local_disk = " + str(host.avail_local_disk_cap))
|
||||
self.logger.debug("Resource.compute_avail_resources: host (" +
|
||||
hk + ")'s total_local_disk = " +
|
||||
str(host.local_disk_cap) + ", avail_local_disk = " +
|
||||
str(host.avail_local_disk_cap))
|
||||
|
||||
def get_flavor(self, _name):
|
||||
"""Return flavor according to name passed in."""
|
||||
flavor = None
|
||||
|
||||
if _name in self.flavors.keys():
|
||||
|
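# --- Editor's illustrative sketch (not part of this commit) ---
# Worked example for compute_avail_resources() above, assuming the usual
# oversubscription formula avail = capacity * allocation_ratio * (1 - standby).
# compute_avail_mem()/_vCPUs()/_disk() are not shown in this diff, so the
# exact formula is an assumption.
mem_cap = 256 * 1024.0                   # MB of RAM reported for the host
ram_allocation_ratio = 1.5               # from aggregate metadata, else config default
static_ram_standby_ratio = 10 / 100.0    # config.static_mem_standby_ratio = 10
avail_mem_cap = mem_cap * ram_allocation_ratio * (1 - static_ram_standby_ratio)
# -> 353894.4 MB usable for placement on this host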
@ -1,31 +1,45 @@
|
||||
#
|
||||
# Copyright 2014-2017 AT&T Intellectual Property
|
||||
#
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Resource Base.
|
||||
|
||||
File contains resource datatype objects from base type of a flavor and
|
||||
builds all the way up to a datacenter object.
|
||||
"""
|
||||
|
||||
from valet.engine.optimizer.app_manager.app_topology_base import LEVELS
|
||||
|
||||
|
||||
class Datacenter(object):
|
||||
"""Datacenter Class.
|
||||
|
||||
This object represents a datacenter. It contains all memberships or
|
||||
logical groups in the datacenter, all resources available, placed vms,
|
||||
and more throughout the datacenter.
|
||||
"""
|
||||
|
||||
def __init__(self, _name):
|
||||
"""Init Datacenter object."""
|
||||
self.name = _name
|
||||
|
||||
self.region_code_list = []
|
||||
|
||||
self.status = "enabled"
|
||||
|
||||
self.memberships = {} # all available logical groups (e.g., aggregate) in the datacenter
|
||||
# all available logical groups (e.g., aggregate) in the datacenter
|
||||
self.memberships = {}
|
||||
|
||||
self.vCPUs = 0
|
||||
self.original_vCPUs = 0
|
||||
@ -42,13 +56,17 @@ class Datacenter(object):
|
||||
|
||||
self.resources = {}
|
||||
|
||||
self.vm_list = [] # a list of placed vms, (ochestration_uuid, vm_name, physical_uuid)
|
||||
self.volume_list = [] # a list of placed volumes
|
||||
# a list of placed vms, (orchestration_uuid, vm_name, physical_uuid)
|
||||
self.vm_list = []
|
||||
|
||||
# a list of placed volumes
|
||||
self.volume_list = []
|
||||
|
||||
self.last_update = 0
|
||||
self.last_link_update = 0
|
||||
|
||||
def init_resources(self):
|
||||
"""Init datacenter resources to 0."""
|
||||
self.vCPUs = 0
|
||||
self.original_vCPUs = 0
|
||||
self.avail_vCPUs = 0
|
||||
@ -60,6 +78,7 @@ class Datacenter(object):
|
||||
self.avail_local_disk_cap = 0
|
||||
|
||||
def get_json_info(self):
|
||||
"""Return JSON info for datacenter object."""
|
||||
membership_list = []
|
||||
for lgk in self.memberships.keys():
|
||||
membership_list.append(lgk)
|
||||
@ -100,14 +119,24 @@ class Datacenter(object):
|
||||
|
||||
# data container for rack or cluster
|
||||
class HostGroup(object):
|
||||
"""Class for Host Group Object.
|
||||
|
||||
This class represents a group of hosts. If a single host is a single server,
then a host group is a rack or cluster of servers. This class contains all
|
||||
memberships and resources for the group of hosts.
|
||||
"""
|
||||
|
||||
def __init__(self, _id):
|
||||
"""Init for Host Group Class."""
|
||||
self.name = _id
|
||||
self.host_type = "rack" # rack or cluster(e.g., power domain, zone)
|
||||
|
||||
# rack or cluster(e.g., power domain, zone)
|
||||
self.host_type = "rack"
|
||||
|
||||
self.status = "enabled"
|
||||
|
||||
self.memberships = {} # all available logical groups (e.g., aggregate) in this group
|
||||
# all available logical groups (e.g., aggregate) in this group
|
||||
self.memberships = {}
|
||||
|
||||
self.vCPUs = 0
|
||||
self.original_vCPUs = 0
|
||||
@ -125,13 +154,17 @@ class HostGroup(object):
|
||||
self.parent_resource = None # e.g., datacenter
|
||||
self.child_resources = {} # e.g., hosting servers
|
||||
|
||||
self.vm_list = [] # a list of placed vms, (ochestration_uuid, vm_name, physical_uuid)
|
||||
self.volume_list = [] # a list of placed volumes
|
||||
# a list of placed vms, (orchestration_uuid, vm_name, physical_uuid)
|
||||
self.vm_list = []
|
||||
|
||||
# a list of placed volumes
|
||||
self.volume_list = []
|
||||
|
||||
self.last_update = 0
|
||||
self.last_link_update = 0
|
||||
|
||||
def init_resources(self):
|
||||
"""Init all host group resources to 0."""
|
||||
self.vCPUs = 0
|
||||
self.original_vCPUs = 0
|
||||
self.avail_vCPUs = 0
|
||||
@ -143,19 +176,24 @@ class HostGroup(object):
|
||||
self.avail_local_disk_cap = 0
|
||||
|
||||
def init_memberships(self):
|
||||
"""Init Host Group memberships."""
|
||||
for lgk in self.memberships.keys():
|
||||
lg = self.memberships[lgk]
|
||||
if lg.group_type == "EX" or lg.group_type == "AFF" or lg.group_type == "DIV":
|
||||
if lg.group_type == "EX" or lg.group_type == "AFF" or \
|
||||
lg.group_type == "DIV":
|
||||
level = lg.name.split(":")[0]
|
||||
if LEVELS.index(level) < LEVELS.index(self.host_type) or self.name not in lg.vms_per_host.keys():
|
||||
if LEVELS.index(level) < LEVELS.index(self.host_type) or \
|
||||
self.name not in lg.vms_per_host.keys():
|
||||
del self.memberships[lgk]
|
||||
else:
|
||||
del self.memberships[lgk]
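init_memberships keeps only exclusivity/affinity/diversity groups that are scoped at this level or above and that actually place VMs on this group. Assuming LEVELS is ordered roughly as ["host", "rack", "cluster"] (an assumption; only its use is visible here), the check behaves like this sketch:

LEVELS = ["host", "rack", "cluster"]   # assumed ordering, lowest level first

def keeps_membership(group_name, group_hosts, host_type, my_name):
    # Group names are prefixed with their level, e.g. "rack:aff_group1".
    level = group_name.split(":")[0]
    # Drop groups scoped below this resource or not hosting anything here.
    return (LEVELS.index(level) >= LEVELS.index(host_type)
            and my_name in group_hosts)

# e.g. keeps_membership("rack:aff1", {"rack01": []}, "rack", "rack01") -> True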
def remove_membership(self, _lg):
|
||||
"""Return True if membership to group _lg removed."""
|
||||
cleaned = False
|
||||
|
||||
if _lg.group_type == "EX" or _lg.group_type == "AFF" or _lg.group_type == "DIV":
|
||||
if _lg.group_type == "EX" or _lg.group_type == "AFF" or \
|
||||
_lg.group_type == "DIV":
|
||||
if self.name not in _lg.vms_per_host.keys():
|
||||
del self.memberships[_lg.name]
|
||||
cleaned = True
|
||||
@ -163,12 +201,14 @@ class HostGroup(object):
|
||||
return cleaned
|
||||
|
||||
def check_availability(self):
|
||||
"""Return True if Host Group status is 'enabled'."""
|
||||
if self.status == "enabled":
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
def get_json_info(self):
|
||||
"""Return JSON info for Host Group object."""
|
||||
membership_list = []
|
||||
for lgk in self.memberships.keys():
|
||||
membership_list.append(lgk)
|
||||
@ -208,15 +248,25 @@ class HostGroup(object):
|
||||
|
||||
|
||||
class Host(object):
|
||||
"""Class for Host Object.
|
||||
|
||||
This class is for a Host Object, imagine a server. This means
|
||||
information about the groups the host is a part of, all the hardware
|
||||
parameters (vCPUs, local disk, memory) as well as the list of vms and
|
||||
volumes placed on the host.
|
||||
"""
|
||||
|
||||
def __init__(self, _name):
|
||||
"""Init for Host object."""
|
||||
self.name = _name
|
||||
|
||||
self.tag = [] # mark if this is synch'ed by multiple sources
|
||||
# mark if this is synch'ed by multiple sources
|
||||
self.tag = []
|
||||
self.status = "enabled"
|
||||
self.state = "up"
|
||||
|
||||
self.memberships = {} # logical group (e.g., aggregate) this hosting server is involved in
|
||||
# logical group (e.g., aggregate) this hosting server is involved in
|
||||
self.memberships = {}
|
||||
|
||||
self.vCPUs = 0
|
||||
self.original_vCPUs = 0
|
||||
@ -238,13 +288,17 @@ class Host(object):
|
||||
|
||||
self.host_group = None # e.g., rack
|
||||
|
||||
self.vm_list = [] # a list of placed vms, (ochestration_uuid, vm_name, physical_uuid)
|
||||
self.volume_list = [] # a list of placed volumes
|
||||
# a list of placed vms, (orchestration_uuid, vm_name, physical_uuid)
|
||||
self.vm_list = []
|
||||
|
||||
# a list of placed volumes
|
||||
self.volume_list = []
|
||||
|
||||
self.last_update = 0
|
||||
self.last_link_update = 0
|
||||
|
||||
def clean_memberships(self):
|
||||
"""Return True if host cleaned from logical group membership."""
|
||||
cleaned = False
|
||||
|
||||
for lgk in self.memberships.keys():
|
||||
@ -256,9 +310,11 @@ class Host(object):
|
||||
return cleaned
|
||||
|
||||
def remove_membership(self, _lg):
|
||||
"""Return True if host removed from logical group _lg passed in."""
|
||||
cleaned = False
|
||||
|
||||
if _lg.group_type == "EX" or _lg.group_type == "AFF" or _lg.group_type == "DIV":
|
||||
if _lg.group_type == "EX" or _lg.group_type == "AFF" or \
|
||||
_lg.group_type == "DIV":
|
||||
if self.name not in _lg.vms_per_host.keys():
|
||||
del self.memberships[_lg.name]
|
||||
cleaned = True
|
||||
@ -266,12 +322,15 @@ class Host(object):
|
||||
return cleaned
|
||||
|
||||
def check_availability(self):
|
||||
if self.status == "enabled" and self.state == "up" and ("nova" in self.tag) and ("infra" in self.tag):
|
||||
"""Return True if host is up, enabled and tagged as nova infra."""
|
||||
if self.status == "enabled" and self.state == "up" and \
|
||||
("nova" in self.tag) and ("infra" in self.tag):
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
def get_uuid(self, _h_uuid):
|
||||
"""Return uuid of vm with matching orchestration id(_h_uuid)."""
|
||||
uuid = None
|
||||
|
||||
for vm_id in self.vm_list:
|
||||
@ -282,6 +341,7 @@ class Host(object):
|
||||
return uuid
|
||||
|
||||
def exist_vm_by_h_uuid(self, _h_uuid):
|
||||
"""Return True if vm with orchestration id(_h_uuid) exists on host."""
|
||||
exist = False
|
||||
|
||||
for vm_id in self.vm_list:
|
||||
@ -292,6 +352,7 @@ class Host(object):
|
||||
return exist
|
||||
|
||||
def exist_vm_by_uuid(self, _uuid):
|
||||
"""Return True if vm with physical id(_uuid) exists on host."""
|
||||
exist = False
|
||||
|
||||
for vm_id in self.vm_list:
|
||||
@ -302,6 +363,7 @@ class Host(object):
|
||||
return exist
|
||||
|
||||
def remove_vm_by_h_uuid(self, _h_uuid):
|
||||
"""Return True if vm removed with matching _h_uuid."""
|
||||
success = False
|
||||
|
||||
for vm_id in self.vm_list:
|
||||
@ -313,6 +375,7 @@ class Host(object):
|
||||
return success
|
||||
|
||||
def remove_vm_by_uuid(self, _uuid):
|
||||
"""Return True if vm removed with matching _uuid."""
|
||||
success = False
|
||||
|
||||
for vm_id in self.vm_list:
|
||||
@ -324,6 +387,7 @@ class Host(object):
|
||||
return success
|
||||
|
||||
def update_uuid(self, _h_uuid, _uuid):
|
||||
"""Return True if vm physical id updated."""
|
||||
success = False
|
||||
|
||||
vm_name = "none"
|
||||
@ -341,6 +405,7 @@ class Host(object):
|
||||
return success
|
||||
|
||||
def update_h_uuid(self, _h_uuid, _uuid):
|
||||
"""Return True if vm orchestration id (_h_uuid) updated."""
|
||||
success = False
|
||||
|
||||
vm_name = "none"
|
||||
@ -358,19 +423,27 @@ class Host(object):
|
||||
return success
|
||||
|
||||
def compute_avail_vCPUs(self, _overcommit_ratio, _standby_ratio):
|
||||
self.vCPUs = self.original_vCPUs * _overcommit_ratio * (1.0 - _standby_ratio)
|
||||
"""Calc avail_vCPUs by calculating vCPUs and subtracting in use."""
|
||||
self.vCPUs = \
|
||||
self.original_vCPUs * _overcommit_ratio * (1.0 - _standby_ratio)
|
||||
|
||||
self.avail_vCPUs = self.vCPUs - self.vCPUs_used
|
||||
|
||||
def compute_avail_mem(self, _overcommit_ratio, _standby_ratio):
|
||||
self.mem_cap = self.original_mem_cap * _overcommit_ratio * (1.0 - _standby_ratio)
|
||||
"""Calc avail_mem by calculating mem_cap and subtract used mem."""
|
||||
self.mem_cap = \
|
||||
self.original_mem_cap * _overcommit_ratio * (1.0 - _standby_ratio)
|
||||
|
||||
used_mem_mb = self.original_mem_cap - self.free_mem_mb
|
||||
|
||||
self.avail_mem_cap = self.mem_cap - used_mem_mb
|
||||
|
||||
def compute_avail_disk(self, _overcommit_ratio, _standby_ratio):
|
||||
self.local_disk_cap = self.original_local_disk_cap * _overcommit_ratio * (1.0 - _standby_ratio)
|
||||
"""Calc avail_disk by calc local_disk_cap and subtract used disk."""
|
||||
self.local_disk_cap = \
|
||||
self.original_local_disk_cap * \
|
||||
_overcommit_ratio * \
|
||||
(1.0 - _standby_ratio)
|
||||
|
||||
free_disk_cap = self.free_disk_gb
|
||||
if self.disk_available_least > 0:
|
||||
@ -381,6 +454,7 @@ class Host(object):
|
||||
self.avail_local_disk_cap = self.local_disk_cap - used_disk_cap
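All three compute_avail_* methods apply the same formula: effective capacity = original capacity * overcommit ratio * (1 - standby ratio), minus what is already in use. With illustrative numbers (not taken from the code):

original_vcpus = 32          # physical cores reported for the host
overcommit_ratio = 2.0       # cpu_allocation_ratio
standby_ratio = 0.2          # 20% of capacity held back as standby
vcpus_used = 10.0

vcpus = original_vcpus * overcommit_ratio * (1.0 - standby_ratio)   # 51.2
avail_vcpus = vcpus - vcpus_used                                    # 41.2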
def get_json_info(self):
|
||||
"""Return JSON info for Host object."""
|
||||
membership_list = []
|
||||
for lgk in self.memberships.keys():
|
||||
membership_list.append(lgk)
|
||||
@ -418,23 +492,37 @@ class Host(object):
|
||||
|
||||
|
||||
class LogicalGroup(object):
|
||||
"""Logical Group class.
|
||||
|
||||
This class contains info about grouped vms, such as metadata when placing
|
||||
nodes, list of placed vms, list of placed volumes and group type.
|
||||
"""
|
||||
|
||||
def __init__(self, _name):
|
||||
"""Init Logical Group object."""
|
||||
self.name = _name
|
||||
self.group_type = "AGGR" # AGGR, AZ, INTG, EX, DIV, or AFF
|
||||
|
||||
# AGGR, AZ, INTG, EX, DIV, or AFF
|
||||
self.group_type = "AGGR"
|
||||
|
||||
self.status = "enabled"
|
||||
|
||||
self.metadata = {} # any metadata to be matched when placing nodes
|
||||
# any metadata to be matched when placing nodes
|
||||
self.metadata = {}
|
||||
|
||||
self.vm_list = [] # a list of placed vms, (ochestration_uuid, vm_name, physical_uuid)
|
||||
self.volume_list = [] # a list of placed volumes
|
||||
# a list of placed vms, (orchestration_uuid, vm_name, physical_uuid)
|
||||
self.vm_list = []
|
||||
|
||||
self.vms_per_host = {} # key = host_id, value = a list of placed vms
|
||||
# a list of placed volumes
|
||||
self.volume_list = []
|
||||
|
||||
# key = host_id, value = a list of placed vms
|
||||
self.vms_per_host = {}
|
||||
|
||||
self.last_update = 0
|
||||
|
||||
def exist_vm_by_h_uuid(self, _h_uuid):
|
||||
"""Return True if h_uuid exist in vm_list as an orchestration_uuid."""
|
||||
exist = False
|
||||
|
||||
for vm_id in self.vm_list:
|
||||
@ -445,6 +533,7 @@ class LogicalGroup(object):
|
||||
return exist
|
||||
|
||||
def exist_vm_by_uuid(self, _uuid):
|
||||
"""Return True if uuid exist in vm_list as physical_uuid."""
|
||||
exist = False
|
||||
|
||||
for vm_id in self.vm_list:
|
||||
@ -455,6 +544,7 @@ class LogicalGroup(object):
|
||||
return exist
|
||||
|
||||
def update_uuid(self, _h_uuid, _uuid, _host_id):
|
||||
"""Return True if _uuid and/or _host_id successfully updated."""
|
||||
success = False
|
||||
|
||||
vm_name = "none"
|
||||
@ -481,6 +571,7 @@ class LogicalGroup(object):
|
||||
return success
|
||||
|
||||
def update_h_uuid(self, _h_uuid, _uuid, _host_id):
|
||||
"""Return True physical_uuid and/or _host_id successfully updated."""
|
||||
success = False
|
||||
|
||||
vm_name = "none"
|
||||
@ -507,12 +598,14 @@ class LogicalGroup(object):
|
||||
return success
|
||||
|
||||
def add_vm_by_h_uuid(self, _vm_id, _host_id):
|
||||
"""Return True if vm added with id _vm_id(orchestration id)."""
|
||||
success = False
|
||||
|
||||
if self.exist_vm_by_h_uuid(_vm_id[0]) is False:
|
||||
self.vm_list.append(_vm_id)
|
||||
|
||||
if self.group_type == "EX" or self.group_type == "AFF" or self.group_type == "DIV":
|
||||
if self.group_type == "EX" or self.group_type == "AFF" or \
|
||||
self.group_type == "DIV":
|
||||
if _host_id not in self.vms_per_host.keys():
|
||||
self.vms_per_host[_host_id] = []
|
||||
self.vms_per_host[_host_id].append(_vm_id)
|
||||
@ -522,6 +615,7 @@ class LogicalGroup(object):
|
||||
return success
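For exclusivity, affinity, and diversity groups, add_vm_by_h_uuid records a placement twice: once in the flat vm_list of (orchestration_uuid, vm_name, physical_uuid) tuples and once in vms_per_host keyed by the hosting server. A hypothetical usage sketch (values are made up):

lg = LogicalGroup("rack:aff_group1")
lg.group_type = "AFF"

vm_id = ("orch-uuid-1", "web-0", "none")   # physical uuid not known yet
lg.add_vm_by_h_uuid(vm_id, "host0101")

# lg.vm_list      -> [("orch-uuid-1", "web-0", "none")]
# lg.vms_per_host -> {"host0101": [("orch-uuid-1", "web-0", "none")]}
# Later, update_uuid("orch-uuid-1", "phys-uuid-1", "host0101") fills in
# the physical uuid once it is reported back.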
def remove_vm_by_h_uuid(self, _h_uuid, _host_id):
|
||||
"""Return True if vm removed with id _h_uuid(orchestration id)."""
|
||||
success = False
|
||||
|
||||
for vm_id in self.vm_list:
|
||||
@ -537,13 +631,16 @@ class LogicalGroup(object):
|
||||
success = True
|
||||
break
|
||||
|
||||
if self.group_type == "EX" or self.group_type == "AFF" or self.group_type == "DIV":
|
||||
if (_host_id in self.vms_per_host.keys()) and len(self.vms_per_host[_host_id]) == 0:
|
||||
if self.group_type == "EX" or self.group_type == "AFF" or \
|
||||
self.group_type == "DIV":
|
||||
if (_host_id in self.vms_per_host.keys()) and \
|
||||
len(self.vms_per_host[_host_id]) == 0:
|
||||
del self.vms_per_host[_host_id]
|
||||
|
||||
return success
|
||||
|
||||
def remove_vm_by_uuid(self, _uuid, _host_id):
|
||||
"""Return True if vm with matching uuid found and removed."""
|
||||
success = False
|
||||
|
||||
for vm_id in self.vm_list:
|
||||
@ -559,13 +656,16 @@ class LogicalGroup(object):
|
||||
success = True
|
||||
break
|
||||
|
||||
if self.group_type == "EX" or self.group_type == "AFF" or self.group_type == "DIV":
|
||||
if (_host_id in self.vms_per_host.keys()) and len(self.vms_per_host[_host_id]) == 0:
|
||||
if self.group_type == "EX" or self.group_type == "AFF" or \
|
||||
self.group_type == "DIV":
|
||||
if (_host_id in self.vms_per_host.keys()) and \
|
||||
len(self.vms_per_host[_host_id]) == 0:
|
||||
del self.vms_per_host[_host_id]
|
||||
|
||||
return success
|
||||
|
||||
def clean_none_vms(self, _host_id):
|
||||
"""Return True if vm's or host vm's removed with physical id none."""
|
||||
success = False
|
||||
|
||||
for vm_id in self.vm_list:
|
||||
@ -579,13 +679,16 @@ class LogicalGroup(object):
|
||||
self.vms_per_host[_host_id].remove(vm_id)
|
||||
success = True
|
||||
|
||||
if self.group_type == "EX" or self.group_type == "AFF" or self.group_type == "DIV":
|
||||
if (_host_id in self.vms_per_host.keys()) and len(self.vms_per_host[_host_id]) == 0:
|
||||
if self.group_type == "EX" or self.group_type == "AFF" or \
|
||||
self.group_type == "DIV":
|
||||
if (_host_id in self.vms_per_host.keys()) and \
|
||||
len(self.vms_per_host[_host_id]) == 0:
|
||||
del self.vms_per_host[_host_id]
|
||||
|
||||
return success
|
||||
|
||||
def get_json_info(self):
|
||||
"""Return JSON info for Logical Group object."""
|
||||
return {'status': self.status,
|
||||
'group_type': self.group_type,
|
||||
'metadata': self.metadata,
|
||||
@ -596,8 +699,10 @@ class LogicalGroup(object):
|
||||
|
||||
|
||||
class Switch(object):
|
||||
"""Switch class."""
|
||||
|
||||
def __init__(self, _switch_id):
|
||||
"""Init Switch object."""
|
||||
self.name = _switch_id
|
||||
self.switch_type = "ToR" # root, spine, ToR, or leaf
|
||||
|
||||
@ -610,6 +715,7 @@ class Switch(object):
|
||||
self.last_update = 0
|
||||
|
||||
def get_json_info(self):
|
||||
"""Return JSON info on Switch object."""
|
||||
ulinks = {}
|
||||
for ulk, ul in self.up_links.iteritems():
|
||||
ulinks[ulk] = ul.get_json_info()
|
||||
@ -626,8 +732,10 @@ class Switch(object):
|
||||
|
||||
|
||||
class Link(object):
|
||||
"""Link class."""
|
||||
|
||||
def __init__(self, _name):
|
||||
"""Init Link object."""
|
||||
self.name = _name # format: source + "-" + target
|
||||
self.resource = None # switch being connected to
|
||||
|
||||
@ -635,29 +743,33 @@ class Link(object):
|
||||
self.avail_nw_bandwidth = 0
|
||||
|
||||
def get_json_info(self):
|
||||
"""Return JSON info on Link object."""
|
||||
return {'resource': self.resource.name,
|
||||
'bandwidth': self.nw_bandwidth,
|
||||
'avail_bandwidth': self.avail_nw_bandwidth}
|
||||
|
||||
|
||||
class StorageHost(object):
|
||||
"""Storage Host class."""
|
||||
|
||||
def __init__(self, _name):
|
||||
"""Init Storage Host object."""
|
||||
self.name = _name
|
||||
self.storage_class = None # tiering, e.g., platinum, gold, silver
|
||||
self.storage_class = None # tiering, e.g., platinum, gold, silver
|
||||
|
||||
self.status = "enabled"
|
||||
self.host_list = []
|
||||
|
||||
self.disk_cap = 0 # GB
|
||||
self.disk_cap = 0 # GB
|
||||
self.avail_disk_cap = 0
|
||||
|
||||
self.volume_list = [] # list of volume names placed in this host
|
||||
self.volume_list = [] # list of volume names placed in this host
|
||||
|
||||
self.last_update = 0
|
||||
self.last_cap_update = 0
|
||||
|
||||
def get_json_info(self):
|
||||
"""Return JSON info on Storage Host object."""
|
||||
return {'status': self.status,
|
||||
'class': self.storage_class,
|
||||
'host_list': self.host_list,
|
||||
@ -669,8 +781,10 @@ class StorageHost(object):
|
||||
|
||||
|
||||
class Flavor(object):
|
||||
"""Flavor class."""
|
||||
|
||||
def __init__(self, _name):
|
||||
"""Init flavor object."""
|
||||
self.name = _name
|
||||
self.flavor_id = None
|
||||
|
||||
@ -685,6 +799,7 @@ class Flavor(object):
|
||||
self.last_update = 0
|
||||
|
||||
def get_json_info(self):
|
||||
"""Return JSON info of Flavor Object."""
|
||||
return {'status': self.status,
|
||||
'flavor_id': self.flavor_id,
|
||||
'vCPUs': self.vCPUs,
|
||||
|
@ -1,36 +1,33 @@
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
# Copyright 2014-2017 AT&T Intellectual Property
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Simulate hosts and flavors."""
|
||||
|
||||
#################################################################################################################
|
||||
# Author: Gueyoung Jung
|
||||
# Contact: gjung@research.att.com
|
||||
# Version 2.0.2: Feb. 9, 2016
|
||||
#
|
||||
# Functions
|
||||
# - Simulate hosts and flavors
|
||||
#
|
||||
#################################################################################################################
|
||||
|
||||
|
||||
from valet.engine.resource_manager.resource_base import Host, LogicalGroup, Flavor
|
||||
from valet.engine.resource_manager.resource_base \
|
||||
import Host, LogicalGroup, Flavor
|
||||
|
||||
|
||||
class SimCompute(object):
|
||||
"""Simulate Compute class."""
|
||||
|
||||
def __init__(self, _config):
|
||||
"""Init."""
|
||||
self.config = _config
|
||||
|
||||
def set_hosts(self, _hosts, _logical_groups):
|
||||
"""Return success after setting sim hosts and flavors."""
|
||||
self._set_availability_zones(_hosts, _logical_groups)
|
||||
|
||||
self._set_aggregates(_hosts, _logical_groups)
|
||||
@ -47,18 +44,9 @@ class SimCompute(object):
|
||||
_logical_groups[logical_group.name] = logical_group
|
||||
|
||||
for r_num in range(0, self.config.num_of_racks):
|
||||
|
||||
# for test
|
||||
'''
|
||||
num_of_hosts = 0
|
||||
if r_num == 1:
|
||||
num_of_hosts = 1
|
||||
else:
|
||||
num_of_hosts = 2
|
||||
for h_num in range(0, num_of_hosts):
|
||||
'''
|
||||
for h_num in range(0, self.config.num_of_hosts_per_rack):
|
||||
host = Host(self.config.mode + "0r" + str(r_num) + "c" + str(h_num))
|
||||
host = Host(self.config.mode + "0r" + str(r_num) + "c" +
|
||||
str(h_num))
|
||||
host.tag.append("nova")
|
||||
host.memberships["nova"] = logical_group
|
||||
|
||||
@ -81,9 +69,11 @@ class SimCompute(object):
|
||||
aggregate = _logical_groups["aggregate" + str(a_num)]
|
||||
for r_num in range(0, self.config.num_of_racks):
|
||||
for h_num in range(0, self.config.num_of_hosts_per_rack):
|
||||
host_name = self.config.mode + "0r" + str(r_num) + "c" + str(h_num)
|
||||
host_name = self.config.mode + "0r" + str(r_num) + "c" + \
|
||||
str(h_num)
|
||||
if host_name in _hosts.keys():
|
||||
if (h_num % (self.config.aggregated_ratio + a_num)) == 0:
|
||||
if (h_num %
|
||||
(self.config.aggregated_ratio + a_num)) == 0:
|
||||
host = _hosts[host_name]
|
||||
host.memberships[aggregate.name] = aggregate
|
||||
|
||||
@ -94,40 +84,29 @@ class SimCompute(object):
|
||||
|
||||
def _set_resources(self, _hosts):
|
||||
for r_num in range(0, self.config.num_of_racks):
|
||||
|
||||
# for test
|
||||
'''
|
||||
num_of_hosts = 0
|
||||
if r_num == 1:
|
||||
num_of_hosts = 1
|
||||
else:
|
||||
num_of_hosts = 2
|
||||
for h_num in range(0, num_of_hosts):
|
||||
'''
|
||||
for h_num in range(0, self.config.num_of_hosts_per_rack):
|
||||
host_name = self.config.mode + "0r" + str(r_num) + "c" + str(h_num)
|
||||
host_name = self.config.mode + "0r" + str(r_num) + "c" + \
|
||||
str(h_num)
|
||||
if host_name in _hosts.keys():
|
||||
host = _hosts[host_name]
|
||||
# for test
|
||||
'''
|
||||
if r_num == 1:
|
||||
host.status = "disabled"
|
||||
host.state = "down"
|
||||
'''
|
||||
host.original_vCPUs = float(self.config.cpus_per_host)
|
||||
host.vCPUs_used = 0.0
|
||||
host.original_mem_cap = float(self.config.mem_per_host)
|
||||
host.free_mem_mb = host.original_mem_cap
|
||||
host.original_local_disk_cap = float(self.config.disk_per_host)
|
||||
host.original_local_disk_cap = \
|
||||
float(self.config.disk_per_host)
|
||||
host.free_disk_gb = host.original_local_disk_cap
|
||||
host.disk_available_least = host.original_local_disk_cap
|
||||
|
||||
def set_flavors(self, _flavors):
|
||||
"""Return success after setting passed in flavors."""
|
||||
for f_num in range(0, self.config.num_of_basic_flavors):
|
||||
flavor = Flavor("bflavor" + str(f_num))
|
||||
flavor.vCPUs = float(self.config.base_flavor_cpus * (f_num + 1))
|
||||
flavor.mem_cap = float(self.config.base_flavor_mem * (f_num + 1))
|
||||
flavor.disk_cap = float(self.config.base_flavor_disk * (f_num + 1)) + 10.0 + 20.0 / 1024.0
|
||||
flavor.disk_cap = \
|
||||
float(self.config.base_flavor_disk * (f_num + 1)) + \
|
||||
10.0 + 20.0 / 1024.0
|
||||
|
||||
_flavors[flavor.name] = flavor
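The simulated basic flavors scale linearly with their index, and the disk size adds a fixed overhead of 10 GB plus 20 MB (20.0 / 1024.0 GB). With hypothetical config values base_flavor_cpus=1, base_flavor_mem=1024, and base_flavor_disk=10, the second basic flavor (f_num=1) works out to:

vcpus = 1 * (1 + 1)                            # 2.0 vCPUs
mem_cap = 1024 * (1 + 1)                       # 2048 MB
disk_cap = 10 * (1 + 1) + 10.0 + 20.0 / 1024.0 # ~30.02 GB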
@ -137,7 +116,6 @@ class SimCompute(object):
|
||||
flavor.mem_cap = self.config.base_flavor_mem * (a_num + 1)
|
||||
flavor.disk_cap = self.config.base_flavor_disk * (a_num + 1)
|
||||
|
||||
# flavor.extra_specs["availability_zone"] = "nova"
|
||||
flavor.extra_specs["cpu_allocation_ratio"] = "0.5"
|
||||
|
||||
_flavors[flavor.name] = flavor
|
||||
|
@ -1,36 +1,33 @@
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
# Copyright 2014-2017 AT&T Intellectual Property
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Simulate datacenter configurations (i.e., layout, cabling)."""
|
||||
|
||||
#################################################################################################################
|
||||
# Author: Gueyoung Jung
|
||||
# Contact: gjung@research.att.com
|
||||
# Version 2.0.2: Feb. 9, 2016
|
||||
#
|
||||
# Functions
|
||||
# - Simulate datacenter configurations (i.e., layout, cabling)
|
||||
#
|
||||
#################################################################################################################
|
||||
|
||||
|
||||
from valet.engine.resource_manager.resource_base import HostGroup, Host, Switch, Link
|
||||
from valet.engine.resource_manager.resource_base \
|
||||
import HostGroup, Host, Switch, Link
|
||||
|
||||
|
||||
class SimTopology(object):
|
||||
"""Simulate Network and Host Topology class."""
|
||||
|
||||
def __init__(self, _config):
|
||||
"""Init."""
|
||||
self.config = _config
|
||||
|
||||
def set_topology(self, _datacenter, _host_groups, _hosts, _switches):
|
||||
"""Return success string after setting network and host topology."""
|
||||
self._set_network_topology(_switches)
|
||||
self._set_host_topology(_datacenter, _host_groups, _hosts, _switches)
|
||||
|
||||
@ -71,7 +68,8 @@ class SimTopology(object):
|
||||
ps = None
|
||||
if (s_num % 2) == 0:
|
||||
if (s_num + 1) < self.config.num_of_spine_switches:
|
||||
ps = _switches[root_switch.name + "s" + str(s_num + 1)]
|
||||
ps = _switches[root_switch.name + "s" +
|
||||
str(s_num + 1)]
|
||||
else:
|
||||
ps = _switches[root_switch.name + "s" + str(s_num - 1)]
|
||||
if ps is not None:
|
||||
@ -87,7 +85,8 @@ class SimTopology(object):
|
||||
parent_switch_list = []
|
||||
if self.config.num_of_spine_switches > 0:
|
||||
for s_num in range(0, self.config.num_of_spine_switches):
|
||||
parent_switch_list.append(_switches[root_switch.name + "s" + str(s_num)])
|
||||
parent_switch_list.append(_switches[root_switch.name +
|
||||
"s" + str(s_num)])
|
||||
else:
|
||||
parent_switch_list.append(_switches[root_switch.name])
|
@ -1,18 +1,20 @@
|
||||
#
|
||||
# Copyright 2014-2017 AT&T Intellectual Property
|
||||
#
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Topology class - performs actual setting up of Topology object."""
|
||||
|
||||
import copy
|
||||
import sys
|
||||
|
||||
@ -21,18 +23,24 @@ from valet.engine.resource_manager.resource_base import HostGroup, Switch, Link
|
||||
|
||||
|
||||
class Topology(object):
|
||||
"""Topology class."""
|
||||
|
||||
def __init__(self, _config, _logger):
|
||||
"""Init config and logger."""
|
||||
self.config = _config
|
||||
self.logger = _logger
|
||||
|
||||
# Triggered by rhosts change
|
||||
def set_topology(self, _datacenter, _host_groups, _hosts, _rhosts, _switches):
|
||||
result_status = self._set_host_topology(_datacenter, _host_groups, _hosts, _rhosts)
|
||||
def set_topology(self, _datacenter, _host_groups, _hosts, _rhosts,
|
||||
_switches):
|
||||
"""Return result status if setting host or network topology fails."""
|
||||
result_status = self._set_host_topology(_datacenter, _host_groups,
|
||||
_hosts, _rhosts)
|
||||
if result_status != "success":
|
||||
return result_status
|
||||
|
||||
result_status = self._set_network_topology(_datacenter, _host_groups, _hosts, _switches)
|
||||
result_status = self._set_network_topology(_datacenter, _host_groups,
|
||||
_hosts, _switches)
|
||||
if result_status != "success":
|
||||
return result_status
|
||||
|
||||
@ -80,7 +88,8 @@ class Topology(object):
|
||||
return "success"
|
||||
|
||||
# NOTE: this is just a mock-up
|
||||
def _set_network_topology(self, _datacenter, _host_groups, _hosts, _switches):
|
||||
def _set_network_topology(self, _datacenter, _host_groups, _hosts,
|
||||
_switches):
|
||||
root_switch = Switch(_datacenter.name)
|
||||
root_switch.switch_type = "root"
|
||||
|
||||
@ -134,7 +143,8 @@ class Topology(object):
|
||||
if index >= self.config.num_of_region_chars:
|
||||
if not isdigit(c):
|
||||
if index == self.config.num_of_region_chars:
|
||||
status = "invalid region name = " + _host_name[:index] + c
|
||||
status = "invalid region name = " + \
|
||||
_host_name[:index] + c
|
||||
validated_name = False
|
||||
break
|
||||
|
||||
@ -152,7 +162,9 @@ class Topology(object):
|
||||
validated_name = False
|
||||
break
|
||||
|
||||
if end_of_rack_index == 0 and index > (end_of_region_index + 1):
|
||||
if end_of_rack_index == 0 and \
|
||||
index > (end_of_region_index + 1):
|
||||
|
||||
end_of_rack_index = index
|
||||
num_of_fields += 1
|
||||
|
||||
@ -179,7 +191,8 @@ class Topology(object):
|
||||
validated_name = False
|
||||
|
||||
if num_of_fields != 3:
|
||||
status = "invalid number of identification fields = " + str(num_of_fields)
|
||||
status = "invalid number of identification fields = " + \
|
||||
str(num_of_fields)
|
||||
validated_name = False
|
||||
|
||||
if validated_name is False:
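The validation above expects host names built from exactly three identification fields: a region code, a rack index, and a node index (the simulator, for example, builds names like mode + "0r" + rack + "c" + node). A simplified, hypothetical regex version of that check follows; the real code walks the name character by character so it can report which field is malformed:

import re

# Assumed layout, e.g. "sim0r1c2": region code, rack index, node index.
HOST_NAME_RE = re.compile(r"^[a-z]+\d+r\d+c\d+$")

def validated_host_name(name):
    # Three identification fields: region, rack, node.
    return HOST_NAME_RE.match(name) is not None

# validated_host_name("sim0r1c2") -> True
# validated_host_name("sim0r1")   -> False (only two fields)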
@ -1,28 +1,38 @@
|
||||
#
|
||||
# Copyright 2014-2017 AT&T Intellectual Property
|
||||
#
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Topology Manager.
|
||||
|
||||
Actions involved in setting up and managing topology. This includes setting
|
||||
topology, checking updates, creating new switches (also hosts and links), as
|
||||
well as updating them.
|
||||
"""
|
||||
|
||||
import threading
|
||||
import time
|
||||
|
||||
from valet.engine.resource_manager.resource_base import Datacenter, HostGroup, Host, Switch, Link
|
||||
from valet.engine.resource_manager.resource_base \
|
||||
import Datacenter, HostGroup, Host, Switch, Link
|
||||
from valet.engine.resource_manager.topology import Topology
|
||||
|
||||
|
||||
class TopologyManager(threading.Thread):
|
||||
"""Topology Manager Class."""
|
||||
|
||||
def __init__(self, _t_id, _t_name, _resource, _data_lock, _config, _logger):
|
||||
"""Init Topology Manager."""
|
||||
threading.Thread.__init__(self)
|
||||
|
||||
self.thread_id = _t_id
|
||||
@ -37,7 +47,9 @@ class TopologyManager(threading.Thread):
|
||||
self.logger = _logger
|
||||
|
||||
def run(self):
|
||||
self.logger.info("TopologyManager: start " + self.thread_name + " ......")
|
||||
"""Function starts and tracks Topology Manager Thread."""
|
||||
self.logger.info("TopologyManager: start " +
|
||||
self.thread_name + " ......")
|
||||
|
||||
if self.config.topology_trigger_freq > 0:
|
||||
period_end = time.time() + self.config.topology_trigger_freq
|
||||
@ -61,7 +73,10 @@ class TopologyManager(threading.Thread):
|
||||
time.sleep(70)
|
||||
|
||||
now = time.localtime()
|
||||
if now.tm_year > last_trigger_year or now.tm_mon > last_trigger_mon or now.tm_mday > last_trigger_mday:
|
||||
if now.tm_year > last_trigger_year or \
|
||||
now.tm_mon > last_trigger_mon or \
|
||||
now.tm_mday > last_trigger_mday:
|
||||
|
||||
timeout = False
|
||||
|
||||
if timeout is False and \
|
||||
@ -77,13 +92,14 @@ class TopologyManager(threading.Thread):
|
||||
|
||||
def _run(self):
|
||||
|
||||
self.logger.info("TopologyManager: --- start topology status update ---")
|
||||
self.logger.info("TopologyManager: --- start topology "
|
||||
"status update ---")
|
||||
|
||||
self.data_lock.acquire()
|
||||
try:
|
||||
if self.set_topology() is True:
|
||||
if self.resource.update_topology() is False:
|
||||
# TODO: ignore?
|
||||
# TODO(UNKNOWN): ignore?
|
||||
pass
|
||||
finally:
|
||||
self.data_lock.release()
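The manager thread wakes up either on a fixed topology_trigger_freq or once per day, then refreshes the shared resource model under data_lock so the optimizer never sees a half-updated topology. A stripped-down sketch of that loop, omitting the daily-trigger bookkeeping and the shutdown flag the real thread honors:

import time

def topology_loop(manager):
    period_end = time.time() + manager.config.topology_trigger_freq
    while True:   # the real thread also checks an end-of-process flag
        time.sleep(1)
        if time.time() > period_end:
            manager.data_lock.acquire()
            try:
                if manager.set_topology():
                    manager.resource.update_topology()
            finally:
                manager.data_lock.release()
            period_end = time.time() + manager.config.topology_trigger_freq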
@ -91,6 +107,7 @@ class TopologyManager(threading.Thread):
|
||||
self.logger.info("TopologyManager: --- done topology status update ---")
|
||||
|
||||
def set_topology(self):
|
||||
"""Return True if datacenter topology successfully setup."""
|
||||
datacenter = None
|
||||
host_groups = {}
|
||||
hosts = {}
|
||||
@ -105,7 +122,8 @@ class TopologyManager(threading.Thread):
|
||||
|
||||
topology = Topology(self.config, self.logger)
|
||||
|
||||
status = topology.set_topology(datacenter, host_groups, hosts, self.resource.hosts, switches)
|
||||
status = topology.set_topology(datacenter, host_groups, hosts,
|
||||
self.resource.hosts, switches)
|
||||
if status != "success":
|
||||
self.logger.error("TopologyManager: " + status)
|
||||
return False
|
||||
@ -122,7 +140,8 @@ class TopologyManager(threading.Thread):
|
||||
|
||||
new_switch.last_update = time.time()
|
||||
|
||||
self.logger.warn("TopologyManager: new switch (" + new_switch.name + ") added")
|
||||
self.logger.warn("TopologyManager: new switch (" +
|
||||
new_switch.name + ") added")
|
||||
|
||||
for rsk in self.resource.switches.keys():
|
||||
if rsk not in _switches.keys():
|
||||
@ -131,7 +150,8 @@ class TopologyManager(threading.Thread):
|
||||
|
||||
switch.last_update = time.time()
|
||||
|
||||
self.logger.warn("TopologyManager: switch (" + switch.name + ") disabled")
|
||||
self.logger.warn("TopologyManager: switch (" +
|
||||
switch.name + ") disabled")
|
||||
|
||||
for hk in _hosts.keys():
|
||||
if hk not in self.resource.hosts.keys():
|
||||
@ -140,7 +160,8 @@ class TopologyManager(threading.Thread):
|
||||
|
||||
new_host.last_update = time.time()
|
||||
|
||||
self.logger.warn("TopologyManager: new host (" + new_host.name + ") added from configuration")
|
||||
self.logger.warn("TopologyManager: new host (" +
|
||||
new_host.name + ") added from configuration")
|
||||
|
||||
for rhk in self.resource.hosts.keys():
|
||||
if rhk not in _hosts.keys():
|
||||
@ -150,7 +171,8 @@ class TopologyManager(threading.Thread):
|
||||
|
||||
host.last_update = time.time()
|
||||
|
||||
self.logger.warn("TopologyManager: host (" + host.name + ") removed from configuration")
|
||||
self.logger.warn("TopologyManager: host (" +
|
||||
host.name + ") removed from configuration")
|
||||
|
||||
for hgk in _host_groups.keys():
|
||||
if hgk not in self.resource.host_groups.keys():
|
||||
@ -159,7 +181,8 @@ class TopologyManager(threading.Thread):
|
||||
|
||||
new_host_group.last_update = time.time()
|
||||
|
||||
self.logger.warn("TopologyManager: new host_group (" + new_host_group.name + ") added")
|
||||
self.logger.warn("TopologyManager: new host_group (" +
|
||||
new_host_group.name + ") added")
|
||||
|
||||
for rhgk in self.resource.host_groups.keys():
|
||||
if rhgk not in _host_groups.keys():
|
||||
@ -168,7 +191,8 @@ class TopologyManager(threading.Thread):
|
||||
|
||||
host_group.last_update = time.time()
|
||||
|
||||
self.logger.warn("TopologyManager: host_group (" + host_group.name + ") disabled")
|
||||
self.logger.warn("TopologyManager: host_group (" +
|
||||
host_group.name + ") disabled")
|
||||
|
||||
for sk in _switches.keys():
|
||||
switch = _switches[sk]
|
||||
@ -180,7 +204,8 @@ class TopologyManager(threading.Thread):
|
||||
for hk in _hosts.keys():
|
||||
host = _hosts[hk]
|
||||
rhost = self.resource.hosts[hk]
|
||||
(topology_updated, link_updated) = self._check_host_update(host, rhost)
|
||||
(topology_updated, link_updated) = \
|
||||
self._check_host_update(host, rhost)
|
||||
if topology_updated is True:
|
||||
rhost.last_update = time.time()
|
||||
if link_updated is True:
|
||||
@ -189,13 +214,15 @@ class TopologyManager(threading.Thread):
|
||||
for hgk in _host_groups.keys():
|
||||
hg = _host_groups[hgk]
|
||||
rhg = self.resource.host_groups[hgk]
|
||||
(topology_updated, link_updated) = self._check_host_group_update(hg, rhg)
|
||||
(topology_updated, link_updated) = \
|
||||
self._check_host_group_update(hg, rhg)
|
||||
if topology_updated is True:
|
||||
rhg.last_update = time.time()
|
||||
if link_updated is True:
|
||||
rhg.last_link_update = time.time()
|
||||
|
||||
(topology_updated, link_updated) = self._check_datacenter_update(_datacenter)
|
||||
(topology_updated, link_updated) = \
|
||||
self._check_datacenter_update(_datacenter)
|
||||
if topology_updated is True:
|
||||
self.resource.datacenter.last_update = time.time()
|
||||
if link_updated is True:
|
||||
@ -242,12 +269,14 @@ class TopologyManager(threading.Thread):
|
||||
if _switch.switch_type != _rswitch.switch_type:
|
||||
_rswitch.switch_type = _switch.switch_type
|
||||
updated = True
|
||||
self.logger.warn("TopologyManager: switch (" + _rswitch.name + ") updated (switch type)")
|
||||
self.logger.warn("TopologyManager: switch (" + _rswitch.name +
|
||||
") updated (switch type)")
|
||||
|
||||
if _rswitch.status == "disabled":
|
||||
_rswitch.status = "enabled"
|
||||
updated = True
|
||||
self.logger.warn("TopologyManager: switch (" + _rswitch.name + ") updated (enabled)")
|
||||
self.logger.warn("TopologyManager: switch (" + _rswitch.name +
|
||||
") updated (enabled)")
|
||||
|
||||
for ulk in _switch.up_links.keys():
|
||||
exist = False
|
||||
@ -259,7 +288,8 @@ class TopologyManager(threading.Thread):
|
||||
new_link = self._create_new_link(_switch.up_links[ulk])
|
||||
_rswitch.up_links[new_link.name] = new_link
|
||||
updated = True
|
||||
self.logger.warn("TopologyManager: switch (" + _rswitch.name + ") updated (new link)")
|
||||
self.logger.warn("TopologyManager: switch (" + _rswitch.name +
|
||||
") updated (new link)")
|
||||
|
||||
for rulk in _rswitch.up_links.keys():
|
||||
exist = False
|
||||
@ -270,14 +300,16 @@ class TopologyManager(threading.Thread):
|
||||
if exist is False:
|
||||
del _rswitch.up_links[rulk]
|
||||
updated = True
|
||||
self.logger.warn("TopologyManager: switch (" + _rswitch.name + ") updated (link removed)")
|
||||
self.logger.warn("TopologyManager: switch (" + _rswitch.name +
|
||||
") updated (link removed)")
|
||||
|
||||
for ulk in _rswitch.up_links.keys():
|
||||
link = _switch.up_links[ulk]
|
||||
rlink = _rswitch.up_links[ulk]
|
||||
if self._check_link_update(link, rlink) is True:
|
||||
updated = True
|
||||
self.logger.warn("TopologyManager: switch (" + _rswitch.name + ") updated (bandwidth)")
|
||||
self.logger.warn("TopologyManager: switch (" + _rswitch.name +
|
||||
") updated (bandwidth)")
|
||||
|
||||
for plk in _switch.peer_links.keys():
|
||||
exist = False
|
||||
@ -289,7 +321,8 @@ class TopologyManager(threading.Thread):
|
||||
new_link = self._create_new_link(_switch.peer_links[plk])
|
||||
_rswitch.peer_links[new_link.name] = new_link
|
||||
updated = True
|
||||
self.logger.warn("TopologyManager: switch (" + _rswitch.name + ") updated (new link)")
|
||||
self.logger.warn("TopologyManager: switch (" + _rswitch.name +
|
||||
") updated (new link)")
|
||||
|
||||
for rplk in _rswitch.peer_links.keys():
|
||||
exist = False
|
||||
@ -300,14 +333,16 @@ class TopologyManager(threading.Thread):
|
||||
if exist is False:
|
||||
del _rswitch.peer_links[rplk]
|
||||
updated = True
|
||||
self.logger.warn("TopologyManager: switch (" + _rswitch.name + ") updated (link removed)")
|
||||
self.logger.warn("TopologyManager: switch (" + _rswitch.name +
|
||||
") updated (link removed)")
|
||||
|
||||
for plk in _rswitch.peer_links.keys():
|
||||
link = _switch.peer_links[plk]
|
||||
rlink = _rswitch.peer_links[plk]
|
||||
if self._check_link_update(link, rlink) is True:
|
||||
updated = True
|
||||
self.logger.warn("TopologyManager: switch (" + _rswitch.name + ") updated (bandwidth)")
|
||||
self.logger.warn("TopologyManager: switch (" + _rswitch.name +
|
||||
") updated (bandwidth)")
|
||||
|
||||
return updated
|
||||
|
||||
@ -327,15 +362,20 @@ class TopologyManager(threading.Thread):
|
||||
if "infra" not in _rhost.tag:
|
||||
_rhost.tag.append("infra")
|
||||
updated = True
|
||||
self.logger.warn("TopologyManager: host (" + _rhost.name + ") updated (tag)")
|
||||
self.logger.warn("TopologyManager: host (" + _rhost.name +
|
||||
") updated (tag)")
|
||||
|
||||
if _rhost.host_group is None or \
|
||||
_host.host_group.name != _rhost.host_group.name:
|
||||
|
||||
if _rhost.host_group is None or _host.host_group.name != _rhost.host_group.name:
|
||||
if _host.host_group.name in self.resource.host_groups.keys():
|
||||
_rhost.host_group = self.resource.host_groups[_host.host_group.name]
|
||||
_rhost.host_group = \
|
||||
self.resource.host_groups[_host.host_group.name]
|
||||
else:
|
||||
_rhost.host_group = self.resource.datacenter
|
||||
updated = True
|
||||
self.logger.warn("TopologyManager: host (" + _rhost.name + ") updated (host_group)")
|
||||
self.logger.warn("TopologyManager: host (" + _rhost.name +
|
||||
") updated (host_group)")
|
||||
|
||||
for sk in _host.switches.keys():
|
||||
exist = False
|
||||
@ -346,7 +386,8 @@ class TopologyManager(threading.Thread):
|
||||
if exist is False:
|
||||
_rhost.switches[sk] = self.resource.switches[sk]
|
||||
link_updated = True
|
||||
self.logger.warn("TopologyManager: host (" + _rhost.name + ") updated (new switch)")
|
||||
self.logger.warn("TopologyManager: host (" + _rhost.name +
|
||||
") updated (new switch)")
|
||||
|
||||
for rsk in _rhost.switches.keys():
|
||||
exist = False
|
||||
@ -357,7 +398,8 @@ class TopologyManager(threading.Thread):
|
||||
if exist is False:
|
||||
del _rhost.switches[rsk]
|
||||
link_updated = True
|
||||
self.logger.warn("TopologyManager: host (" + _rhost.name + ") updated (switch removed)")
|
||||
self.logger.warn("TopologyManager: host (" + _rhost.name +
|
||||
") updated (switch removed)")
|
||||
|
||||
return (updated, link_updated)
|
||||
|
||||
@ -368,20 +410,26 @@ class TopologyManager(threading.Thread):
|
||||
if _hg.host_type != _rhg.host_type:
|
||||
_rhg.host_type = _hg.host_type
|
||||
updated = True
|
||||
self.logger.warn("TopologyManager: host_group (" + _rhg.name + ") updated (hosting type)")
|
||||
self.logger.warn("TopologyManager: host_group (" + _rhg.name +
|
||||
") updated (hosting type)")
|
||||
|
||||
if _rhg.status == "disabled":
|
||||
_rhg.status = "enabled"
|
||||
updated = True
|
||||
self.logger.warn("TopologyManager: host_group (" + _rhg.name + ") updated (enabled)")
|
||||
self.logger.warn("TopologyManager: host_group (" + _rhg.name +
|
||||
") updated (enabled)")
|
||||
|
||||
if _rhg.parent_resource is None or \
|
||||
_hg.parent_resource.name != _rhg.parent_resource.name:
|
||||
|
||||
if _rhg.parent_resource is None or _hg.parent_resource.name != _rhg.parent_resource.name:
|
||||
if _hg.parent_resource.name in self.resource.host_groups.keys():
|
||||
_rhg.parent_resource = self.resource.host_groups[_hg.parent_resource.name]
|
||||
_rhg.parent_resource = \
|
||||
self.resource.host_groups[_hg.parent_resource.name]
|
||||
else:
|
||||
_rhg.parent_resource = self.resource.datacenter
|
||||
updated = True
|
||||
self.logger.warn("TopologyManager: host_group (" + _rhg.name + ") updated (parent host_group)")
|
||||
self.logger.warn("TopologyManager: host_group (" + _rhg.name +
|
||||
") updated (parent host_group)")
|
||||
|
||||
for rk in _hg.child_resources.keys():
|
||||
exist = False
|
||||
@ -395,7 +443,8 @@ class TopologyManager(threading.Thread):
|
||||
elif _rhg.host_type == "cluster":
|
||||
_rhg.child_resources[rk] = self.resource.host_groups[rk]
|
||||
updated = True
|
||||
self.logger.warn("TopologyManager: host_group (" + _rhg.name + ") updated (new child host)")
|
||||
self.logger.warn("TopologyManager: host_group (" + _rhg.name +
|
||||
") updated (new child host)")
|
||||
|
||||
for rrk in _rhg.child_resources.keys():
|
||||
exist = False
|
||||
@ -406,7 +455,8 @@ class TopologyManager(threading.Thread):
|
||||
if exist is False:
|
||||
del _rhg.child_resources[rrk]
|
||||
updated = True
|
||||
self.logger.warn("TopologyManager: host_group (" + _rhg.name + ") updated (child host removed)")
|
||||
self.logger.warn("TopologyManager: host_group (" + _rhg.name +
|
||||
") updated (child host removed)")
|
||||
|
||||
for sk in _hg.switches.keys():
|
||||
exist = False
|
||||
@ -417,7 +467,8 @@ class TopologyManager(threading.Thread):
|
||||
if exist is False:
|
||||
_rhg.switches[sk] = self.resource.switches[sk]
|
||||
link_updated = True
|
||||
self.logger.warn("TopologyManager: host_group (" + _rhg.name + ") updated (new switch)")
|
||||
self.logger.warn("TopologyManager: host_group (" + _rhg.name +
|
||||
") updated (new switch)")
|
||||
|
||||
for rsk in _rhg.switches.keys():
|
||||
exist = False
|
||||
@ -428,7 +479,8 @@ class TopologyManager(threading.Thread):
|
||||
if exist is False:
|
||||
del _rhg.switches[rsk]
|
||||
link_updated = True
|
||||
self.logger.warn("TopologyManager: host_group (" + _rhg.name + ") updated (switch removed)")
|
||||
self.logger.warn("TopologyManager: host_group (" + _rhg.name +
|
||||
") updated (switch removed)")
|
||||
|
||||
return (updated, link_updated)
|
||||
|
||||
@ -440,13 +492,15 @@ class TopologyManager(threading.Thread):
|
||||
if rc not in self.resource.datacenter.region_code_list:
|
||||
self.resource.datacenter.region_code_list.append(rc)
|
||||
updated = True
|
||||
self.logger.warn("TopologyManager: datacenter updated (new region code, " + rc + ")")
|
||||
self.logger.warn("TopologyManager: datacenter updated "
|
||||
"(new region code, " + rc + ")")
|
||||
|
||||
for rrc in self.resource.datacenter.region_code_list:
|
||||
if rrc not in _datacenter.region_code_list:
|
||||
self.resource.datacenter.region_code_list.remove(rrc)
|
||||
updated = True
|
||||
self.logger.warn("TopologyManager: datacenter updated (region code, " + rrc + ", removed)")
|
||||
self.logger.warn("TopologyManager: datacenter updated "
|
||||
"(region code, " + rrc + ", removed)")
|
||||
|
||||
for rk in _datacenter.resources.keys():
|
||||
exist = False
|
||||
@ -457,11 +511,14 @@ class TopologyManager(threading.Thread):
|
||||
if exist is False:
|
||||
r = _datacenter.resources[rk]
|
||||
if isinstance(r, HostGroup):
|
||||
self.resource.datacenter.resources[rk] = self.resource.host_groups[rk]
|
||||
self.resource.datacenter.resources[rk] = \
|
||||
self.resource.host_groups[rk]
|
||||
elif isinstance(r, Host):
|
||||
self.resource.datacenter.resources[rk] = self.resource.hosts[rk]
|
||||
self.resource.datacenter.resources[rk] = \
|
||||
self.resource.hosts[rk]
|
||||
updated = True
|
||||
self.logger.warn("TopologyManager: datacenter updated (new resource)")
|
||||
self.logger.warn("TopologyManager: datacenter updated "
|
||||
"(new resource)")
|
||||
|
||||
for rrk in self.resource.datacenter.resources.keys():
|
||||
exist = False
|
||||
@ -472,7 +529,8 @@ class TopologyManager(threading.Thread):
|
||||
if exist is False:
|
||||
del self.resource.datacenter.resources[rrk]
|
||||
updated = True
|
||||
self.logger.warn("TopologyManager: datacenter updated (resource removed)")
|
||||
self.logger.warn("TopologyManager: datacenter updated "
|
||||
"(resource removed)")
|
||||
|
||||
for sk in _datacenter.root_switches.keys():
|
||||
exist = False
|
||||
@ -481,9 +539,11 @@ class TopologyManager(threading.Thread):
|
||||
exist = True
|
||||
break
|
||||
if exist is False:
|
||||
self.resource.datacenter.root_switches[sk] = self.resource.switches[sk]
|
||||
self.resource.datacenter.root_switches[sk] = \
|
||||
self.resource.switches[sk]
|
||||
link_updated = True
|
||||
self.logger.warn("TopologyManager: datacenter updated (new switch)")
|
||||
self.logger.warn("TopologyManager: datacenter updated "
|
||||
"(new switch)")
|
||||
|
||||
for rsk in self.resource.datacenter.root_switches.keys():
|
||||
exist = False
|
||||
@ -494,6 +554,7 @@ class TopologyManager(threading.Thread):
|
||||
if exist is False:
|
||||
del self.resource.datacenter.root_switches[rsk]
|
||||
link_updated = True
|
||||
self.logger.warn("TopologyManager: datacenter updated (switch removed)")
|
||||
self.logger.warn("TopologyManager: datacenter updated "
|
||||
"(switch removed)")
|
||||
|
||||
return (updated, link_updated)
|
||||
|
@ -1,29 +1,36 @@
|
||||
#
|
||||
# Copyright 2014-2017 AT&T Intellectual Property
|
||||
#
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# - Simulate datacenter configurations (i.e., layout, cabling)
|
||||
"""Simulate datacenter configurations (i.e., layout, cabling)."""
|
||||
|
||||
from valet.engine.resource_manager.resource_base import HostGroup, Host, Switch, Link
|
||||
from valet.engine.resource_manager.resource_base \
|
||||
import HostGroup, Host, Switch, Link
|
||||
|
||||
|
||||
class SimTopology(object):
|
||||
"""Simulate Topology class.
|
||||
|
||||
Sim network and host topology for datacenters.
|
||||
"""
|
||||
|
||||
def __init__(self, _config):
|
||||
"""Init."""
|
||||
self.config = _config
|
||||
|
||||
def set_topology(self, _datacenter, _host_groups, _hosts, _switches):
|
||||
"""Return success after setting network and host topology."""
|
||||
self._set_network_topology(_switches)
|
||||
self._set_host_topology(_datacenter, _host_groups, _hosts, _switches)
|
||||
|
||||
@ -64,7 +71,8 @@ class SimTopology(object):
|
||||
ps = None
|
||||
if (s_num % 2) == 0:
|
||||
if (s_num + 1) < self.config.num_of_spine_switches:
|
||||
ps = _switches[root_switch.name + "s" + str(s_num + 1)]
|
||||
ps = _switches[root_switch.name + "s" +
|
||||
str(s_num + 1)]
|
||||
else:
|
||||
ps = _switches[root_switch.name + "s" + str(s_num - 1)]
|
||||
if ps is not None:
|
||||
@ -80,7 +88,8 @@ class SimTopology(object):
|
||||
parent_switch_list = []
|
||||
if self.config.num_of_spine_switches > 0:
|
||||
for s_num in range(0, self.config.num_of_spine_switches):
|
||||
parent_switch_list.append(_switches[root_switch.name + "s" + str(s_num)])
|
||||
parent_switch_list.append(_switches[root_switch.name +
|
||||
"s" + str(s_num)])
|
||||
else:
|
||||
parent_switch_list.append(_switches[root_switch.name])
|
||||
|
||||
|
@ -1,5 +1,3 @@
|
||||
#!/usr/bin/env python
|
||||
# vi: sw=4 ts=4:
|
||||
#
|
||||
# Copyright 2014-2017 AT&T Intellectual Property
|
||||
#
|
||||
@ -15,25 +13,25 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""
|
||||
"""HA Valet.
|
||||
|
||||
Mnemonic: ha_valet.py
|
||||
Abstract: High availability script for valet processes.
|
||||
starts it's configured processes, and pings for their availability.
|
||||
If local instances are not running, then makes the
|
||||
current instances start. If it finds multiple instances running, then
|
||||
determines which instance should be shut down based on priorities.
|
||||
Mnemonic: ha_valet.py
|
||||
Abstract: High availability script for valet processes. Starts its
|
||||
configured processes, and pings for their availability. If local
|
||||
instances are not running, then makes the current instances
|
||||
start. If it finds multiple instances running, then determines
|
||||
which instance should be shut down based on priorities.
|
||||
|
||||
Author: Amnon Sagiv based on ha_tegu by Kaustubh Joshi
|
||||
Author: Amnon Sagiv based on ha_tegu by Kaustubh Joshi
|
||||
|
||||
------------------------------------------------------------------------------
|
||||
|
||||
Algorithm
|
||||
-----------
|
||||
The ha_valet script runs on each valet node in a continuous loop checking for
|
||||
heartbeats from all the valet nodes found in the "stand_by_list" conf property once
|
||||
every 5 secs (default). A heartbeat is obtained by invoking the "test_command"
|
||||
conf property.
|
||||
heartbeats from all the valet nodes found in the "stand_by_list" conf property
|
||||
once every 5 secs (default). A heartbeat is obtained by invoking the
|
||||
"test_command" conf property.
|
||||
If exactly one monitored process instance is running, the script does
|
||||
nothing. If no instance is running, then the local instance is activated after
|
||||
waiting for 5*priority seconds to let a higher priority valet take over
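In other words, every node polls its peers with test_command and uses its priority both as a startup delay and as the tie-breaker when two instances end up active. A rough sketch of one round of that loop, assuming a lower number means higher priority (hypothetical helper names, not the script's actual functions):

import time

HEARTBEAT_SEC = 5

def heartbeat_round(my_priority, active_peers, i_am_active,
                    start_local, stop_local):
    # active_peers: {host: priority} for peers whose test_command succeeded.
    if not i_am_active and not active_peers:
        # Nothing is running anywhere: wait longer the lower our priority,
        # so a higher-priority valet gets the chance to take over first.
        time.sleep(HEARTBEAT_SEC * my_priority)
        start_local()
    elif i_am_active and active_peers:
        # Split brain: two instances are up; the losing node backs off.
        if my_priority >= min(active_peers.values()):
            stop_local()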
@ -70,10 +68,10 @@ max_num_of_logs = 10
|
||||
|
||||
|
||||
PRIMARY_SETUP = 1
|
||||
RETRY_COUNT = 3 # How many times to retry ping command
|
||||
CONNECT_TIMEOUT = 3 # Ping timeout
|
||||
MAX_QUICK_STARTS = 10 # we stop if there are > 10 restarts in quick succession
|
||||
QUICK_RESTART_SEC = 150 # we consider it a quick restart if less than this
|
||||
RETRY_COUNT = 3 # How many times to retry ping command
|
||||
CONNECT_TIMEOUT = 3 # Ping timeout
|
||||
MAX_QUICK_STARTS = 10 # we stop if there are > 10 restarts in quick succession
|
||||
QUICK_RESTART_SEC = 150 # we consider it a quick restart if less than this
|
||||
|
||||
# HA Configuration
|
||||
HEARTBEAT_SEC = 5 # Heartbeat interval in seconds
|
||||
@ -111,7 +109,7 @@ CONF.register_opts(havalet_opts, ostro_group)
|
||||
|
||||
|
||||
def read_conf():
|
||||
"""returns dictionary of configured processes"""
|
||||
"""Return dictionary of configured processes."""
|
||||
return dict([
|
||||
('Ostro', {
|
||||
NAME: 'Ostro',
|
||||
@ -143,7 +141,8 @@ def prepare_log(obj, name):
|
||||
obj.log.setLevel(logging.DEBUG)
|
||||
# logging.register_options(CONF)
|
||||
# logging.setup(CONF, 'valet')
|
||||
handler = logging.handlers.RotatingFileHandler(LOG_DIR + name + '.log', maxBytes=max_log_size,
|
||||
handler = logging.handlers.RotatingFileHandler(LOG_DIR + name + '.log',
|
||||
maxBytes=max_log_size,
|
||||
backupCount=max_num_of_logs)
|
||||
fmt = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
|
||||
handler.setFormatter(fmt)
|
||||
@ -153,14 +152,16 @@ def prepare_log(obj, name):
|
||||
class HaValetThread (threading.Thread):
|
||||
|
||||
def __init__(self, data, exit_event):
|
||||
"""Initialize HAValetThread."""
|
||||
threading.Thread.__init__(self)
|
||||
self.data = data
|
||||
self.log = None
|
||||
|
||||
def run(self):
|
||||
"""Main function"""
|
||||
"""Main function."""
|
||||
prepare_log(self, self.data[NAME])
|
||||
self.log.info('HA Valet - ' + self.data[NAME] + ' Watcher Thread - starting')
|
||||
self.log.info('HA Valet - ' + self.data[NAME] +
|
||||
' Watcher Thread - starting')
|
||||
|
||||
fqdn_list = []
|
||||
this_node = socket.getfqdn()
|
||||
@ -181,7 +182,8 @@ class HaValetThread (threading.Thread):
|
||||
self.data[STAND_BY_LIST] = standby_list
|
||||
self.log.debug("modified stand by list: " + str(standby_list))
|
||||
except ValueError:
|
||||
self.log.debug("host " + this_node + " is not in standby list: %s - continue"
|
||||
self.log.debug("host " + this_node +
|
||||
" is not in standby list: %s - continue"
|
||||
% str(standby_list))
|
||||
break
|
||||
|
||||
@ -193,7 +195,7 @@ class HaValetThread (threading.Thread):
|
||||
pass
|
||||
|
||||
def _main_loop(self, this_node):
|
||||
""" Main heartbeat and liveness check loop
|
||||
"""Main heartbeat and liveness check loop.
|
||||
|
||||
:param this_node: host name
|
||||
:type this_node: string
|
||||
@ -225,16 +227,19 @@ class HaValetThread (threading.Thread):
|
||||
# No valet running. Wait for higher priority valet to activate.
|
||||
time.sleep(HEARTBEAT_SEC * my_priority)
|
||||
|
||||
self.log.info('checking status here - ' + host + ', my priority: ' + str(my_priority))
|
||||
self.log.info('checking status here - ' + host +
|
||||
', my priority: ' + str(my_priority))
|
||||
i_am_active, priority = self._is_active(eval(test_command))
|
||||
self.log.info(host + ': host_active = ' + str(i_am_active) + ', ' + str(priority))
|
||||
self.log.info(host + ': host_active = ' + str(i_am_active) +
|
||||
', ' + str(priority))
|
||||
any_active = i_am_active
|
||||
self.log.info('any active = ' + str(any_active))
|
||||
|
||||
# Check for active valets
|
||||
standby_list_is_empty = not standby_list
|
||||
if not standby_list_is_empty:
|
||||
self.log.debug('main loop: standby_list is not empty ' + str(standby_list))
|
||||
self.log.debug('main loop: standby_list is not empty ' +
|
||||
str(standby_list))
|
||||
for host_in_list in standby_list:
|
||||
if host_in_list == this_node:
|
||||
self.log.info('host_in_list is this_node - skipping')
|
||||
@ -242,39 +247,51 @@ class HaValetThread (threading.Thread):
|
||||
|
||||
self.log.info('checking status on - ' + host_in_list)
|
||||
host = host_in_list
|
||||
host_active, host_priority = self._is_active(eval(test_command))
|
||||
host_active, host_priority = \
|
||||
self._is_active(eval(test_command))
|
||||
host = self.data.get(HOST, 'localhost')
|
||||
self.log.info(host_in_list + ' - host_active = ' + str(host_active) + ', ' + str(host_priority))
|
||||
self.log.info(host_in_list + ' - host_active = ' +
|
||||
str(host_active) + ', ' + str(host_priority))
|
||||
# Check for split brain: 2 valets active
|
||||
if i_am_active and host_active:
|
||||
self.log.info('found two live instances, checking priorities')
|
||||
self.log.info('found two live instances, '
|
||||
'checking priorities')
|
||||
should_be_active = self._should_be_active(host_priority, my_priority)
|
||||
if should_be_active:
|
||||
self.log.info('deactivate myself, ' + host_in_list + ' already running')
|
||||
self._deactivate_process(eval(stop_command)) # Deactivate myself
|
||||
self.log.info('deactivate myself, ' + host_in_list +
|
||||
' already running')
|
||||
# Deactivate myself
|
||||
self._deactivate_process(eval(stop_command))
|
||||
i_am_active = False
|
||||
else:
|
||||
self.log.info('deactivate ' + self.data[NAME] + ' on ' + host_in_list +
|
||||
self.log.info('deactivate ' + self.data[NAME] +
|
||||
' on ' + host_in_list +
|
||||
', already running here')
|
||||
host = host_in_list
|
||||
self._deactivate_process(eval(stop_command)) # Deactivate other valet
|
||||
# Deactivate other valet
|
||||
self._deactivate_process(eval(stop_command))
|
||||
host = self.data.get(HOST, 'localhost')
|
||||
|
||||
# Track that at-least one valet is active
|
||||
any_active = any_active or host_active
|
||||
|
||||
# If no active process or I'm primary, then we must try to start one
|
||||
if not any_active or (not i_am_active and my_priority == PRIMARY_SETUP):
|
||||
if not any_active or \
|
||||
(not i_am_active and my_priority == PRIMARY_SETUP):
|
||||
self.log.warn('there is no instance up')
|
||||
self.log.info('Im primary instance: ' + str(my_priority is PRIMARY_SETUP))
|
||||
self.log.info('Im primary instance: ' +
|
||||
str(my_priority is PRIMARY_SETUP))
|
||||
if priority_wait or my_priority == PRIMARY_SETUP:
|
||||
now = int(time.time())
|
||||
if now - last_start < QUICK_RESTART_SEC: # quick restart (crash?)
|
||||
# quick restart (crash?)
|
||||
if now - last_start < QUICK_RESTART_SEC:
|
||||
quick_start += 1
|
||||
if quick_start > MAX_QUICK_STARTS:
|
||||
self.log.critical("too many restarts in quick succession.")
|
||||
self.log.critical("too many restarts "
|
||||
"in quick succession.")
|
||||
else:
|
||||
quick_start = 0 # reset if it's been a while since last restart
|
||||
# reset if it's been a while since last restart
|
||||
quick_start = 0
|
||||
|
||||
if last_start == 0:
|
||||
diff = "never by this instance"
|
||||
@ -283,12 +300,16 @@ class HaValetThread (threading.Thread):
|
||||
|
||||
last_start = now
|
||||
priority_wait = False
|
||||
if (not i_am_active and my_priority == PRIMARY_SETUP) or (standby_list is not None):
|
||||
self.log.info('no running instance found, starting here; last start %s' % diff)
|
||||
if (not i_am_active and my_priority == PRIMARY_SETUP) or \
|
||||
(standby_list is not None):
|
||||
self.log.info('no running instance found, '
|
||||
'starting here; last start %s' % diff)
|
||||
self._activate_process(start_command, my_priority)
|
||||
else:
|
||||
host = standby_list[0] # LIMITATION - supporting only 1 stand by host
|
||||
self.log.info('no running instances found, starting on %s; last start %s' % (host, diff))
|
||||
# LIMITATION - supporting only 1 stand by host
|
||||
host = standby_list[0]
|
||||
self.log.info('no running instances found, starting '
|
||||
'on %s; last start %s' % (host, diff))
|
||||
self._activate_process(start_command, my_priority)
|
||||
host = self.data.get(HOST, 'localhost')
|
||||
else:
|
||||
@ -298,10 +319,13 @@ class HaValetThread (threading.Thread):
|
||||
# end loop
|
||||
|
||||
def _should_be_active(self, host_priority, my_priority):
|
||||
""" Returns True if host should be active as opposed to current node, based on the hosts priorities.
|
||||
"""Should Be Active.
|
||||
|
||||
Lower value means higher Priority,
|
||||
0 (zero) - invalid priority (e.g. process is down)
|
||||
Returns True if host should be active as opposed to current node,
|
||||
based on the hosts' priorities.
|
||||
|
||||
Lower value means higher Priority,
|
||||
0 (zero) - invalid priority (e.g. process is down)
|
||||
|
||||
:param host_priority: other host's priority
|
||||
:type host_priority: int
|
||||
@ -310,38 +334,42 @@ class HaValetThread (threading.Thread):
|
||||
:return: True/False
|
||||
:rtype: bool
|
||||
"""
|
||||
self.log.info('my priority is %d, remote priority is %d' % (my_priority, host_priority))
|
||||
self.log.info('my priority is %d, remote priority is %d' %
|
||||
(my_priority, host_priority))
|
||||
return host_priority < my_priority
|
||||
|
||||
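A quick illustration of the priority rule documented above (lower number wins); the standalone function name is invented for the example:

def should_be_active(host_priority, my_priority):
    # Same comparison as _should_be_active: lower value means higher priority.
    return host_priority < my_priority


print(should_be_active(1, 2))  # True  - the remote node (priority 1) outranks us, let it run
print(should_be_active(3, 2))  # False - we outrank the remote node, so we stay active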
def _is_active(self, call):
|
||||
""" Return 'True, Priority' if valet is running on host
|
||||
"""_is_active.
|
||||
|
||||
'False, None' Otherwise.
|
||||
Return 'True, Priority' if valet is running on host
|
||||
'False, None' Otherwise.
|
||||
"""
|
||||
|
||||
# must use no-proxy to avoid proxy servers gumming up the works
|
||||
for i in xrange(RETRY_COUNT):
|
||||
try:
|
||||
self.log.info('ping (retry %d): %s' % (i, call))
|
||||
proc = subprocess.Popen(call, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
|
||||
proc = subprocess.Popen(call, stdout=subprocess.PIPE,
|
||||
stderr=subprocess.PIPE, shell=True)
|
||||
priority = proc.wait()
|
||||
if priority == 255: # no route to host
|
||||
priority = 0
|
||||
out, err = proc.communicate()
|
||||
self.log.debug('out: ' + out + ', err: ' + err)
|
||||
self.log.info('ping result (should be > 0): %s' % (str(priority)))
|
||||
self.log.info('ping result (should be > 0): %s'
|
||||
% (str(priority)))
|
||||
return (priority > 0), priority
|
||||
except subprocess.CalledProcessError:
|
||||
self.log.error('ping error: ' + str(subprocess.CalledProcessError))
|
||||
self.log.error('ping error: ' +
|
||||
str(subprocess.CalledProcessError))
|
||||
continue
|
||||
return False, None
|
||||
|
||||
def _deactivate_process(self, deactivate_command):
|
||||
""" Deactivate valet on a given host. If host is omitted, local
|
||||
"""Deactivate Process.
|
||||
|
||||
valet is stopped. Returns True if successful, False on error.
|
||||
Deactivate valet on a given host. If host is omitted, local
|
||||
valet is stopped. Returns True if successful, False on error.
|
||||
"""
|
||||
|
||||
try:
|
||||
# call = "'" + deactivate_command % (PROTO, host, port) + "'"
|
||||
self.log.info('deactivate_command: ' + deactivate_command)
|
||||
@ -352,11 +380,11 @@ class HaValetThread (threading.Thread):
|
||||
return False
|
||||
|
||||
def _activate_process(self, activate_command, priority):
|
||||
""" Activate valet on a given host. If host is omitted, local
|
||||
"""Activate Process.
|
||||
|
||||
valet is started. Returns True if successful, False on error.
|
||||
Activate valet on a given host. If host is omitted, local
|
||||
valet is started. Returns True if successful, False on error.
|
||||
"""
|
||||
|
||||
try:
|
||||
self.log.info('activate_command: ' + activate_command)
|
||||
subprocess.check_call(activate_command, shell=True)
|
||||
@ -368,27 +396,31 @@ class HaValetThread (threading.Thread):
|
||||
|
||||
|
||||
class HAValet(object):
|
||||
""""""
|
||||
|
||||
def __init__(self):
|
||||
"""Init HAValet object."""
|
||||
if not os.path.exists(LOG_DIR):
|
||||
os.makedirs(LOG_DIR)
|
||||
self.log = None
|
||||
|
||||
@DeprecationWarning
|
||||
def _parse_valet_conf_v010(self, conf_file_name=DEFAULT_CONF_FILE, process=''):
|
||||
""" This function reads the valet config file and returns configuration
|
||||
def _parse_valet_conf_v010(self, conf_file_name=DEFAULT_CONF_FILE,
|
||||
process=''):
|
||||
"""Parse Valet Conf v010.
|
||||
|
||||
attributes in key/value format
|
||||
This function reads the valet config file and returns configuration
|
||||
attributes in key/value format
|
||||
|
||||
:param conf_file_name: config file name
|
||||
:type conf_file_name: string
|
||||
:param process: specific process name
|
||||
when not supplied - the module launches all the processes in the configuration
|
||||
when not supplied - the module launches all the
|
||||
processes in the configuration
|
||||
:type process: string
|
||||
:return: dictionary of configured monitored processes
|
||||
:rtype: dict
|
||||
"""
|
||||
|
||||
cdata = {}
|
||||
section = ''
|
||||
|
||||
@ -423,14 +455,16 @@ class HAValet(object):
|
||||
return cdata
|
||||
|
||||
def _valid_process_conf_data(self, process_data):
|
||||
""" verify all mandatory parameters are found in the monitored process configuration only standby_list is optional
|
||||
"""Valid Process conf data.
|
||||
|
||||
Verify that all mandatory parameters are found in the monitored process
|
||||
configuration; only standby_list is optional.
|
||||
|
||||
:param process_data: specific process configuration parameters
|
||||
:type process_data: dict
|
||||
:return: are all mandatory parameters are found
|
||||
:rtype: bool
|
||||
"""
|
||||
|
||||
if (process_data.get(HOST) is not None and
|
||||
process_data.get(PRIORITY) is not None and
|
||||
process_data.get(ORDER) is not None and
|
||||
@ -442,7 +476,7 @@ class HAValet(object):
|
||||
return False
|
||||
|
||||
def start(self):
|
||||
"""Start valet HA - Main function"""
|
||||
"""Start valet HA - Main function."""
|
||||
prepare_log(self, 'havalet')
|
||||
self.log.info('ha_valet v1.1 starting')
|
||||
|
||||
@ -460,13 +494,15 @@ class HAValet(object):
|
||||
|
||||
for proc in proc_sorted:
|
||||
if self._valid_process_conf_data(proc):
|
||||
self.log.info('Launching: ' + proc[NAME] + ' - parameters: ' + str(proc))
|
||||
self.log.info('Launching: ' + proc[NAME] + ' - parameters: ' +
|
||||
str(proc))
|
||||
thread = HaValetThread(proc, exit_event)
|
||||
time.sleep(HEARTBEAT_SEC)
|
||||
thread.start()
|
||||
threads.append(thread)
|
||||
else:
|
||||
self.log.info(proc[NAME] + " section is missing mandatory parameter.")
|
||||
self.log.info(proc[NAME] +
|
||||
" section is missing mandatory parameter.")
|
||||
continue
|
||||
|
||||
self.log.info('on air.')
|
||||
|
@ -13,6 +13,8 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Config."""
|
||||
|
||||
from pecan.hooks import TransactionHook
|
||||
|
||||
from valet.api.db import models
|
||||
|
@ -13,6 +13,8 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Conf Test - Test config file (app, connection, session, etc)."""
|
||||
|
||||
from copy import deepcopy
|
||||
import os
|
||||
from pecan import conf
|
||||
@ -33,12 +35,14 @@ BIND = 'mysql+pymysql://root:password@127.0.0.1'
|
||||
|
||||
|
||||
def config_file():
|
||||
"""Return config file."""
|
||||
here = os.path.abspath(os.path.dirname(__file__))
|
||||
return os.path.join(here, 'config.py')
|
||||
|
||||
|
||||
@pytest.fixture(scope='session')
|
||||
def app(request):
|
||||
"""Return test app based on config file."""
|
||||
config = configuration.conf_from_file(config_file()).to_dict()
|
||||
|
||||
# Add the appropriate connection string to the app config.
|
||||
@ -60,8 +64,10 @@ def connection(app, request):
|
||||
print("=" * 80)
|
||||
print("CREATING TEMPORARY DATABASE FOR TESTS")
|
||||
print("=" * 80)
|
||||
subprocess.call(['mysqladmin', '-f', '-uroot', '-ppassword', 'drop', DBNAME])
|
||||
subprocess.call(['mysqladmin', '-f', '-uroot', '-ppassword', 'create', DBNAME])
|
||||
subprocess.call(['mysqladmin', '-f', '-uroot', '-ppassword', 'drop',
|
||||
DBNAME])
|
||||
subprocess.call(['mysqladmin', '-f', '-uroot', '-ppassword', 'create',
|
||||
DBNAME])
|
||||
|
||||
# Bind and create the database tables
|
||||
_db.clear()
|
||||
@ -94,7 +100,7 @@ def connection(app, request):
|
||||
|
||||
@pytest.fixture(scope='function')
|
||||
def session(connection, request):
|
||||
"""Creates a new database session for a test."""
|
||||
"""Create new database session for a test."""
|
||||
_config = configuration.conf_from_file(config_file()).to_dict()
|
||||
config = deepcopy(_config)
|
||||
|
||||
@ -137,11 +143,16 @@ def session(connection, request):
|
||||
|
||||
|
||||
class TestApp(object):
|
||||
""" A controller test starts a database transaction and creates a fake WSGI app. """
|
||||
"""Test App Class.
|
||||
|
||||
A controller test starts a database transaction
|
||||
and creates a fake WSGI app.
|
||||
"""
|
||||
|
||||
__headers__ = {}
|
||||
|
||||
def __init__(self, app):
|
||||
"""Init Test App."""
|
||||
self.app = app
|
||||
|
||||
def _do_request(self, url, method='GET', **kwargs):
|
||||
@ -156,7 +167,7 @@ class TestApp(object):
|
||||
return methods.get(method, self.app.get)(str(url), **kwargs)
|
||||
|
||||
def post_json(self, url, **kwargs):
|
||||
""" note:
|
||||
"""Post json.
|
||||
|
||||
@param (string) url - The URL to emulate a POST request to
|
||||
@returns (paste.fixture.TestResponse)
|
||||
@ -164,7 +175,7 @@ class TestApp(object):
|
||||
return self._do_request(url, 'POSTJ', **kwargs)
|
||||
|
||||
def post(self, url, **kwargs):
|
||||
""" note:
|
||||
"""Post.
|
||||
|
||||
@param (string) url - The URL to emulate a POST request to
|
||||
@returns (paste.fixture.TestResponse)
|
||||
@ -172,7 +183,7 @@ class TestApp(object):
|
||||
return self._do_request(url, 'POST', **kwargs)
|
||||
|
||||
def get(self, url, **kwargs):
|
||||
""" note:
|
||||
"""Get.
|
||||
|
||||
@param (string) url - The URL to emulate a GET request to
|
||||
@returns (paste.fixture.TestResponse)
|
||||
@ -180,7 +191,7 @@ class TestApp(object):
|
||||
return self._do_request(url, 'GET', **kwargs)
|
||||
|
||||
def put(self, url, **kwargs):
|
||||
""" note:
|
||||
"""Put.
|
||||
|
||||
@param (string) url - The URL to emulate a PUT request to
|
||||
@returns (paste.fixture.TestResponse)
|
||||
@ -188,7 +199,7 @@ class TestApp(object):
|
||||
return self._do_request(url, 'PUT', **kwargs)
|
||||
|
||||
def delete(self, url, **kwargs):
|
||||
""" note:
|
||||
"""Delete.
|
||||
|
||||
@param (string) url - The URL to emulate a DELETE request to
|
||||
@returns (paste.fixture.TestResponse)
|
||||
|
@ -1,19 +1,25 @@
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
# Copyright 2014-2017 AT&T Intellectual Property
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Init."""
|
||||
|
||||
from uuid import UUID
|
||||
|
||||
|
||||
def is_valid_uuid4(uuid_string):
|
||||
""" Validate that a UUID string is in fact a valid uuid4.
|
||||
"""Validate that a UUID string is in fact a valid uuid4.
|
||||
|
||||
Happily, the uuid module does the actual
|
||||
checking for us.
|
||||
@ -22,7 +28,6 @@ def is_valid_uuid4(uuid_string):
|
||||
to the UUID() call, otherwise any 32-character
|
||||
hex string is considered valid.
|
||||
"""
|
||||
|
||||
try:
|
||||
val = UUID(uuid_string, version=4)
|
||||
except ValueError:
|
||||
|
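The hunk above stops inside the try/except. For reference, here is a self-contained sketch of the standard uuid4-validation idiom the docstring describes (not necessarily identical to the remaining lines of the file):

from uuid import UUID, uuid4


def is_valid_uuid4(uuid_string):
    """Return True only if uuid_string parses as a canonical version-4 UUID."""
    try:
        val = UUID(uuid_string, version=4)
    except ValueError:
        return False
    # UUID() is lenient about case and braces, so compare the canonical hex form.
    return val.hex == uuid_string.replace('-', '').lower()


print(is_valid_uuid4(str(uuid4())))   # True
print(is_valid_uuid4('not-a-uuid'))   # False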
@ -13,6 +13,8 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Test Plans."""
|
||||
|
||||
from uuid import uuid4
|
||||
|
||||
from valet.api.db.models import Plan, Placement
|
||||
@ -24,18 +26,23 @@ PLAN_NAME = 'ihaveaplan'
|
||||
|
||||
|
||||
class TestPlansController(object):
|
||||
"""Test Plans Controller Class."""
|
||||
|
||||
def test_get_index_no_plans(self, session):
|
||||
"""Test getting plans where there are none, should be empty."""
|
||||
result = session.app.get('/v1/plans/')
|
||||
assert result.status_int == 200
|
||||
assert result.json == []
|
||||
|
||||
def test_get_index_a_plan(self, session):
|
||||
"""Test get a plan using an index, should get a plan name."""
|
||||
Plan(PLAN_NAME, STACK_ID)
|
||||
session.commit()
|
||||
result = session.app.get('/v1/plans/').json
|
||||
assert result == [PLAN_NAME]
|
||||
|
||||
def test_single_plan_should_have_one_item(self, session):
|
||||
"""Test getting a single plan with one item."""
|
||||
Plan(PLAN_NAME, STACK_ID)
|
||||
session.commit()
|
||||
result = session.app.get('/v1/plans/')
|
||||
@ -43,6 +50,7 @@ class TestPlansController(object):
|
||||
assert len(result.json) == 1
|
||||
|
||||
def test_list_a_few_plans(self, session):
|
||||
"""Test returning a list of plans."""
|
||||
for plan_number in range(20):
|
||||
stack_id = str(uuid4())
|
||||
Plan('foo_%s' % plan_number, stack_id)
|
||||
@ -55,21 +63,26 @@ class TestPlansController(object):
|
||||
|
||||
|
||||
class TestPlansItemController(object):
|
||||
"""Test Plans Item Controller Class."""
|
||||
|
||||
def test_get_index_single_plan(self, session):
|
||||
"""Test get index of a single plan."""
|
||||
Plan(PLAN_NAME, STACK_ID)
|
||||
session.commit()
|
||||
result = session.app.get('/v1/plans/%s/' % (STACK_ID))
|
||||
result = session.app.get('/v1/plans/%s/' % STACK_ID)
|
||||
assert result.status_int == 200
|
||||
|
||||
def test_get_index_no_plan(self, session):
|
||||
result = session.app.get('/v1/plans/%s/' % (STACK_ID),
|
||||
"""Test getting index of no plan, should return 404."""
|
||||
result = session.app.get('/v1/plans/%s/' % STACK_ID,
|
||||
expect_errors=True)
|
||||
assert result.status_int == 404
|
||||
|
||||
def test_get_index_single_plan_data(self, session):
|
||||
"""Test getting a single plan data."""
|
||||
Plan(PLAN_NAME, STACK_ID)
|
||||
session.commit()
|
||||
result = session.app.get('/v1/plans/%s/' % (STACK_ID))
|
||||
result = session.app.get('/v1/plans/%s/' % STACK_ID)
|
||||
json = result.json
|
||||
assert is_valid_uuid4(json['id'])
|
||||
assert json['name'] == PLAN_NAME
|
||||
@ -77,6 +90,7 @@ class TestPlansItemController(object):
|
||||
assert json['stack_id'] == STACK_ID
|
||||
|
||||
def test_get_plan_refs(self, session):
|
||||
"""Test get plan refs by getting app json result."""
|
||||
plan = Plan(PLAN_NAME, STACK_ID)
|
||||
Placement(
|
||||
'placement_1', str(uuid4()),
|
||||
@ -89,7 +103,7 @@ class TestPlansItemController(object):
|
||||
location='foo_2'
|
||||
)
|
||||
session.commit()
|
||||
result = session.app.get('/v1/plans/%s/' % (STACK_ID))
|
||||
result = session.app.get('/v1/plans/%s/' % STACK_ID)
|
||||
json = result.json
|
||||
assert is_valid_uuid4(json['id'])
|
||||
assert json['name'] == PLAN_NAME
|
||||
|
@ -1,18 +1,20 @@
|
||||
#
|
||||
# Copyright 2014-2017 AT&T Intellectual Property
|
||||
#
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Base."""
|
||||
|
||||
from oslo_config import fixture as fixture_config
|
||||
from oslo_log import log as logging
|
||||
from oslotest.base import BaseTestCase
|
||||
@ -26,24 +28,31 @@ class Base(BaseTestCase):
|
||||
"""Test case base class for all unit tests."""
|
||||
|
||||
def __init__(self, *args, **kwds):
|
||||
''' '''
|
||||
"""Init Base."""
|
||||
super(Base, self).__init__(*args, **kwds)
|
||||
|
||||
self.CONF = self.useFixture(fixture_config.Config()).conf
|
||||
init.prepare(self.CONF)
|
||||
|
||||
def setUp(self):
|
||||
"""Setup."""
|
||||
super(Base, self).setUp()
|
||||
|
||||
def run_test(self, stack_name, template_path):
|
||||
''' main function '''
|
||||
"""Main Function."""
|
||||
pass
|
||||
|
||||
def validate(self, result):
|
||||
"""Validate."""
|
||||
# TODO(CM): Maybe fix unnecessary obfuscation of assertEqual code.
|
||||
self.assertEqual(True, result.ok, result.message)
|
||||
|
||||
def validate_test(self, result):
|
||||
"""Validate Test."""
|
||||
# TODO(CM): Maybe fix unnecessary obfuscation of assertTrue code.
|
||||
self.assertTrue(result)
|
||||
|
||||
def get_name(self):
|
||||
"""Get Name."""
|
||||
# TODO(CM): Make this function actually do something.
|
||||
pass
|
||||
|
@ -5,7 +5,7 @@ default_log_levels="valet_validator=DEBUG,tests=DEBUG,compute=DEBUG,common=DEBUG
|
||||
[auth]
|
||||
OS_AUTH_URL_WITH_VERSION=http://controller:5000/v2.0
|
||||
OS_USERNAME=admin
|
||||
OS_PASSWORD=qwer4321
|
||||
OS_PASSWORD=PASSWORD
|
||||
OS_TENANT_NAME=demo
|
||||
TOKEN_EXPIRATION=600
|
||||
|
||||
@ -20,7 +20,7 @@ VALUE=output_value
|
||||
VERSION=1
|
||||
|
||||
[valet]
|
||||
HOST=http://192.168.10.18:8090/v1
|
||||
HOST=http://127.0.0.1:8090/v1
|
||||
DELAY_DURATION=30
|
||||
PAUSE=10
|
||||
TRIES_TO_CREATE=5
|
||||
@ -32,7 +32,7 @@ TEMPLATE_NAME=affinity_basic_2_instances
|
||||
|
||||
[test_affinity_3]
|
||||
STACK_NAME=affinity_3_stack
|
||||
TEMPLATE_NAME=affinity_ 3_Instances
|
||||
TEMPLATE_NAME=affinity_3_Instances
|
||||
|
||||
[test_diversity]
|
||||
STACK_NAME=basic_diversity_stack
|
||||
@ -45,4 +45,3 @@ TEMPLATE_NAME=diversity_between_2_affinity
|
||||
[test_exclusivity]
|
||||
STACK_NAME=basic_exclusivity_stack
|
||||
TEMPLATE_NAME=exclusivity_basic_2_instances
|
||||
|
||||
|
@ -1,14 +1,20 @@
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
# Copyright 2014-2017 AT&T Intellectual Property
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Init."""
|
||||
|
||||
from oslo_log import log as logging
|
||||
import time
|
||||
from valet.tests.functional.valet_validator.common.init import CONF, COLORS
|
||||
@ -17,32 +23,42 @@ LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class Result(object):
|
||||
"""Class consisting of ok (bool) and a string message."""
|
||||
|
||||
ok = False
|
||||
message = ""
|
||||
|
||||
def __init__(self, ok=True, msg=""):
|
||||
"""Init a Result."""
|
||||
self.ok = ok
|
||||
self.message = msg
|
||||
|
||||
|
||||
class GeneralLogger(object):
|
||||
"""Class consisting of different logging functions."""
|
||||
|
||||
@staticmethod
|
||||
def delay(duration=None):
|
||||
"""Delay method by performing time sleep."""
|
||||
time.sleep(duration or CONF.heat.DELAY_DURATION)
|
||||
|
||||
@staticmethod
|
||||
def log_info(msg):
|
||||
"""Generic log info method."""
|
||||
LOG.info("%s %s %s" % (COLORS["L_GREEN"], msg, COLORS["WHITE"]))
|
||||
|
||||
@staticmethod
|
||||
def log_error(msg, trc_back=""):
|
||||
"""Log error mthd with msg and trace back."""
|
||||
LOG.error("%s %s %s" % (COLORS["L_RED"], msg, COLORS["WHITE"]))
|
||||
LOG.error("%s %s %s" % (COLORS["L_RED"], trc_back, COLORS["WHITE"]))
|
||||
|
||||
@staticmethod
|
||||
def log_debug(msg):
|
||||
"""Log debug method."""
|
||||
LOG.debug("%s %s %s" % (COLORS["L_BLUE"], msg, COLORS["WHITE"]))
|
||||
|
||||
@staticmethod
|
||||
def log_group(msg):
|
||||
"""Log info method for group."""
|
||||
LOG.info("%s %s %s" % (COLORS["Yellow"], msg, COLORS["WHITE"]))
|
||||
|
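GeneralLogger indexes a COLORS mapping defined in common/init.py; the later hunk that touches that module is cut off before the values appear. Presumably it maps these names to ANSI escape sequences, along the lines of this hypothetical stand-in:

# Hypothetical stand-in for COLORS in common/init.py (actual values are not shown in this diff).
COLORS = {
    "WHITE": "\033[0m",      # reset to the default color
    "L_GREEN": "\033[92m",
    "L_RED": "\033[91m",
    "L_BLUE": "\033[94m",
    "L_PURPLE": "\033[95m",
    "Yellow": "\033[93m",
}

print("%s all good %s" % (COLORS["L_GREEN"], COLORS["WHITE"]))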
@ -13,6 +13,8 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Auth."""
|
||||
|
||||
from keystoneclient.auth.identity import v2 as identity
|
||||
from keystoneclient import session
|
||||
from oslo_log import log as logging
|
||||
@ -24,37 +26,46 @@ MIN_TOKEN_LIFE_SECONDS = 120
|
||||
|
||||
|
||||
class Auth(object):
|
||||
''' Singleton class for authentication token '''
|
||||
"""Singleton class for authentication token."""
|
||||
|
||||
auth = None
|
||||
session = None
|
||||
|
||||
@staticmethod
|
||||
def _init():
|
||||
if Auth.is_auth_invalid():
|
||||
Auth.auth = identity.Password(auth_url=CONF.auth.OS_AUTH_URL_WITH_VERSION,
|
||||
username=CONF.auth.OS_USERNAME,
|
||||
password=CONF.auth.OS_PASSWORD,
|
||||
tenant_name=CONF.auth.OS_TENANT_NAME)
|
||||
Auth.auth = identity.Password(
|
||||
auth_url=CONF.auth.OS_AUTH_URL_WITH_VERSION,
|
||||
username=CONF.auth.OS_USERNAME,
|
||||
password=CONF.auth.OS_PASSWORD,
|
||||
tenant_name=CONF.auth.OS_TENANT_NAME)
|
||||
Auth.session = session.Session(auth=Auth.auth)
|
||||
|
||||
@staticmethod
|
||||
def get_password_plugin():
|
||||
"""Return auth after init."""
|
||||
Auth._init()
|
||||
return Auth.auth
|
||||
|
||||
@staticmethod
|
||||
def get_auth_token():
|
||||
"""Return auth token for session."""
|
||||
return Auth.get_password_plugin().get_token(Auth.get_auth_session())
|
||||
|
||||
@staticmethod
|
||||
def get_auth_session():
|
||||
"""Return auth session."""
|
||||
Auth._init()
|
||||
return Auth.session
|
||||
|
||||
@staticmethod
|
||||
def get_project_id():
|
||||
return Auth.get_password_plugin().get_project_id(Auth.get_auth_session())
|
||||
"""Return auth_session based on project_id."""
|
||||
return Auth.get_password_plugin().get_project_id(
|
||||
Auth.get_auth_session())
|
||||
|
||||
@staticmethod
|
||||
def is_auth_invalid():
|
||||
return Auth.auth is None or Auth.auth.get_auth_ref(Auth.session).will_expire_soon(CONF.auth.TOKEN_EXPIRATION)
|
||||
"""Return True/False based on status of auth."""
|
||||
return Auth.auth is None or Auth.auth.get_auth_ref(
|
||||
Auth.session).will_expire_soon(CONF.auth.TOKEN_EXPIRATION)
|
||||
|
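Callers elsewhere in the test suite use this singleton to attach a token to Valet REST calls. A minimal, hypothetical example (the endpoint mirrors the HOST value from the functional-test config above):

import requests

from valet.tests.functional.valet_validator.common.auth import Auth

headers = {"X-Auth-Token": Auth.get_auth_token(),
           "Content-Type": "application/json"}

# Auth._init() re-authenticates transparently once the cached token is
# about to expire, so repeated calls can reuse the same helper.
response = requests.get("http://127.0.0.1:8090/v1/groups", headers=headers)
print(response.status_code)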
@ -13,6 +13,8 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Init."""
|
||||
|
||||
import os
|
||||
from oslo_config import cfg
|
||||
from oslo_log import log as logging
|
||||
@ -45,7 +47,8 @@ COLORS = \
|
||||
|
||||
opts_auth = \
|
||||
[
|
||||
cfg.StrOpt('OS_AUTH_URL_WITH_VERSION', default='http://controller:5000/v2.0'),
|
||||
cfg.StrOpt('OS_AUTH_URL_WITH_VERSION',
|
||||
default='http://controller:5000/v2.0'),
|
||||
cfg.StrOpt('OS_USERNAME', default="addddmin"),
|
||||
cfg.StrOpt('OS_PASSWORD', default="qwer4321"),
|
||||
cfg.StrOpt('OS_TENANT_NAME', default="demo"),
|
||||
@ -87,6 +90,7 @@ _initialized = False
|
||||
|
||||
|
||||
def prepare(CONF):
|
||||
"""Prepare config options."""
|
||||
global _initialized
|
||||
try:
|
||||
if _initialized is False:
|
||||
@ -94,9 +98,12 @@ def prepare(CONF):
|
||||
_initialized = True
|
||||
|
||||
# Adding config file
|
||||
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir, os.pardir, os.pardir))
|
||||
possible_topdir = os.path.normpath(
|
||||
os.path.join(os.path.abspath(__file__), os.pardir,
|
||||
os.pardir, os.pardir))
|
||||
conf_file = os.path.join(possible_topdir, 'etc', DOMAIN + '.cfg')
|
||||
CONF([], project=DOMAIN, default_config_files=[conf_file] or None, validate_default_values=True)
|
||||
CONF([], project=DOMAIN, default_config_files=[conf_file] or None,
|
||||
validate_default_values=True)
|
||||
|
||||
logging.setup(CONF, DOMAIN)
|
||||
|
||||
|
@ -13,6 +13,8 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Resources."""
|
||||
|
||||
from oslo_log import log as logging
|
||||
import traceback
|
||||
import yaml
|
||||
@ -23,8 +25,10 @@ TEMPLATE_RES = "resources"
|
||||
|
||||
|
||||
class TemplateResources(object):
|
||||
''' Heat template parser '''
|
||||
"""Heat template parser."""
|
||||
|
||||
def __init__(self, template):
|
||||
"""Init Template resources."""
|
||||
self.instances = []
|
||||
self.groups = {}
|
||||
self.template_data = None
|
||||
@ -47,7 +51,10 @@ class TemplateResources(object):
|
||||
|
||||
|
||||
class Instance(object):
|
||||
"""Contains instance details from template (name, image, flavor, etc)."""
|
||||
|
||||
def __init__(self, doc, instance_name):
|
||||
"""Init Instance Object."""
|
||||
self.resource_name = instance_name
|
||||
self.name = None
|
||||
self.image = None
|
||||
@ -57,6 +64,7 @@ class Instance(object):
|
||||
self.fill(doc, instance_name)
|
||||
|
||||
def fill(self, doc, instance_name):
|
||||
"""Fill Instance details from template properties."""
|
||||
try:
|
||||
template_property = doc[TEMPLATE_RES][instance_name]["properties"]
|
||||
|
||||
@ -69,12 +77,17 @@ class Instance(object):
|
||||
LOG.error(traceback.format_exc())
|
||||
|
||||
def get_ins(self):
|
||||
"""Return instance data."""
|
||||
return("type: %s, name: %s, image: %s, flavor: %s, resource_name: %s "
|
||||
% (self.type, self.name, self.image, self.flavor, self.resource_name))
|
||||
% (self.type, self.name, self.image,
|
||||
self.flavor, self.resource_name))
|
||||
|
||||
|
||||
class Group(object):
|
||||
"""Class containing group details (type, name, resources) from template."""
|
||||
|
||||
def __init__(self, doc, group_name):
|
||||
"""Init Group Object."""
|
||||
self.group_type = None
|
||||
self.group_name = None
|
||||
self.level = None
|
||||
@ -83,11 +96,13 @@ class Group(object):
|
||||
self.fill(doc, group_name)
|
||||
|
||||
def fill(self, doc, group_name):
|
||||
"""Fill group from template properties."""
|
||||
try:
|
||||
template_property = doc[TEMPLATE_RES][group_name]["properties"]
|
||||
|
||||
self.group_type = template_property["group_type"]
|
||||
self.group_name = template_property["group_name"] if "group_name" in template_property else None
|
||||
self.group_name = template_property["group_name"] \
|
||||
if "group_name" in template_property else None
|
||||
self.level = template_property["level"]
|
||||
for res in template_property[TEMPLATE_RES]:
|
||||
self.group_resources.append(res["get_resource"])
|
||||
|
@ -13,6 +13,8 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Analyzer."""
|
||||
|
||||
from novaclient import client
|
||||
import traceback
|
||||
from valet.tests.functional.valet_validator.common import Result, GeneralLogger
|
||||
@ -21,24 +23,27 @@ from valet.tests.functional.valet_validator.common.init import CONF
|
||||
|
||||
|
||||
class Analyzer(object):
|
||||
"""Methods to perform analysis on hosts, vms, racks."""
|
||||
|
||||
def __init__(self):
|
||||
''' initializing the analyzer - connecting to nova '''
|
||||
"""Initializing the analyzer - connecting to nova."""
|
||||
GeneralLogger.log_info("Initializing Analyzer")
|
||||
self.nova = client.Client(CONF.nova.VERSION, session=Auth.get_auth_session())
|
||||
self.nova = client.Client(CONF.nova.VERSION,
|
||||
session=Auth.get_auth_session())
|
||||
|
||||
def get_host_name(self, instance_name):
|
||||
''' Returning host by instance name '''
|
||||
"""Returning host by instance name."""
|
||||
serv = self.nova.servers.find(name=instance_name)
|
||||
return self.get_hostname(serv)
|
||||
|
||||
def get_all_hosts(self, instances_list):
|
||||
''' Returning all hosts of all instances '''
|
||||
"""Returning all hosts of all instances."""
|
||||
GeneralLogger.log_debug("Getting hosts names")
|
||||
return [self.get_host_name(instance.name) for instance in instances_list]
|
||||
return [self.get_host_name(instance.name)
|
||||
for instance in instances_list]
|
||||
|
||||
def check(self, resources):
|
||||
''' Checking if all instances are on the Appropriate hosts and racks '''
|
||||
"""Check if all instances are on the Appropriate hosts and racks."""
|
||||
GeneralLogger.log_debug("Starting to check instances location")
|
||||
result = True
|
||||
|
||||
@ -46,39 +51,51 @@ class Analyzer(object):
|
||||
for key in resources.groups:
|
||||
group = resources.groups[key]
|
||||
|
||||
resources_to_compare = self.get_resources_to_compare(resources, group.group_resources) or group.group_resources
|
||||
instances_for_group = self.get_group_instances(resources, resources_to_compare)
|
||||
resources_to_compare = self.get_resources_to_compare(
|
||||
resources, group.group_resources) or group.group_resources
|
||||
instances_for_group = self.get_group_instances(
|
||||
resources, resources_to_compare)
|
||||
hosts_list = self.get_all_hosts(instances_for_group)
|
||||
|
||||
# switch case
|
||||
result = result and \
|
||||
{
|
||||
"affinity": self.are_the_same(hosts_list, group.level),
|
||||
"diversity": self.are_different(hosts_list, group.level),
|
||||
"exclusivity": self.are_we_alone(hosts_list, instances_for_group)
|
||||
"affinity": self.are_the_same(hosts_list,
|
||||
group.level),
|
||||
"diversity": self.are_different(hosts_list,
|
||||
group.level),
|
||||
"exclusivity": self.are_we_alone(hosts_list,
|
||||
instances_for_group)
|
||||
}[group.group_type]
|
||||
|
||||
except Exception as ex:
|
||||
GeneralLogger.log_error("Exception at method check: %s" % ex, traceback.format_exc())
|
||||
GeneralLogger.log_error("Exception at method check: %s" % ex,
|
||||
traceback.format_exc())
|
||||
result = False
|
||||
|
||||
return Result(result)
|
||||
|
||||
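The "result = result and {...}[group.group_type]" expression above is a dictionary used as a switch; note that, as written, all three checks run before the lookup selects one result. A small, hypothetical variant of the same idiom uses lambdas so the evaluation stays lazy (the placeholder checks below are deliberately simpler than the real ones):

def are_the_same(hosts):
    # Placeholder affinity check: every instance landed on a single host.
    return len(set(hosts)) <= 1


def are_different(hosts):
    # Placeholder diversity check: no host is used twice.
    return len(set(hosts)) == len(hosts)


def check_group(group_type, hosts):
    """Dict-as-switch with lambdas so only the selected check executes."""
    checks = {
        "affinity": lambda: are_the_same(hosts),
        "diversity": lambda: are_different(hosts),
    }
    return checks[group_type]()


print(check_group("affinity", ["host1", "host1"]))   # True
print(check_group("diversity", ["host1", "host1"]))  # False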
def get_resources_to_compare(self, resources, group_resources):
|
||||
"""Return resources to compare."""
|
||||
resources_to_compare = []
|
||||
|
||||
try:
|
||||
for group_name in group_resources: # ['test-affinity-group1', 'test-affinity-group2']
|
||||
# ['test-affinity-group1', 'test-affinity-group2']
|
||||
for group_name in group_resources:
|
||||
if "test" in group_name:
|
||||
resources_to_compare.append(resources.groups[group_name].group_resources)
|
||||
resources_to_compare.append(
|
||||
resources.groups[group_name].group_resources)
|
||||
else:
|
||||
return None
|
||||
return resources_to_compare
|
||||
|
||||
except Exception as ex:
|
||||
GeneralLogger.log_error("Exception at method get_resources_to_compare: %s" % ex, traceback.format_exc())
|
||||
GeneralLogger.log_error("Exception at method "
|
||||
"get_resources_to_compare: %s"
|
||||
% ex, traceback.format_exc())
|
||||
|
||||
def are_we_alone(self, hosts_list, ins_for_group):
|
||||
"""Return result of whether any instances on host."""
|
||||
try:
|
||||
# instances is all the instances on this host
|
||||
all_instances_on_host = self.get_instances_per_host(hosts_list)
|
||||
@ -88,10 +105,11 @@ class Analyzer(object):
|
||||
return not all_instances_on_host
|
||||
|
||||
except Exception as ex:
|
||||
GeneralLogger.log_error("Exception at method are_we_alone: %s" % ex, traceback.format_exc())
|
||||
GeneralLogger.log_error("Exception at method are_we_alone: %s"
|
||||
% ex, traceback.format_exc())
|
||||
|
||||
def get_instances_per_host(self, hosts_list):
|
||||
''' get_instances_per_host '''
|
||||
"""Get number of instances per host."""
|
||||
instances = []
|
||||
try:
|
||||
for host in set(hosts_list):
|
||||
@ -100,39 +118,50 @@ class Analyzer(object):
|
||||
|
||||
return instances
|
||||
except Exception as ex:
|
||||
GeneralLogger.log_error("Exception at method get_instances_per_host: %s" % ex, traceback.format_exc())
|
||||
GeneralLogger.log_error("Exception at method "
|
||||
"get_instances_per_host: %s"
|
||||
% ex, traceback.format_exc())
|
||||
|
||||
def are_different(self, hosts_list, level):
|
||||
''' Checking if all hosts (and racks) are different for all instances '''
|
||||
"""Check if all hosts (and racks) are different for all instances."""
|
||||
diction = {}
|
||||
|
||||
try:
|
||||
for h in hosts_list:
|
||||
if self.is_already_exists(diction, self.get_host_or_rack(level, h)):
|
||||
if self.is_already_exists(diction,
|
||||
self.get_host_or_rack(level, h)):
|
||||
return False
|
||||
return True
|
||||
|
||||
except Exception as ex:
|
||||
GeneralLogger.log_error("Exception at method are_all_hosts_different: %s" % ex, traceback.format_exc())
|
||||
GeneralLogger.log_error("Exception at method "
|
||||
"are_all_hosts_different: %s"
|
||||
% ex, traceback.format_exc())
|
||||
return False
|
||||
|
||||
def are_the_same(self, hosts_list, level):
|
||||
"""Check if all hosts (and racks) are the same for all instances."""
|
||||
GeneralLogger.log_debug("Hosts are:")
|
||||
try:
|
||||
for h in hosts_list:
|
||||
if self.compare_host(self.get_host_or_rack(level, h), self.get_host_or_rack(level, hosts_list[0])) is False:
|
||||
if self.compare_host(
|
||||
self.get_host_or_rack(level, h),
|
||||
self.get_host_or_rack(level, hosts_list[0])) is False:
|
||||
return False
|
||||
return True
|
||||
|
||||
except Exception as ex:
|
||||
GeneralLogger.log_error("Exception at method are_all_hosts_different: %s" % ex, traceback.format_exc())
|
||||
GeneralLogger.log_error("Exception at method "
|
||||
"are_all_hosts_different: %s"
|
||||
% ex, traceback.format_exc())
|
||||
return False
|
||||
|
||||
def get_group_instances(self, resources, group_ins):
|
||||
''' gets the instance object according to the group_ins
|
||||
"""Get the instance object according to the group_ins.
|
||||
|
||||
group_ins - the group_resources name of the instances belong to this group (['my-instance-1', 'my-instance-2'])
|
||||
'''
|
||||
group_ins - the group_resources name of the instances belonging to
|
||||
this group (['my-instance-1', 'my-instance-2']).
|
||||
"""
|
||||
ins_for_group = []
|
||||
try:
|
||||
for instance in resources.instances:
|
||||
@ -141,13 +170,17 @@ class Analyzer(object):
|
||||
return ins_for_group
|
||||
|
||||
except Exception as ex:
|
||||
GeneralLogger.log_error("Exception at method get_group_instances: %s" % ex, traceback.format_exc())
|
||||
GeneralLogger.log_error("Exception at method "
|
||||
"get_group_instances: %s"
|
||||
% ex, traceback.format_exc())
|
||||
return None
|
||||
|
||||
def get_hostname(self, vm):
|
||||
"""Get hostname of vm."""
|
||||
return str(getattr(vm, CONF.nova.ATTR))
|
||||
|
||||
def is_already_exists(self, diction, item):
|
||||
"""If item exists, return True, otherwise return False."""
|
||||
if item in diction:
|
||||
return True
|
||||
|
||||
@ -155,18 +188,24 @@ class Analyzer(object):
|
||||
return False
|
||||
|
||||
def compare_rack(self, current_host, first_host):
|
||||
"""Return True if racks of current and first host are equal."""
|
||||
GeneralLogger.log_debug(current_host)
|
||||
return self.get_rack(current_host) == self.get_rack(first_host)
|
||||
|
||||
def compare_host(self, current_host, first_host):
|
||||
"""Compare current host to first host."""
|
||||
GeneralLogger.log_debug(current_host)
|
||||
return current_host == first_host
|
||||
|
||||
def get_rack(self, host):
|
||||
"""Get rack from host."""
|
||||
return (host.split("r")[1])[:2]
|
||||
|
||||
def get_host_or_rack(self, level, host):
|
||||
"""Return host if current level is host, otherwise return rack."""
|
||||
return host if level == "host" else self.get_rack(host)
|
||||
|
||||
def get_vms_by_hypervisor(self, host):
|
||||
return [vm for vm in self.nova.servers.list(search_opts={"all_tenants": True}) if self.get_hostname(vm) == host]
|
||||
"""Return vms based on hypervisor(host)."""
|
||||
return [vm for vm in self.nova.servers.list(
|
||||
search_opts={"all_tenants": True}) if self.get_hostname(vm) == host]
|
||||
|
@ -13,6 +13,8 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Valet Group."""
|
||||
|
||||
import json
|
||||
import requests
|
||||
import traceback
|
||||
@ -22,61 +24,73 @@ from valet.tests.functional.valet_validator.common.init import CONF
|
||||
|
||||
|
||||
class ValetGroup(object):
|
||||
"""Class of helpers and basic functions for Valet Groups."""
|
||||
|
||||
def __init__(self):
|
||||
"""Init groups_url and headers for Valet Group."""
|
||||
self.groups_url = "%s/groups" % CONF.valet.HOST
|
||||
|
||||
self.headers = {"X-Auth-Token": Auth.get_auth_token(),
|
||||
"Content-Type": "application/json"}
|
||||
|
||||
def create_group(self, group_name, group_type):
|
||||
"""Create group given name and type."""
|
||||
grp_data = {"name": group_name, "type": group_type}
|
||||
return requests.post(self.groups_url, data=json.dumps(grp_data), headers=self.headers)
|
||||
return requests.post(self.groups_url, data=json.dumps(grp_data),
|
||||
headers=self.headers)
|
||||
|
||||
def get_list_groups(self):
|
||||
"""Return a list of groups."""
|
||||
list_response = requests.get(self.groups_url, headers=self.headers)
|
||||
return list_response.json()["groups"]
|
||||
|
||||
def get_group_details(self, group_id):
|
||||
"""Return Group Details based on group_id."""
|
||||
url = self.groups_url + "/" + group_id
|
||||
return requests.get(url, headers=self.headers)
|
||||
|
||||
def update_group_members(self, group_id, members=None):
|
||||
"""Update group members based on group_id."""
|
||||
add_member_url = self.groups_url + "/%s/members" % group_id
|
||||
data = json.dumps({"members": [members or Auth.get_project_id()]})
|
||||
|
||||
return requests.put(add_member_url, data=data, headers=self.headers)
|
||||
|
||||
def update_group(self, group_id, new_description):
|
||||
"""Update group based on its id with a new description."""
|
||||
url = self.groups_url + "/" + group_id
|
||||
new_data = json.dumps({"description": new_description})
|
||||
|
||||
return requests.put(url, new_data, headers=self.headers)
|
||||
|
||||
def delete_group_member(self, group_id, member_id):
|
||||
"""Delete a single group member based on its member_id."""
|
||||
url = self.groups_url + "/%s/members/%s" % (group_id, member_id)
|
||||
return requests.delete(url, headers=self.headers)
|
||||
|
||||
def delete_all_group_member(self, group_id):
|
||||
"""Delete all members of a group based on group_id."""
|
||||
url = self.groups_url + "/%s/members" % group_id
|
||||
return requests.delete(url, headers=self.headers)
|
||||
|
||||
def delete_group(self, group_id):
|
||||
"""Delete group based on its id."""
|
||||
url = self.groups_url + "/%s" % group_id
|
||||
return requests.delete(url, headers=self.headers)
|
||||
|
||||
def get_group_id_and_members(self, group_name, group_type="exclusivity"):
|
||||
''' Checks if group name exists, if not - creates it
|
||||
"""Check if group name exists, if not - creates it.
|
||||
|
||||
returns group's id and members list
|
||||
'''
|
||||
Returns group's id and members list.
|
||||
"""
|
||||
group_details = self.check_group_exists(group_name)
|
||||
|
||||
try:
|
||||
if group_details is None:
|
||||
GeneralLogger.log_info("Creating group")
|
||||
create_response = self.create_group(group_name, group_type)
|
||||
return create_response.json()["id"], create_response.json()["members"]
|
||||
return create_response.json()["id"], \
|
||||
create_response.json()["members"]
|
||||
else:
|
||||
GeneralLogger.log_info("Group exists")
|
||||
|
||||
@ -86,17 +100,18 @@ class ValetGroup(object):
|
||||
GeneralLogger.log_error(traceback.format_exc())
|
||||
|
||||
def add_group_member(self, group_details):
|
||||
''' Checks if member exists in group, if not - adds it '''
|
||||
"""Check if member exists in group, if not - adds it."""
|
||||
# group_details - group id, group members
|
||||
try:
|
||||
if Auth.get_project_id() not in group_details[1]:
|
||||
GeneralLogger.log_info("Adding member to group")
|
||||
self.update_group_members(group_details[0])
|
||||
except Exception:
|
||||
GeneralLogger.log_error("Failed to add group member", traceback.format_exc())
|
||||
GeneralLogger.log_error("Failed to add group member",
|
||||
traceback.format_exc())
|
||||
|
||||
def check_group_exists(self, group_name):
|
||||
''' Checks if group exists in group list, if not returns None '''
|
||||
"""Check if group exists in group list, if not returns None."""
|
||||
for grp in self.get_list_groups():
|
||||
if grp["name"] == group_name:
|
||||
return grp["id"], grp["members"]
|
||||
@ -104,12 +119,14 @@ class ValetGroup(object):
|
||||
return None
|
||||
|
||||
def delete_all_groups(self):
|
||||
DELETED = 204
|
||||
"""Return deleted code 204 if all groups deleted."""
|
||||
deleted = 204
|
||||
for group in self.get_list_groups():
|
||||
codes = [self.delete_all_group_member(group["id"]).status_code, self.delete_group(group["id"]).status_code]
|
||||
codes = [self.delete_all_group_member(group["id"]).status_code,
|
||||
self.delete_group(group["id"]).status_code]
|
||||
|
||||
res = filter(lambda a: a != DELETED, codes)
|
||||
res = filter(lambda a: a != deleted, codes)
|
||||
if res:
|
||||
return res[0]
|
||||
|
||||
return DELETED
|
||||
return deleted
|
||||
|
@ -13,6 +13,8 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Loader."""
|
||||
|
||||
from heatclient.client import Client
|
||||
import sys
|
||||
import time
|
||||
@ -24,9 +26,10 @@ from valet.tests.functional.valet_validator.group_api.valet_group import ValetGr
|
||||
|
||||
|
||||
class Loader(object):
|
||||
"""Class is responsible for loading stacks and groups."""
|
||||
|
||||
def __init__(self):
|
||||
''' initializing the loader - connecting to heat '''
|
||||
"""Initializing the loader - connecting to heat."""
|
||||
GeneralLogger.log_info("Initializing Loader")
|
||||
|
||||
heat_url = CONF.heat.HEAT_URL + str(Auth.get_project_id())
|
||||
@ -36,6 +39,7 @@ class Loader(object):
|
||||
self.stacks = heat.stacks
|
||||
|
||||
def create_stack(self, stack_name, template_resources):
|
||||
"""Create stack from template resources."""
|
||||
GeneralLogger.log_info("Starting to create stacks")
|
||||
groups = template_resources.groups
|
||||
|
||||
@ -44,40 +48,50 @@ class Loader(object):
|
||||
if groups[key].group_type == "exclusivity":
|
||||
self.create_valet_group(groups[key].group_name)
|
||||
|
||||
self.stacks.create(stack_name=stack_name, template=template_resources.template_data)
|
||||
self.stacks.create(stack_name=stack_name,
|
||||
template=template_resources.template_data)
|
||||
return self.wait(stack_name, operation="create")
|
||||
|
||||
except Exception:
|
||||
GeneralLogger.log_error("Failed to create stack", traceback.format_exc())
|
||||
GeneralLogger.log_error("Failed to create stack",
|
||||
traceback.format_exc())
|
||||
sys.exit(1)
|
||||
|
||||
def create_valet_group(self, group_name):
|
||||
"""Create valet group."""
|
||||
try:
|
||||
v_group = ValetGroup()
|
||||
|
||||
group_details = v_group.get_group_id_and_members(group_name) # (group_name, group_type)
|
||||
# (group_name, group_type)
|
||||
group_details = v_group.get_group_id_and_members(group_name)
|
||||
v_group.add_group_member(group_details)
|
||||
|
||||
except Exception:
|
||||
GeneralLogger.log_error("Failed to create valet group", traceback.format_exc())
|
||||
GeneralLogger.log_error("Failed to create valet group",
|
||||
traceback.format_exc())
|
||||
sys.exit(1)
|
||||
|
||||
def delete_stack(self, stack_name):
|
||||
"""Delete stack according to stack_name."""
|
||||
self.stacks.delete(stack_id=stack_name)
|
||||
return self.wait(stack_name, operation="delete")
|
||||
|
||||
def delete_all_stacks(self):
|
||||
"""Delete all stacks."""
|
||||
GeneralLogger.log_info("Starting to delete stacks")
|
||||
try:
|
||||
for stack in self.stacks.list():
|
||||
self.delete_stack(stack.id)
|
||||
|
||||
except Exception:
|
||||
GeneralLogger.log_error("Failed to delete stacks", traceback.format_exc())
|
||||
GeneralLogger.log_error("Failed to delete stacks",
|
||||
traceback.format_exc())
|
||||
|
||||
def wait(self, stack_name, count=CONF.valet.TIME_CAP, operation="Operation"):
|
||||
''' Checking the result of the process (create/delete) and writing the result to log '''
|
||||
while str(self.stacks.get(stack_name).status) == "IN_PROGRESS" and count > 0:
|
||||
def wait(self, stack_name, count=CONF.valet.TIME_CAP,
|
||||
operation="Operation"):
|
||||
"""Check result of process (create/delete) and write result to log."""
|
||||
while str(self.stacks.get(stack_name).status) == "IN_PROGRESS" \
|
||||
and count > 0:
|
||||
count -= 1
|
||||
time.sleep(1)
|
||||
|
||||
@ -85,7 +99,8 @@ class Loader(object):
|
||||
GeneralLogger.log_info(operation + " Successfully completed")
|
||||
return Result()
|
||||
elif str(self.stacks.get(stack_name).status) == "FAILED":
|
||||
msg = operation + " failed - " + self.stacks.get(stack_name).stack_status_reason
|
||||
msg = operation + " failed - " + \
|
||||
self.stacks.get(stack_name).stack_status_reason
|
||||
else:
|
||||
msg = operation + " timed out"
|
||||
GeneralLogger.log_error(msg)
|
||||
|
@ -13,6 +13,7 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Functional Base."""
|
||||
|
||||
import os
|
||||
from oslo_log import log as logging
|
||||
@ -31,24 +32,32 @@ class FunctionalTestCase(Base):
|
||||
"""Test case base class for all unit tests."""
|
||||
|
||||
def __init__(self, *args, **kwds):
|
||||
''' initializing the FunctionalTestCase - loading the logger, loader and analyzer '''
|
||||
"""Init.
|
||||
|
||||
Initializing the FunctionalTestCase - loading the
|
||||
logger, loader and analyzer.
|
||||
"""
|
||||
super(FunctionalTestCase, self).__init__(*args, **kwds)
|
||||
|
||||
def setUp(self):
|
||||
"""Start loader and analyzer."""
|
||||
super(FunctionalTestCase, self).setUp()
|
||||
|
||||
self.load = Loader()
|
||||
self.compute = Analyzer()
|
||||
|
||||
LOG.info("%s %s is starting... %s" % (COLORS["L_BLUE"], self.get_name(), COLORS["WHITE"]))
|
||||
LOG.info("%s %s is starting... %s" % (COLORS["L_BLUE"],
|
||||
self.get_name(),
|
||||
COLORS["WHITE"]))
|
||||
|
||||
def run_test(self, stack_name, template_path):
|
||||
''' scenario -
|
||||
"""Run Test.
|
||||
|
||||
scenario -
|
||||
deletes all stacks
|
||||
creates a new stack
|
||||
checks if host (or rack) is the same for all instances
|
||||
'''
|
||||
"""
|
||||
# delete all stacks
|
||||
self.load.delete_all_stacks()
|
||||
|
||||
@ -60,17 +69,21 @@ class FunctionalTestCase(Base):
|
||||
res = self.try_again(res, stack_name, my_resources)
|
||||
|
||||
self.validate(res)
|
||||
LOG.info("%s stack creation is done successfully %s" % (COLORS["L_PURPLE"], COLORS["WHITE"]))
|
||||
LOG.info("%s stack creation is done successfully %s"
|
||||
% (COLORS["L_PURPLE"], COLORS["WHITE"]))
|
||||
time.sleep(self.CONF.valet.DELAY_DURATION)
|
||||
|
||||
# validation
|
||||
self.validate(self.compute.check(my_resources))
|
||||
LOG.info("%s validation is done successfully %s" % (COLORS["L_PURPLE"], COLORS["WHITE"]))
|
||||
LOG.info("%s validation is done successfully %s"
|
||||
% (COLORS["L_PURPLE"], COLORS["WHITE"]))
|
||||
|
||||
def try_again(self, res, stack_name, my_resources):
|
||||
"""Try creating stack again."""
|
||||
tries = CONF.valet.TRIES_TO_CREATE
|
||||
while "Ostro error" in res.message and tries > 0:
|
||||
LOG.error("Ostro error - try number %d" % (CONF.valet.TRIES_TO_CREATE - tries + 2))
|
||||
LOG.error("Ostro error - try number %d"
|
||||
% (CONF.valet.TRIES_TO_CREATE - tries + 2))
|
||||
self.load.delete_all_stacks()
|
||||
res = self.load.create_stack(stack_name, my_resources)
|
||||
tries -= 1
|
||||
@ -79,9 +92,13 @@ class FunctionalTestCase(Base):
|
||||
return res
|
||||
|
||||
def get_template_path(self, template_name):
|
||||
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir, os.pardir))
|
||||
return os.path.join(possible_topdir, 'tests/templates', template_name + '.yml')
|
||||
"""Return template path for the template name given."""
|
||||
possible_topdir = os.path.normpath(os.path.join(
|
||||
os.path.abspath(__file__), os.pardir, os.pardir))
|
||||
return os.path.join(possible_topdir, 'tests/templates',
|
||||
template_name + '.yml')
|
||||
|
||||
def init_template(self, test):
|
||||
"""Init template, call get path for test template."""
|
||||
self.stack_name = test.STACK_NAME
|
||||
self.template_path = self.get_template_path(test.TEMPLATE_NAME)
|
||||
|
@ -13,6 +13,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.

"""Test Affinity."""

from oslo_config import cfg
from oslo_log import log as logging
from valet.tests.functional.valet_validator.common.init import CONF
@ -30,14 +32,17 @@ LOG = logging.getLogger(__name__)


class TestAffinity(FunctionalTestCase):
    """Test Affinity Functional Tests."""

    def setUp(self):
        ''' Adding configuration and logging mechanism '''
        """Adding configuration and logging mechanism."""
        super(TestAffinity, self).setUp()
        self.init_template(CONF.test_affinity)

    def test_affinity(self):
        """Test Affinity."""
        self.run_test(self.stack_name, self.template_path)

    def get_name(self):
        """Return Name."""
        return __name__
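
TestAffinity above and the three test modules that follow are all stamped from one pattern: subclass FunctionalTestCase, point init_template at the module's own config group, and delegate the scenario to run_test. The skeleton, with a hypothetical policy name standing in for affinity, diversity, or exclusivity:

class TestMyPolicy(FunctionalTestCase):
    # Skeleton of the per-policy pattern; CONF.test_my_policy is a
    # hypothetical config group supplying STACK_NAME and TEMPLATE_NAME.

    def setUp(self):
        super(TestMyPolicy, self).setUp()
        self.init_template(CONF.test_my_policy)

    def test_my_policy(self):
        self.run_test(self.stack_name, self.template_path)

    def get_name(self):
        return __name__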

@ -13,6 +13,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.

"""Test Affinity 3 Instances."""

from oslo_config import cfg
from oslo_log import log as logging
from valet.tests.functional.valet_validator.common.init import CONF
@ -29,14 +31,17 @@ LOG = logging.getLogger(__name__)


class TestAffinity_3(FunctionalTestCase):
    """Test Affinity 3 Functional Test."""

    def setUp(self):
        ''' Adding configuration and logging mechanism '''
        """Adding configuration and logging mechanism."""
        super(TestAffinity_3, self).setUp()
        self.init_template(CONF.test_affinity_3)

    def test_affinity(self):
        """Test Affinity."""
        self.run_test(self.stack_name, self.template_path)

    def get_name(self):
        """Return Name."""
        return __name__

@ -13,6 +13,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.

"""Test Diversity."""

from oslo_config import cfg
from oslo_log import log as logging
from valet.tests.functional.valet_validator.common.init import CONF
@ -30,15 +32,17 @@ LOG = logging.getLogger(__name__)


class TestDiversity(FunctionalTestCase):
    """Test Diversity Functional Test."""

    def setUp(self):
        ''' Initiating template '''
        """Initiating template."""
        super(TestDiversity, self).setUp()
        self.init_template(CONF.test_diversity)

    def test_diversity(self):

        """Test diversity."""
        self.run_test(self.stack_name, self.template_path)

    def get_name(self):
        """Return Name."""
        return __name__

@ -13,6 +13,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.

"""Test Exclusivity."""

from oslo_config import cfg
from oslo_log import log as logging
from valet.tests.functional.valet_validator.common.init import CONF
@ -30,14 +32,17 @@ LOG = logging.getLogger(__name__)


class TestExclusivity(FunctionalTestCase):
    """Test Exclusivity Function Test."""

    def setUp(self):
        ''' Initiating template '''
        """Initiating template."""
        super(TestExclusivity, self).setUp()
        self.init_template(CONF.test_exclusivity)

    def test_exclusivity(self):
        """Nested run test on stack_name and template_path."""
        self.run_test(self.stack_name, self.template_path)

    def get_name(self):
        """Return name."""
        return __name__

@ -13,6 +13,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.

"""Test Groups."""

from valet.tests.functional.valet_validator.common.auth import Auth
from valet.tests.functional.valet_validator.common import GeneralLogger
from valet.tests.functional.valet_validator.group_api.valet_group import ValetGroup
@ -20,32 +22,42 @@ from valet.tests.functional.valet_validator.tests.functional_base import Functio


class TestGroups(FunctionalTestCase):
    """Test valet groups functional."""

    def setUp(self):
        ''' Adding configuration and logging mechanism '''
        """Add configuration and logging mechanism."""
        super(TestGroups, self).setUp()
        self.groups = ValetGroup()
        self.group_name = "test_group"
        self.group_type = "exclusivity"

    def test_groups(self):
        """Test groups using multiple methods and checking response codes."""
        GeneralLogger.log_group("Delete all stacks")
        self.load.delete_all_stacks()

        GeneralLogger.log_group("Delete all members and groups")

        respose_code = self.groups.delete_all_groups()
        self.assertEqual(204, respose_code, "delete_all_groups failed with code %s" % respose_code)
        self.assertEqual(204, respose_code,
                         "delete_all_groups failed with code %s"
                         % respose_code)

        self.assertEqual([], self.groups.get_list_groups(), "delete_all_groups failed")
        self.assertEqual([], self.groups.get_list_groups(),
                         "delete_all_groups failed")

        GeneralLogger.log_group("Try to delete not existing group")
        response = self.groups.delete_group("d68f62b1-4758-4ea5-a93a-8f9d9c0ae912")
        self.assertEqual(404, response.status_code, "delete_group failed with code %s" % response.status_code)
        response = self.groups.delete_group(
            "d68f62b1-4758-4ea5-a93a-8f9d9c0ae912")
        self.assertEqual(404, response.status_code,
                         "delete_group failed with code %s"
                         % response.status_code)

        GeneralLogger.log_group("Create test_group")
        group_info = self.groups.create_group(self.group_name, self.group_type)
        self.assertEqual(201, group_info.status_code, "create_group failed with code %s" % group_info.status_code)
        self.assertEqual(201, group_info.status_code,
                         "create_group failed with code %s"
                         % group_info.status_code)

        grp_id = group_info.json()["id"]
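
Every wrapped assertion in this file follows one shape: expected status code, actual response, formatted failure message. If the repetition were worth factoring out, a small helper could capture it (illustrative only, not part of the commit; delete_all_groups returns a bare code and would stay as-is):

def assert_status(test, expected, response, operation):
    # Uniform status-code assertion with a descriptive failure message.
    test.assertEqual(expected, response.status_code,
                     "%s failed with code %s"
                     % (operation, response.status_code))

# e.g. assert_status(self, 201, group_info, "create_group")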

@ -53,30 +65,43 @@ class TestGroups(FunctionalTestCase):
        GeneralLogger.log_group(str(self.groups.get_list_groups()))

        GeneralLogger.log_group("Create test member (NOT tenant ID)")
        member_respone = self.groups.update_group_members(grp_id, members="test_member")
        self.assertEqual(409, member_respone.status_code, "update_group_members failed with code %s" % member_respone.status_code)
        member_respone = self.groups.update_group_members(grp_id,
                                                          members="test_member")
        self.assertEqual(409, member_respone.status_code,
                         "update_group_members failed with code %s"
                         % member_respone.status_code)

        GeneralLogger.log_group("Add description to group")
        desc_response = self.groups.update_group(grp_id, "new_description")
        self.assertEqual(201, desc_response.status_code, "update_group failed with code %s" % desc_response.status_code)
        self.assertEqual(201, desc_response.status_code,
                         "update_group failed with code %s"
                         % desc_response.status_code)

        GeneralLogger.log_group("Create member (tenant ID)")
        member_respone = self.groups.update_group_members(grp_id)
        self.assertEqual(201, member_respone.status_code, "update_group_members failed with code %s" % member_respone.status_code)
        self.assertEqual(201, member_respone.status_code,
                         "update_group_members failed with code %s"
                         % member_respone.status_code)

        GeneralLogger.log_group("Return list of groups")
        GeneralLogger.log_group(self.groups.get_group_details(grp_id).json())

        GeneralLogger.log_group("Delete test member (NOT tenant ID)")
        member_respone = self.groups.delete_group_member(grp_id, "test_member")
        self.assertEqual(404, member_respone.status_code, "delete_group_member failed with code %s" % member_respone.status_code)
        self.assertEqual(404, member_respone.status_code,
                         "delete_group_member failed with code %s"
                         % member_respone.status_code)

        GeneralLogger.log_group("Delete member (tenant ID)")
        member_respone = self.groups.delete_group_member(grp_id, Auth.get_project_id())
        self.assertEqual(204, member_respone.status_code, "delete_group_member failed with code %s" % member_respone.status_code)
        member_respone = self.groups.delete_group_member(grp_id,
                                                         Auth.get_project_id())
        self.assertEqual(204, member_respone.status_code,
                         "delete_group_member failed with code %s"
                         % member_respone.status_code)

        GeneralLogger.log_group("Return list of groups")
        GeneralLogger.log_group(self.groups.get_group_details(grp_id).json())

    def get_name(self):
        """Return name."""
        return __name__
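
End to end, test_groups walks a full group lifecycle against the Valet group API and pins the expected status code at each step. A condensed sketch using the ValetGroup methods the test calls (the bogus IDs below are illustrative; expected codes are taken from the assertions above):

def group_lifecycle(groups, tenant_id):
    # Expected status codes from the assertions above are noted inline.
    groups.delete_all_groups()                                # 204; list is []
    groups.delete_group("no-such-group-id")                   # 404
    info = groups.create_group("test_group", "exclusivity")   # 201
    grp_id = info.json()["id"]
    groups.update_group_members(grp_id, members="bogus")      # 409, not a tenant
    groups.update_group(grp_id, "new_description")            # 201
    groups.update_group_members(grp_id)                       # 201, tenant added
    groups.delete_group_member(grp_id, "bogus")               # 404
    groups.delete_group_member(grp_id, tenant_id)             # 204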

Some files were not shown because too many files have changed in this diff.